//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ContextVk.cpp:
//    Implements the class methods for ContextVk.
//

#include "libANGLE/renderer/vulkan/ContextVk.h"

#include "common/bitset_utils.h"
#include "common/debug.h"
#include "common/utilities.h"
#include "libANGLE/Context.h"
#include "libANGLE/Display.h"
#include "libANGLE/Program.h"
#include "libANGLE/Semaphore.h"
#include "libANGLE/Surface.h"
#include "libANGLE/angletypes.h"
#include "libANGLE/renderer/renderer_utils.h"
#include "libANGLE/renderer/vulkan/BufferVk.h"
#include "libANGLE/renderer/vulkan/CompilerVk.h"
#include "libANGLE/renderer/vulkan/DisplayVk.h"
#include "libANGLE/renderer/vulkan/FenceNVVk.h"
#include "libANGLE/renderer/vulkan/FramebufferVk.h"
#include "libANGLE/renderer/vulkan/MemoryObjectVk.h"
#include "libANGLE/renderer/vulkan/OverlayVk.h"
#include "libANGLE/renderer/vulkan/ProgramPipelineVk.h"
#include "libANGLE/renderer/vulkan/ProgramVk.h"
#include "libANGLE/renderer/vulkan/QueryVk.h"
#include "libANGLE/renderer/vulkan/RenderbufferVk.h"
#include "libANGLE/renderer/vulkan/RendererVk.h"
#include "libANGLE/renderer/vulkan/SamplerVk.h"
#include "libANGLE/renderer/vulkan/SemaphoreVk.h"
#include "libANGLE/renderer/vulkan/ShaderVk.h"
#include "libANGLE/renderer/vulkan/SurfaceVk.h"
#include "libANGLE/renderer/vulkan/SyncVk.h"
#include "libANGLE/renderer/vulkan/TextureVk.h"
#include "libANGLE/renderer/vulkan/TransformFeedbackVk.h"
#include "libANGLE/renderer/vulkan/VertexArrayVk.h"

#include "libANGLE/trace.h"

#include <iostream>

namespace rx
{

namespace
{
// For DescriptorSetUpdates
constexpr size_t kDescriptorBufferInfosInitialSize = 8;
constexpr size_t kDescriptorImageInfosInitialSize  = 4;
constexpr size_t kDescriptorWriteInfosInitialSize =
    kDescriptorBufferInfosInitialSize + kDescriptorImageInfosInitialSize;

// For shader uniforms such as gl_DepthRange and the viewport size.
struct GraphicsDriverUniforms
{
    std::array<float, 4> viewport;

    // 32 bits for 32 clip planes
    uint32_t enabledClipPlanes;

    uint32_t xfbActiveUnpaused;
    int32_t xfbVerticesPerInstance;

    // Used to replace gl_NumSamples, because gl_NumSamples cannot be recognized in SPIR-V.
    int32_t numSamples;

    std::array<int32_t, 4> xfbBufferOffsets;

    // .xy contain packed 8-bit values for atomic counter buffer offsets.  These offsets are
    // within Vulkan's minStorageBufferOffsetAlignment limit and are used to support unaligned
    // offsets allowed in GL.
    //
    // .zw are unused.
    std::array<uint32_t, 4> acbBufferOffsets;

    // We'll use x, y, z for near / far / diff respectively.
    std::array<float, 4> depthRange;
};
static_assert(sizeof(GraphicsDriverUniforms) % (sizeof(uint32_t) * 4) == 0,
              "GraphicsDriverUniforms should be 16-byte aligned");

// TODO: http://issuetracker.google.com/173636783 Once the bug is fixed, we should remove this.
struct GraphicsDriverUniformsExtended
{
    GraphicsDriverUniforms common;

    // Used to flip gl_FragCoord (both .xy for Android pre-rotation; only .y for desktop)
    std::array<float, 2> halfRenderArea;
    std::array<float, 2> flipXY;
    std::array<float, 2> negFlipXY;
    std::array<int32_t, 2> padding;

    // Used to pre-rotate gl_FragCoord for swapchain images on Android (a mat2, which is padded to
    // the size of two vec4's).
    std::array<float, 8> fragRotation;
};

struct ComputeDriverUniforms
{
    // Atomic counter buffer offsets with the same layout as in GraphicsDriverUniforms.
    std::array<uint32_t, 4> acbBufferOffsets;
};

GLenum DefaultGLErrorCode(VkResult result)
{
    switch (result)
    {
        case VK_ERROR_OUT_OF_HOST_MEMORY:
        case VK_ERROR_OUT_OF_DEVICE_MEMORY:
        case VK_ERROR_TOO_MANY_OBJECTS:
            return GL_OUT_OF_MEMORY;
        default:
            return GL_INVALID_OPERATION;
    }
}

constexpr gl::ShaderMap<vk::ImageLayout> kShaderReadOnlyImageLayouts = {
    {gl::ShaderType::Vertex, vk::ImageLayout::VertexShaderReadOnly},
    {gl::ShaderType::TessControl, vk::ImageLayout::PreFragmentShadersReadOnly},
    {gl::ShaderType::TessEvaluation, vk::ImageLayout::PreFragmentShadersReadOnly},
    {gl::ShaderType::Geometry, vk::ImageLayout::PreFragmentShadersReadOnly},
    {gl::ShaderType::Fragment, vk::ImageLayout::FragmentShaderReadOnly},
    {gl::ShaderType::Compute, vk::ImageLayout::ComputeShaderReadOnly}};

constexpr gl::ShaderMap<vk::ImageLayout> kShaderWriteImageLayouts = {
    {gl::ShaderType::Vertex, vk::ImageLayout::VertexShaderWrite},
    {gl::ShaderType::TessControl, vk::ImageLayout::PreFragmentShadersWrite},
    {gl::ShaderType::TessEvaluation, vk::ImageLayout::PreFragmentShadersWrite},
    {gl::ShaderType::Geometry, vk::ImageLayout::PreFragmentShadersWrite},
    {gl::ShaderType::Fragment, vk::ImageLayout::FragmentShaderWrite},
    {gl::ShaderType::Compute, vk::ImageLayout::ComputeShaderWrite}};

constexpr VkBufferUsageFlags kVertexBufferUsage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
constexpr size_t kDefaultValueSize              = sizeof(gl::VertexAttribCurrentValueData::Values);
constexpr size_t kDefaultBufferSize             = kDefaultValueSize * 16;
constexpr size_t kDriverUniformsAllocatorPageSize = 4 * 1024;

uint32_t GetCoverageSampleCount(const gl::State &glState, FramebufferVk *drawFramebuffer)
{
    if (!glState.isSampleCoverageEnabled())
    {
        return 0;
    }

    // Get a fraction of the samples based on the coverage parameters.
    // There are multiple ways to obtain an integer value from a float -
    //     truncation, ceil and round
    //
    // round() provides a more even distribution of values but doesn't seem to play well
    // with all vendors (AMD). A way to work around this is to increase the comparison threshold
    // of deqp tests. Though this takes care of deqp tests, other apps would still have issues.
    //
    // Truncation provides an uneven distribution near the edges of the interval but seems to
    // play well with all vendors.
    //
    // We are going with truncation for expediency.
    return static_cast<uint32_t>(glState.getSampleCoverageValue() * drawFramebuffer->getSamples());
}
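
// An illustrative example of the truncation choice above: with
// glSampleCoverage(0.9f, GL_FALSE) on a 4-sample framebuffer, 0.9f * 4 = 3.6
// truncates to 3 covered samples where round() would have produced 4; such
// boundary values are exactly where vendor results diverge.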

void ApplySampleCoverage(const gl::State &glState,
                         uint32_t coverageSampleCount,
                         uint32_t maskNumber,
                         uint32_t *maskOut)
{
    if (!glState.isSampleCoverageEnabled())
    {
        return;
    }

    uint32_t maskBitOffset = maskNumber * 32;
    uint32_t coverageMask  = coverageSampleCount >= (maskBitOffset + 32)
                                ? std::numeric_limits<uint32_t>::max()
                                : (1u << (coverageSampleCount - maskBitOffset)) - 1;

    if (glState.getSampleCoverageInvert())
    {
        coverageMask = ~coverageMask;
    }

    *maskOut &= coverageMask;
}
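
// A worked example (illustrative only): coverageSampleCount = 3 with
// maskNumber = 0 gives maskBitOffset = 0, so coverageMask = (1u << 3) - 1 =
// 0b111 and only the low three sample mask bits survive the AND (or their
// complement when GL_SAMPLE_COVERAGE_INVERT is set).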

bool IsRenderPassStartedAndUsesImage(const vk::CommandBufferHelper &renderPassCommands,
                                     const vk::ImageHelper &image)
{
    return renderPassCommands.started() && renderPassCommands.usesImageInRenderPass(image);
}

// When an Android surface is rotated differently than the device's native orientation, ANGLE must
// rotate gl_Position in the last pre-rasterization shader and gl_FragCoord in the fragment shader.
// Rotation of gl_Position is done in SPIR-V.  The following are the rotation matrices for the
// fragment shader.
//
// Note: these are mat2's that are appropriately padded (4 floats per row).
using PreRotationMatrixValues = std::array<float, 8>;
constexpr angle::PackedEnumMap<rx::SurfaceRotation,
                               PreRotationMatrixValues,
                               angle::EnumSize<rx::SurfaceRotation>()>
    kFragRotationMatrices = {
        {{rx::SurfaceRotation::Identity, {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}},
         {rx::SurfaceRotation::Rotated90Degrees,
          {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}},
         {rx::SurfaceRotation::Rotated180Degrees,
          {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}},
         {rx::SurfaceRotation::Rotated270Degrees,
          {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}},
         {rx::SurfaceRotation::FlippedIdentity, {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}},
         {rx::SurfaceRotation::FlippedRotated90Degrees,
          {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}},
         {rx::SurfaceRotation::FlippedRotated180Degrees,
          {{1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f}}},
         {rx::SurfaceRotation::FlippedRotated270Degrees,
          {{0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f}}}}};
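
// Layout note (illustrative): since each mat2 row is padded to a full vec4,
// the 90-degree rotation [[0, 1], [1, 0]] above is stored as
// {0, 1, pad, pad, 1, 0, pad, pad} -- eight floats per matrix, matching
// PreRotationMatrixValues.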

bool IsRotatedAspectRatio(SurfaceRotation rotation)
{
    return ((rotation == SurfaceRotation::Rotated90Degrees) ||
            (rotation == SurfaceRotation::Rotated270Degrees) ||
            (rotation == SurfaceRotation::FlippedRotated90Degrees) ||
            (rotation == SurfaceRotation::FlippedRotated270Degrees));
}

SurfaceRotation DetermineSurfaceRotation(gl::Framebuffer *framebuffer,
                                         WindowSurfaceVk *windowSurface)
{
    if (windowSurface && framebuffer->isDefault())
    {
        switch (windowSurface->getPreTransform())
        {
            case VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR:
                // Do not rotate gl_Position (surface matches the device's orientation):
                return SurfaceRotation::Identity;
            case VK_SURFACE_TRANSFORM_ROTATE_90_BIT_KHR:
                // Rotate gl_Position 90 degrees:
                return SurfaceRotation::Rotated90Degrees;
            case VK_SURFACE_TRANSFORM_ROTATE_180_BIT_KHR:
                // Rotate gl_Position 180 degrees:
                return SurfaceRotation::Rotated180Degrees;
            case VK_SURFACE_TRANSFORM_ROTATE_270_BIT_KHR:
                // Rotate gl_Position 270 degrees:
                return SurfaceRotation::Rotated270Degrees;
            default:
                UNREACHABLE();
                return SurfaceRotation::Identity;
        }
    }
    else
    {
        // Do not rotate gl_Position (offscreen framebuffer):
        return SurfaceRotation::Identity;
    }
}

// Should not generate a copy with modern C++.
EventName GetTraceEventName(const char *title, uint32_t counter)
{
    EventName buf;
    snprintf(buf.data(), kMaxGpuEventNameLen - 1, "%s %u", title, counter);
    return buf;
}

vk::ResourceAccess GetDepthAccess(const gl::DepthStencilState &dsState)
{
    if (!dsState.depthTest)
    {
        return vk::ResourceAccess::Unused;
    }
    return dsState.isDepthMaskedOut() ? vk::ResourceAccess::ReadOnly : vk::ResourceAccess::Write;
}

vk::ResourceAccess GetStencilAccess(const gl::DepthStencilState &dsState)
{
    if (!dsState.stencilTest)
    {
        return vk::ResourceAccess::Unused;
    }

    return dsState.isStencilNoOp() && dsState.isStencilBackNoOp() ? vk::ResourceAccess::ReadOnly
                                                                  : vk::ResourceAccess::Write;
}
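
// For example (illustrative): glEnable(GL_DEPTH_TEST) combined with
// glDepthMask(GL_FALSE) yields vk::ResourceAccess::ReadOnly above, which
// allows the depth/stencil attachment to remain in a read-only layout for the
// render pass rather than forcing a writable one.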

egl::ContextPriority GetContextPriority(const gl::State &state)
{
    return egl::FromEGLenum<egl::ContextPriority>(state.getContextPriority());
}

template <typename MaskT>
void AppendBufferVectorToDesc(vk::ShaderBuffersDescriptorDesc *desc,
                              const gl::BufferVector &buffers,
                              const MaskT &buffersMask,
                              bool appendOffset)
{
    if (buffersMask.any())
    {
        typename MaskT::param_type lastBufferIndex = buffersMask.last();
        for (typename MaskT::param_type bufferIndex = 0; bufferIndex <= lastBufferIndex;
             ++bufferIndex)
        {
            const gl::OffsetBindingPointer<gl::Buffer> &binding = buffers[bufferIndex];
            const gl::Buffer *bufferGL                          = binding.get();

            if (!bufferGL)
            {
                desc->append32BitValue(0);
                continue;
            }

            BufferVk *bufferVk = vk::GetImpl(bufferGL);

            if (!bufferVk->isBufferValid())
            {
                desc->append32BitValue(0);
                continue;
            }

            VkDeviceSize bufferOffset = 0;
            vk::BufferSerial bufferSerial =
                bufferVk->getBufferAndOffset(&bufferOffset).getBufferSerial();

            desc->appendBufferSerial(bufferSerial);
            ASSERT(static_cast<uint64_t>(binding.getSize()) <=
                   static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()));
            desc->append32BitValue(static_cast<uint32_t>(binding.getSize()));
            if (appendOffset)
            {
                ASSERT(static_cast<uint64_t>(binding.getOffset()) <
                       static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()));
                desc->append32BitValue(static_cast<uint32_t>(bufferOffset + binding.getOffset()));
            }
        }
    }

    desc->append32BitValue(std::numeric_limits<uint32_t>::max());
}
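
// A sketch of the resulting encoding (illustrative): for a mask with buffers
// {0, 1} bound, the desc holds {serial0, size0[, offset0], serial1, size1
// [, offset1], 0xFFFFFFFF}; the trailing 32-bit max value terminates the list
// so that descs built from different buffer counts never alias in the cache.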
}  // anonymous namespace

// Not necessary once upgraded to C++17.
constexpr ContextVk::DirtyBits ContextVk::kIndexAndVertexDirtyBits;
constexpr ContextVk::DirtyBits ContextVk::kPipelineDescAndBindingDirtyBits;
constexpr ContextVk::DirtyBits ContextVk::kTexturesAndDescSetDirtyBits;
constexpr ContextVk::DirtyBits ContextVk::kResourcesAndDescSetDirtyBits;
constexpr ContextVk::DirtyBits ContextVk::kXfbBuffersAndDescSetDirtyBits;
constexpr ContextVk::DirtyBits ContextVk::kDriverUniformsAndBindingDirtyBits;

ANGLE_INLINE void ContextVk::flushDescriptorSetUpdates()
{
    if (mWriteDescriptorSets.empty())
    {
        ASSERT(mDescriptorBufferInfos.empty());
        ASSERT(mDescriptorImageInfos.empty());
        return;
    }

    vkUpdateDescriptorSets(getDevice(), static_cast<uint32_t>(mWriteDescriptorSets.size()),
                           mWriteDescriptorSets.data(), 0, nullptr);
    mWriteDescriptorSets.clear();
    mDescriptorBufferInfos.clear();
    mDescriptorImageInfos.clear();
}

ANGLE_INLINE void ContextVk::onRenderPassFinished()
{
    pauseRenderPassQueriesIfActive();

    mRenderPassCommandBuffer = nullptr;
    mGraphicsDirtyBits.set(DIRTY_BIT_RENDER_PASS);
}

// ContextVk::ScopedDescriptorSetUpdates implementation.
class ContextVk::ScopedDescriptorSetUpdates final : angle::NonCopyable
{
  public:
    ANGLE_INLINE ScopedDescriptorSetUpdates(ContextVk *contextVk) : mContextVk(contextVk) {}
    ANGLE_INLINE ~ScopedDescriptorSetUpdates() { mContextVk->flushDescriptorSetUpdates(); }

  private:
    ContextVk *mContextVk;
};

ContextVk::DriverUniformsDescriptorSet::DriverUniformsDescriptorSet()
    : descriptorSet(VK_NULL_HANDLE), dynamicOffset(0)
{}

ContextVk::DriverUniformsDescriptorSet::~DriverUniformsDescriptorSet() = default;

void ContextVk::DriverUniformsDescriptorSet::init(RendererVk *rendererVk)
{
    size_t minAlignment = static_cast<size_t>(
        rendererVk->getPhysicalDeviceProperties().limits.minUniformBufferOffsetAlignment);
    dynamicBuffer.init(rendererVk, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, minAlignment,
                       kDriverUniformsAllocatorPageSize, true,
                       vk::DynamicBufferPolicy::FrequentSmallAllocations);
    descriptorSetCache.clear();
}

void ContextVk::DriverUniformsDescriptorSet::destroy(RendererVk *renderer)
{
    descriptorSetLayout.reset();
    descriptorPoolBinding.reset();
    dynamicBuffer.destroy(renderer);
    descriptorSetCache.clear();
    descriptorSetCache.destroy(renderer);
}

// ContextVk implementation.
ContextVk::ContextVk(const gl::State &state, gl::ErrorSet *errorSet, RendererVk *renderer)
    : ContextImpl(state, errorSet),
      vk::Context(renderer),
      mGraphicsDirtyBitHandlers{},
      mComputeDirtyBitHandlers{},
      mRenderPassCommandBuffer(nullptr),
      mCurrentGraphicsPipeline(nullptr),
      mCurrentComputePipeline(nullptr),
      mCurrentDrawMode(gl::PrimitiveMode::InvalidEnum),
      mCurrentWindowSurface(nullptr),
      mCurrentRotationDrawFramebuffer(SurfaceRotation::Identity),
      mCurrentRotationReadFramebuffer(SurfaceRotation::Identity),
      mActiveRenderPassQueries{},
      mVertexArray(nullptr),
      mDrawFramebuffer(nullptr),
      mProgram(nullptr),
      mExecutable(nullptr),
      mLastIndexBufferOffset(nullptr),
      mCurrentIndexBufferOffset(0),
      mCurrentDrawElementsType(gl::DrawElementsType::InvalidEnum),
      mXfbBaseVertex(0),
      mXfbVertexCountPerInstance(0),
      mClearColorValue{},
      mClearDepthStencilValue{},
      mClearColorMasks(0),
      mFlipYForCurrentSurface(false),
      mFlipViewportForDrawFramebuffer(false),
      mFlipViewportForReadFramebuffer(false),
      mIsAnyHostVisibleBufferWritten(false),
      mEmulateSeamfulCubeMapSampling(false),
      mOutsideRenderPassCommands(nullptr),
      mRenderPassCommands(nullptr),
      mQueryEventType(GraphicsEventCmdBuf::NotInQueryCmd),
      mGpuEventsEnabled(false),
      mEGLSyncObjectPendingFlush(false),
      mHasDeferredFlush(false),
      mLastProgramUsesFramebufferFetch(false),
      mGpuClockSync{std::numeric_limits<double>::max(), std::numeric_limits<double>::max()},
      mGpuEventTimestampOrigin(0),
      mPerfCounters{},
      mContextPerfCounters{},
      mCumulativeContextPerfCounters{},
      mContextPriority(renderer->getDriverPriority(GetContextPriority(state))),
      mShareGroupVk(vk::GetImpl(state.getShareGroup()))
{
    ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::ContextVk");
    memset(&mClearColorValue, 0, sizeof(mClearColorValue));
    memset(&mClearDepthStencilValue, 0, sizeof(mClearDepthStencilValue));
    memset(&mViewport, 0, sizeof(mViewport));
    memset(&mScissor, 0, sizeof(mScissor));

    // Ensure viewport is within Vulkan requirements
    vk::ClampViewport(&mViewport);

    mNonIndexedDirtyBitsMask.set();
    mNonIndexedDirtyBitsMask.reset(DIRTY_BIT_INDEX_BUFFER);

    mIndexedDirtyBitsMask.set();

    // Once a command buffer is ended, all bindings (through |vkCmdBind*| calls) are lost per Vulkan
    // spec.  Once a new command buffer is allocated, we must make sure every previously bound
    // resource is bound again.
    //
    // Note that currently these dirty bits are set every time a new render pass command buffer is
    // begun.  However, using ANGLE's SecondaryCommandBuffer, the Vulkan command buffer (which is
    // the primary command buffer) is not ended, so technically we don't need to rebind these.
    mNewGraphicsCommandBufferDirtyBits =
        DirtyBits{DIRTY_BIT_RENDER_PASS,     DIRTY_BIT_PIPELINE_BINDING,
                  DIRTY_BIT_TEXTURES,        DIRTY_BIT_VERTEX_BUFFERS,
                  DIRTY_BIT_INDEX_BUFFER,    DIRTY_BIT_SHADER_RESOURCES,
                  DIRTY_BIT_DESCRIPTOR_SETS, DIRTY_BIT_DRIVER_UNIFORMS_BINDING,
                  DIRTY_BIT_VIEWPORT,        DIRTY_BIT_SCISSOR};
    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        mNewGraphicsCommandBufferDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS);
    }

    mNewComputeCommandBufferDirtyBits =
        DirtyBits{DIRTY_BIT_PIPELINE_BINDING, DIRTY_BIT_TEXTURES, DIRTY_BIT_SHADER_RESOURCES,
                  DIRTY_BIT_DESCRIPTOR_SETS, DIRTY_BIT_DRIVER_UNIFORMS_BINDING};

    mGraphicsDirtyBitHandlers[DIRTY_BIT_MEMORY_BARRIER] =
        &ContextVk::handleDirtyGraphicsMemoryBarrier;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_EVENT_LOG] = &ContextVk::handleDirtyGraphicsEventLog;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_DEFAULT_ATTRIBS] =
        &ContextVk::handleDirtyGraphicsDefaultAttribs;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_PIPELINE_DESC] =
        &ContextVk::handleDirtyGraphicsPipelineDesc;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_RENDER_PASS] = &ContextVk::handleDirtyGraphicsRenderPass;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_PIPELINE_BINDING] =
        &ContextVk::handleDirtyGraphicsPipelineBinding;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_TEXTURES] = &ContextVk::handleDirtyGraphicsTextures;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_VERTEX_BUFFERS] =
        &ContextVk::handleDirtyGraphicsVertexBuffers;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_INDEX_BUFFER] = &ContextVk::handleDirtyGraphicsIndexBuffer;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS] =
        &ContextVk::handleDirtyGraphicsDriverUniforms;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS_BINDING] =
        &ContextVk::handleDirtyGraphicsDriverUniformsBinding;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_SHADER_RESOURCES] =
        &ContextVk::handleDirtyGraphicsShaderResources;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_FRAMEBUFFER_FETCH_BARRIER] =
        &ContextVk::handleDirtyGraphicsFramebufferFetchBarrier;
    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        mGraphicsDirtyBitHandlers[DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS] =
            &ContextVk::handleDirtyGraphicsTransformFeedbackBuffersExtension;
        mGraphicsDirtyBitHandlers[DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME] =
            &ContextVk::handleDirtyGraphicsTransformFeedbackResume;
    }
    else if (getFeatures().emulateTransformFeedback.enabled)
    {
        mGraphicsDirtyBitHandlers[DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS] =
            &ContextVk::handleDirtyGraphicsTransformFeedbackBuffersEmulation;
    }

    mGraphicsDirtyBitHandlers[DIRTY_BIT_DESCRIPTOR_SETS] =
        &ContextVk::handleDirtyGraphicsDescriptorSets;

    mGraphicsDirtyBitHandlers[DIRTY_BIT_VIEWPORT] = &ContextVk::handleDirtyGraphicsViewport;
    mGraphicsDirtyBitHandlers[DIRTY_BIT_SCISSOR]  = &ContextVk::handleDirtyGraphicsScissor;

    mComputeDirtyBitHandlers[DIRTY_BIT_MEMORY_BARRIER] =
        &ContextVk::handleDirtyComputeMemoryBarrier;
    mComputeDirtyBitHandlers[DIRTY_BIT_EVENT_LOG]     = &ContextVk::handleDirtyComputeEventLog;
    mComputeDirtyBitHandlers[DIRTY_BIT_PIPELINE_DESC] = &ContextVk::handleDirtyComputePipelineDesc;
    mComputeDirtyBitHandlers[DIRTY_BIT_PIPELINE_BINDING] =
        &ContextVk::handleDirtyComputePipelineBinding;
    mComputeDirtyBitHandlers[DIRTY_BIT_TEXTURES] = &ContextVk::handleDirtyComputeTextures;
    mComputeDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS] =
        &ContextVk::handleDirtyComputeDriverUniforms;
    mComputeDirtyBitHandlers[DIRTY_BIT_DRIVER_UNIFORMS_BINDING] =
        &ContextVk::handleDirtyComputeDriverUniformsBinding;
    mComputeDirtyBitHandlers[DIRTY_BIT_SHADER_RESOURCES] =
        &ContextVk::handleDirtyComputeShaderResources;
    mComputeDirtyBitHandlers[DIRTY_BIT_DESCRIPTOR_SETS] =
        &ContextVk::handleDirtyComputeDescriptorSets;

    mGraphicsDirtyBits = mNewGraphicsCommandBufferDirtyBits;
    mComputeDirtyBits  = mNewComputeCommandBufferDirtyBits;

    mActiveTextures.fill({nullptr, nullptr, true});
    mActiveImages.fill(nullptr);

    // The following dirty bits don't affect the program pipeline:
    //
    // - READ_FRAMEBUFFER_BINDING only affects operations that read from said framebuffer,
    // - CLEAR_* only affect following clear calls,
    // - PACK/UNPACK_STATE only affect texture data upload/download,
    // - *_BINDING only affect descriptor sets.
    //
    mPipelineDirtyBitsMask.set();
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_READ_FRAMEBUFFER_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CLEAR_COLOR);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CLEAR_DEPTH);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_CLEAR_STENCIL);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_UNPACK_STATE);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_UNPACK_BUFFER_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_PACK_STATE);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_PACK_BUFFER_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_RENDERBUFFER_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_DRAW_INDIRECT_BUFFER_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_DISPATCH_INDIRECT_BUFFER_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_SAMPLER_BINDINGS);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_TEXTURE_BINDINGS);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_IMAGE_BINDINGS);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_TRANSFORM_FEEDBACK_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_UNIFORM_BUFFER_BINDINGS);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_SHADER_STORAGE_BUFFER_BINDING);
    mPipelineDirtyBitsMask.reset(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING);

    // Reserve a reasonable amount of space so that for the majority of apps we don't need to grow
    // at all.
    mDescriptorBufferInfos.reserve(kDescriptorBufferInfosInitialSize);
    mDescriptorImageInfos.reserve(kDescriptorImageInfosInitialSize);
    mWriteDescriptorSets.reserve(kDescriptorWriteInfosInitialSize);
}

ContextVk::~ContextVk() = default;

void ContextVk::onDestroy(const gl::Context *context)
{
    outputCumulativePerfCounters();

    // Remove context from the share group
    mShareGroupVk->getContexts()->erase(this);

    // This will not destroy any resources. It will release them to be collected after finish.
    mIncompleteTextures.onDestroy(context);

    // Flush and complete current outstanding work before destruction.
    (void)finishImpl();

    VkDevice device = getDevice();

    for (DriverUniformsDescriptorSet &driverUniforms : mDriverUniforms)
    {
        driverUniforms.destroy(mRenderer);
    }

    for (vk::DynamicDescriptorPool &dynamicDescriptorPool : mDriverUniformsDescriptorPools)
    {
        dynamicDescriptorPool.destroy(device);
    }

    mDefaultUniformStorage.release(mRenderer);
    mEmptyBuffer.release(mRenderer);
    mStagingBuffer.release(mRenderer);

    for (vk::DynamicBuffer &defaultBuffer : mDefaultAttribBuffers)
    {
        defaultBuffer.destroy(mRenderer);
    }

    for (vk::DynamicQueryPool &queryPool : mQueryPools)
    {
        queryPool.destroy(device);
    }

    // Recycle the current command buffers.
    mRenderer->recycleCommandBufferHelper(mOutsideRenderPassCommands);
    mRenderer->recycleCommandBufferHelper(mRenderPassCommands);
    mOutsideRenderPassCommands = nullptr;
    mRenderPassCommands        = nullptr;

    mRenderer->releaseSharedResources(&mResourceUseList);

    mUtils.destroy(mRenderer);

    mRenderPassCache.destroy(mRenderer);
    mShaderLibrary.destroy(device);
    mGpuEventQueryPool.destroy(device);
    mCommandPool.destroy(device);

    ASSERT(mCurrentGarbage.empty());
    ASSERT(mResourceUseList.empty());
}

angle::Result ContextVk::getIncompleteTexture(const gl::Context *context,
                                              gl::TextureType type,
                                              gl::SamplerFormat format,
                                              gl::Texture **textureOut)
{
    return mIncompleteTextures.getIncompleteTexture(context, type, format, this, textureOut);
}

angle::Result ContextVk::initialize()
{
    ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::initialize");

    ANGLE_TRY(mQueryPools[gl::QueryType::AnySamples].init(this, VK_QUERY_TYPE_OCCLUSION,
                                                          vk::kDefaultOcclusionQueryPoolSize));
    ANGLE_TRY(mQueryPools[gl::QueryType::AnySamplesConservative].init(
        this, VK_QUERY_TYPE_OCCLUSION, vk::kDefaultOcclusionQueryPoolSize));

    // Only initialize the timestamp query pools if timestamp queries are supported.
    if (mRenderer->getQueueFamilyProperties().timestampValidBits > 0)
    {
        ANGLE_TRY(mQueryPools[gl::QueryType::Timestamp].init(this, VK_QUERY_TYPE_TIMESTAMP,
                                                             vk::kDefaultTimestampQueryPoolSize));
        ANGLE_TRY(mQueryPools[gl::QueryType::TimeElapsed].init(this, VK_QUERY_TYPE_TIMESTAMP,
                                                               vk::kDefaultTimestampQueryPoolSize));
    }

    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        ANGLE_TRY(mQueryPools[gl::QueryType::TransformFeedbackPrimitivesWritten].init(
            this, VK_QUERY_TYPE_TRANSFORM_FEEDBACK_STREAM_EXT,
            vk::kDefaultTransformFeedbackQueryPoolSize));
    }

    // The primitives generated query is provided through the Vulkan pipeline statistics query if
    // supported.  TODO: If VK_EXT_primitives_generated_query is supported, use that instead.
    // http://anglebug.com/5430
    if (getFeatures().supportsPipelineStatisticsQuery.enabled)
    {
        ANGLE_TRY(mQueryPools[gl::QueryType::PrimitivesGenerated].init(
            this, VK_QUERY_TYPE_PIPELINE_STATISTICS, vk::kDefaultPrimitivesGeneratedQueryPoolSize));
    }

    // Init GLES to Vulkan index type map.
    initIndexTypeMap();

    // Init driver uniforms and get the descriptor set layouts.
    constexpr angle::PackedEnumMap<PipelineType, VkShaderStageFlags> kPipelineStages = {
        {PipelineType::Graphics, VK_SHADER_STAGE_ALL_GRAPHICS},
        {PipelineType::Compute, VK_SHADER_STAGE_COMPUTE_BIT},
    };
    for (PipelineType pipeline : angle::AllEnums<PipelineType>())
    {
        mDriverUniforms[pipeline].init(mRenderer);

        vk::DescriptorSetLayoutDesc desc =
            getDriverUniformsDescriptorSetDesc(kPipelineStages[pipeline]);
        ANGLE_TRY(getDescriptorSetLayoutCache().getDescriptorSetLayout(
            this, desc, &mDriverUniforms[pipeline].descriptorSetLayout));

        vk::DescriptorSetLayoutBindingVector bindingVector;
        std::vector<VkSampler> immutableSamplers;
        desc.unpackBindings(&bindingVector, &immutableSamplers);
        std::vector<VkDescriptorPoolSize> descriptorPoolSizes;

        for (const VkDescriptorSetLayoutBinding &binding : bindingVector)
        {
            if (binding.descriptorCount > 0)
            {
                VkDescriptorPoolSize poolSize = {};

                poolSize.type            = binding.descriptorType;
                poolSize.descriptorCount = binding.descriptorCount;
                descriptorPoolSizes.emplace_back(poolSize);
            }
        }
        if (!descriptorPoolSizes.empty())
        {
            ANGLE_TRY(mDriverUniformsDescriptorPools[pipeline].init(
                this, descriptorPoolSizes.data(), descriptorPoolSizes.size(),
                mDriverUniforms[pipeline].descriptorSetLayout.get().getHandle()));
        }
    }

    mGraphicsPipelineDesc.reset(new vk::GraphicsPipelineDesc());
    mGraphicsPipelineDesc->initDefaults(this);

    // Initialize current value/default attribute buffers.
    for (vk::DynamicBuffer &buffer : mDefaultAttribBuffers)
    {
        buffer.init(mRenderer, kVertexBufferUsage, 1, kDefaultBufferSize, true,
                    vk::DynamicBufferPolicy::FrequentSmallAllocations);
    }

#if ANGLE_ENABLE_VULKAN_GPU_TRACE_EVENTS
    angle::PlatformMethods *platform = ANGLEPlatformCurrent();
    ASSERT(platform);

    // GPU tracing workaround for anglebug.com/2927.  The renderer should not emit gpu events
    // during platform discovery.
    const unsigned char *gpuEventsEnabled =
        platform->getTraceCategoryEnabledFlag(platform, "gpu.angle.gpu");
    mGpuEventsEnabled = gpuEventsEnabled && *gpuEventsEnabled;
#endif

    mEmulateSeamfulCubeMapSampling = shouldEmulateSeamfulCubeMapSampling();

    // Assign initial command buffers from queue
    mOutsideRenderPassCommands = mRenderer->getCommandBufferHelper(false);
    mRenderPassCommands        = mRenderer->getCommandBufferHelper(true);

    if (mGpuEventsEnabled)
    {
        // GPU events should only be available if timestamp queries are available.
        ASSERT(mRenderer->getQueueFamilyProperties().timestampValidBits > 0);
        // Calculate the difference between CPU and GPU clocks for GPU event reporting.
        ANGLE_TRY(mGpuEventQueryPool.init(this, VK_QUERY_TYPE_TIMESTAMP,
                                          vk::kDefaultTimestampQueryPoolSize));
        ANGLE_TRY(synchronizeCpuGpuTime());

        mPerfCounters.primaryBuffers++;

        EventName eventName = GetTraceEventName("Primary", mPerfCounters.primaryBuffers);
        ANGLE_TRY(traceGpuEvent(&mOutsideRenderPassCommands->getCommandBuffer(),
                                TRACE_EVENT_PHASE_BEGIN, eventName));
    }

    size_t minAlignment = static_cast<size_t>(
        mRenderer->getPhysicalDeviceProperties().limits.minUniformBufferOffsetAlignment);
    mDefaultUniformStorage.init(mRenderer, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, minAlignment,
                                mRenderer->getDefaultUniformBufferSize(), true,
                                vk::DynamicBufferPolicy::FrequentSmallAllocations);

    // Initialize an "empty" buffer for use with default uniform blocks where there are no uniforms,
    // or atomic counter buffer array indices that are unused.
    constexpr VkBufferUsageFlags kEmptyBufferUsage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
                                                     VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
                                                     VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
    VkBufferCreateInfo emptyBufferInfo          = {};
    emptyBufferInfo.sType                       = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    emptyBufferInfo.flags                       = 0;
    emptyBufferInfo.size                        = 16;
    emptyBufferInfo.usage                       = kEmptyBufferUsage;
    emptyBufferInfo.sharingMode                 = VK_SHARING_MODE_EXCLUSIVE;
    emptyBufferInfo.queueFamilyIndexCount       = 0;
    emptyBufferInfo.pQueueFamilyIndices         = nullptr;
    constexpr VkMemoryPropertyFlags kMemoryType = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    ANGLE_TRY(mEmptyBuffer.init(this, emptyBufferInfo, kMemoryType));

    constexpr VkBufferUsageFlags kStagingBufferUsageFlags =
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
    size_t stagingBufferAlignment =
        static_cast<size_t>(mRenderer->getPhysicalDeviceProperties().limits.minMemoryMapAlignment);
    constexpr size_t kStagingBufferSize = 1024u * 1024u;  // 1M
    mStagingBuffer.init(mRenderer, kStagingBufferUsageFlags, stagingBufferAlignment,
                        kStagingBufferSize, true, vk::DynamicBufferPolicy::SporadicTextureUpload);

    // Add context into the share group
    mShareGroupVk->getContexts()->insert(this);

    return angle::Result::Continue;
}

angle::Result ContextVk::flush(const gl::Context *context)
{
    // If a sync object has been used or this is a shared context, then we need to flush the
    // commands and end the render pass to make sure the sync object (and any preceding commands)
    // lands in the correct place within the command stream.
    // EGL sync objects can span across context share groups, so don't defer flushes if there's one
    // pending a flush.
    if (getShareGroupVk()->isSyncObjectPendingFlush() && context->isShared() &&
        !mEGLSyncObjectPendingFlush)
    {
        // Flush the commands to create a sync point in the command stream.
        ANGLE_TRY(flushCommandsAndEndRenderPass());
        // Move the resources to the share group, so they are released during the next vkQueueSubmit
        // performed by any context in the share group. Note that this relies heavily on the global
        // mutex to guarantee that no two contexts are modifying the lists at the same time.
        getShareGroupVk()->acquireResourceUseList(std::move(mResourceUseList));
        mHasDeferredFlush = true;
        return angle::Result::Continue;
    }

    // EGL sync objects can span across context share groups, so don't defer flushes if there's one
    // pending a flush.
    if (!mEGLSyncObjectPendingFlush &&
        mRenderer->getFeatures().deferFlushUntilEndRenderPass.enabled && hasStartedRenderPass())
    {
        mHasDeferredFlush = true;
        return angle::Result::Continue;
    }

    return flushImpl(nullptr);
}

angle::Result ContextVk::finish(const gl::Context *context)
{
    return finishImpl();
}

angle::Result ContextVk::setupDraw(const gl::Context *context,
                                   gl::PrimitiveMode mode,
                                   GLint firstVertexOrInvalid,
                                   GLsizei vertexOrIndexCount,
                                   GLsizei instanceCount,
                                   gl::DrawElementsType indexTypeOrInvalid,
                                   const void *indices,
                                   DirtyBits dirtyBitMask)
{
    // Set any dirty bits that depend on draw call parameters or other objects.
    if (mode != mCurrentDrawMode)
    {
        invalidateCurrentGraphicsPipeline();
        mCurrentDrawMode = mode;
        mGraphicsPipelineDesc->updateTopology(&mGraphicsPipelineTransition, mCurrentDrawMode);
    }

    // Must be called before the command buffer is started. Can call finish.
    if (mVertexArray->getStreamingVertexAttribsMask().any())
    {
        // All client attribs & any emulated buffered attribs will be updated
        ANGLE_TRY(mVertexArray->updateStreamedAttribs(context, firstVertexOrInvalid,
                                                      vertexOrIndexCount, instanceCount,
                                                      indexTypeOrInvalid, indices));

        mGraphicsDirtyBits.set(DIRTY_BIT_VERTEX_BUFFERS);
    }

    // Create a local object to ensure we flush the descriptor updates to device when we leave this
    // function
    ScopedDescriptorSetUpdates descriptorSetUpdates(this);

    if (mProgram && mProgram->dirtyUniforms())
    {
        ANGLE_TRY(mProgram->updateUniforms(this));
        mGraphicsDirtyBits.set(DIRTY_BIT_DESCRIPTOR_SETS);
    }
    else if (mProgramPipeline && mProgramPipeline->dirtyUniforms(getState()))
    {
        ANGLE_TRY(mProgramPipeline->updateUniforms(this));
        mGraphicsDirtyBits.set(DIRTY_BIT_DESCRIPTOR_SETS);
    }

    // Update transform feedback offsets on every draw call when emulating transform feedback.  This
    // relies on the fact that no geometry/tessellation, indirect or indexed calls are supported in
    // ES3.1 (and emulation is not done for ES3.2).
    if (getFeatures().emulateTransformFeedback.enabled &&
        mState.isTransformFeedbackActiveUnpaused())
    {
        ASSERT(firstVertexOrInvalid != -1);
        mXfbBaseVertex             = firstVertexOrInvalid;
        mXfbVertexCountPerInstance = vertexOrIndexCount;
        invalidateGraphicsDriverUniforms();
    }

    DirtyBits dirtyBits = mGraphicsDirtyBits & dirtyBitMask;

    if (dirtyBits.none())
    {
        ASSERT(mRenderPassCommandBuffer);
        return angle::Result::Continue;
    }

    // Flush any relevant dirty bits.
    for (DirtyBits::Iterator dirtyBitIter = dirtyBits.begin(); dirtyBitIter != dirtyBits.end();
         ++dirtyBitIter)
    {
        ASSERT(mGraphicsDirtyBitHandlers[*dirtyBitIter]);
        ANGLE_TRY((this->*mGraphicsDirtyBitHandlers[*dirtyBitIter])(&dirtyBitIter, dirtyBitMask));
    }

    mGraphicsDirtyBits &= ~dirtyBitMask;

    // The render pass must always be available at this point.
    ASSERT(mRenderPassCommandBuffer);

    return angle::Result::Continue;
}

angle::Result ContextVk::setupIndexedDraw(const gl::Context *context,
                                          gl::PrimitiveMode mode,
                                          GLsizei indexCount,
                                          GLsizei instanceCount,
                                          gl::DrawElementsType indexType,
                                          const void *indices)
{
    ASSERT(mode != gl::PrimitiveMode::LineLoop);

    if (indexType != mCurrentDrawElementsType)
    {
        mCurrentDrawElementsType = indexType;
        ANGLE_TRY(onIndexBufferChange(nullptr));
    }

    const gl::Buffer *elementArrayBuffer = mVertexArray->getState().getElementArrayBuffer();
    if (!elementArrayBuffer)
    {
        mGraphicsDirtyBits.set(DIRTY_BIT_INDEX_BUFFER);
        ANGLE_TRY(mVertexArray->convertIndexBufferCPU(this, indexType, indexCount, indices));
        mCurrentIndexBufferOffset = 0;
    }
    else
    {
        mCurrentIndexBufferOffset = reinterpret_cast<VkDeviceSize>(indices);

        if (indices != mLastIndexBufferOffset)
        {
            mGraphicsDirtyBits.set(DIRTY_BIT_INDEX_BUFFER);
            mLastIndexBufferOffset = indices;
        }
        if (shouldConvertUint8VkIndexType(indexType) && mGraphicsDirtyBits[DIRTY_BIT_INDEX_BUFFER])
        {
            ANGLE_PERF_WARNING(getDebug(), GL_DEBUG_SEVERITY_LOW,
                               "Potential inefficiency emulating uint8 index data due to "
                               "lack of hardware support");

            BufferVk *bufferVk             = vk::GetImpl(elementArrayBuffer);
            VkDeviceSize bufferOffset      = 0;
            vk::BufferHelper &bufferHelper = bufferVk->getBufferAndOffset(&bufferOffset);

            if (bufferHelper.isHostVisible() &&
                !bufferHelper.isCurrentlyInUse(getLastCompletedQueueSerial()))
            {
                uint8_t *src = nullptr;
                ANGLE_TRY(bufferVk->mapImpl(this, reinterpret_cast<void **>(&src)));
                // Note: bufferOffset is not added here because mapImpl already adds it.
                src += reinterpret_cast<uintptr_t>(indices);
                const size_t byteCount = static_cast<size_t>(elementArrayBuffer->getSize()) -
                                         reinterpret_cast<uintptr_t>(indices);
                ANGLE_TRY(mVertexArray->convertIndexBufferCPU(this, indexType, byteCount, src));
                ANGLE_TRY(bufferVk->unmapImpl(this));
            }
            else
            {
                ANGLE_TRY(mVertexArray->convertIndexBufferGPU(this, bufferVk, indices));
            }

            mCurrentIndexBufferOffset = 0;
        }
    }

    return setupDraw(context, mode, 0, indexCount, instanceCount, indexType, indices,
                     mIndexedDirtyBitsMask);
}
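
// For example (illustrative): with a bound element array buffer,
// glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, (void *)256) passes 256
// through |indices|, which the code above reinterprets as a byte offset into
// the index buffer (mCurrentIndexBufferOffset).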
991 
setupIndirectDraw(const gl::Context * context,gl::PrimitiveMode mode,DirtyBits dirtyBitMask,vk::BufferHelper * indirectBuffer,VkDeviceSize indirectBufferOffset)992 angle::Result ContextVk::setupIndirectDraw(const gl::Context *context,
993                                            gl::PrimitiveMode mode,
994                                            DirtyBits dirtyBitMask,
995                                            vk::BufferHelper *indirectBuffer,
996                                            VkDeviceSize indirectBufferOffset)
997 {
998     GLint firstVertex     = -1;
999     GLsizei vertexCount   = 0;
1000     GLsizei instanceCount = 1;
1001 
1002     // Break the render pass if the indirect buffer was previously used as the output from transform
1003     // feedback.
1004     if (mCurrentTransformFeedbackBuffers.contains(indirectBuffer))
1005     {
1006         ANGLE_TRY(flushCommandsAndEndRenderPass());
1007     }
1008 
1009     ANGLE_TRY(setupDraw(context, mode, firstVertex, vertexCount, instanceCount,
1010                         gl::DrawElementsType::InvalidEnum, nullptr, dirtyBitMask));
1011 
1012     // Process indirect buffer after render pass has started.
1013     mRenderPassCommands->bufferRead(this, VK_ACCESS_INDIRECT_COMMAND_READ_BIT,
1014                                     vk::PipelineStage::DrawIndirect, indirectBuffer);
1015 
1016     return angle::Result::Continue;
1017 }
1018 
setupIndexedIndirectDraw(const gl::Context * context,gl::PrimitiveMode mode,gl::DrawElementsType indexType,vk::BufferHelper * indirectBuffer,VkDeviceSize indirectBufferOffset)1019 angle::Result ContextVk::setupIndexedIndirectDraw(const gl::Context *context,
1020                                                   gl::PrimitiveMode mode,
1021                                                   gl::DrawElementsType indexType,
1022                                                   vk::BufferHelper *indirectBuffer,
1023                                                   VkDeviceSize indirectBufferOffset)
1024 {
1025     ASSERT(mode != gl::PrimitiveMode::LineLoop);
1026 
1027     if (indexType != mCurrentDrawElementsType)
1028     {
1029         mCurrentDrawElementsType = indexType;
1030         ANGLE_TRY(onIndexBufferChange(nullptr));
1031     }
1032 
1033     return setupIndirectDraw(context, mode, mIndexedDirtyBitsMask, indirectBuffer,
1034                              indirectBufferOffset);
1035 }
1036 
setupLineLoopIndexedIndirectDraw(const gl::Context * context,gl::PrimitiveMode mode,gl::DrawElementsType indexType,vk::BufferHelper * srcIndirectBuf,VkDeviceSize indirectBufferOffset,vk::BufferHelper ** indirectBufferOut,VkDeviceSize * indirectBufferOffsetOut)1037 angle::Result ContextVk::setupLineLoopIndexedIndirectDraw(const gl::Context *context,
1038                                                           gl::PrimitiveMode mode,
1039                                                           gl::DrawElementsType indexType,
1040                                                           vk::BufferHelper *srcIndirectBuf,
1041                                                           VkDeviceSize indirectBufferOffset,
1042                                                           vk::BufferHelper **indirectBufferOut,
1043                                                           VkDeviceSize *indirectBufferOffsetOut)
1044 {
1045     ASSERT(mode == gl::PrimitiveMode::LineLoop);
1046 
1047     vk::BufferHelper *dstIndirectBuf  = nullptr;
1048     VkDeviceSize dstIndirectBufOffset = 0;
1049 
1050     ANGLE_TRY(mVertexArray->handleLineLoopIndexIndirect(this, indexType, srcIndirectBuf,
1051                                                         indirectBufferOffset, &dstIndirectBuf,
1052                                                         &dstIndirectBufOffset));
1053 
1054     *indirectBufferOut       = dstIndirectBuf;
1055     *indirectBufferOffsetOut = dstIndirectBufOffset;
1056 
1057     if (indexType != mCurrentDrawElementsType)
1058     {
1059         mCurrentDrawElementsType = indexType;
1060         ANGLE_TRY(onIndexBufferChange(nullptr));
1061     }
1062 
1063     return setupIndirectDraw(context, mode, mIndexedDirtyBitsMask, dstIndirectBuf,
1064                              dstIndirectBufOffset);
1065 }
1066 
setupLineLoopIndirectDraw(const gl::Context * context,gl::PrimitiveMode mode,vk::BufferHelper * indirectBuffer,VkDeviceSize indirectBufferOffset,vk::BufferHelper ** indirectBufferOut,VkDeviceSize * indirectBufferOffsetOut)1067 angle::Result ContextVk::setupLineLoopIndirectDraw(const gl::Context *context,
1068                                                    gl::PrimitiveMode mode,
1069                                                    vk::BufferHelper *indirectBuffer,
1070                                                    VkDeviceSize indirectBufferOffset,
1071                                                    vk::BufferHelper **indirectBufferOut,
1072                                                    VkDeviceSize *indirectBufferOffsetOut)
1073 {
1074     ASSERT(mode == gl::PrimitiveMode::LineLoop);
1075 
1076     vk::BufferHelper *indirectBufferHelperOut = nullptr;
1077 
1078     ANGLE_TRY(mVertexArray->handleLineLoopIndirectDraw(
1079         context, indirectBuffer, indirectBufferOffset, &indirectBufferHelperOut,
1080         indirectBufferOffsetOut));
1081 
1082     *indirectBufferOut = indirectBufferHelperOut;
1083 
1084     if (gl::DrawElementsType::UnsignedInt != mCurrentDrawElementsType)
1085     {
1086         mCurrentDrawElementsType = gl::DrawElementsType::UnsignedInt;
1087         ANGLE_TRY(onIndexBufferChange(nullptr));
1088     }
1089 
1090     return setupIndirectDraw(context, mode, mIndexedDirtyBitsMask, indirectBufferHelperOut,
1091                              *indirectBufferOffsetOut);
1092 }
1093 
setupLineLoopDraw(const gl::Context * context,gl::PrimitiveMode mode,GLint firstVertex,GLsizei vertexOrIndexCount,gl::DrawElementsType indexTypeOrInvalid,const void * indices,uint32_t * numIndicesOut)1094 angle::Result ContextVk::setupLineLoopDraw(const gl::Context *context,
1095                                            gl::PrimitiveMode mode,
1096                                            GLint firstVertex,
1097                                            GLsizei vertexOrIndexCount,
1098                                            gl::DrawElementsType indexTypeOrInvalid,
1099                                            const void *indices,
1100                                            uint32_t *numIndicesOut)
1101 {
1102     mCurrentIndexBufferOffset = 0;
1103     ANGLE_TRY(mVertexArray->handleLineLoop(this, firstVertex, vertexOrIndexCount,
1104                                            indexTypeOrInvalid, indices, numIndicesOut));
1105     ANGLE_TRY(onIndexBufferChange(nullptr));
1106     mCurrentDrawElementsType = indexTypeOrInvalid != gl::DrawElementsType::InvalidEnum
1107                                    ? indexTypeOrInvalid
1108                                    : gl::DrawElementsType::UnsignedInt;
1109     return setupDraw(context, mode, firstVertex, vertexOrIndexCount, 1, indexTypeOrInvalid, indices,
1110                      mIndexedDirtyBitsMask);
1111 }
1112 
setupDispatch(const gl::Context * context)1113 angle::Result ContextVk::setupDispatch(const gl::Context *context)
1114 {
1115     // Note: numerous tests miss a glMemoryBarrier call between the initial texture data upload and
1116     // the dispatch call.  Flush the outside render pass command buffer as a workaround.
1117     // TODO: Remove this and fix tests.  http://anglebug.com/5070
1118     ANGLE_TRY(flushOutsideRenderPassCommands());
1119 
1120     // Create a local object to ensure we flush the descriptor updates to device when we leave this
1121     // function
1122     ScopedDescriptorSetUpdates descriptorSetUpdates(this);
1123 
1124     if (mProgram && mProgram->dirtyUniforms())
1125     {
1126         ANGLE_TRY(mProgram->updateUniforms(this));
1127         mComputeDirtyBits.set(DIRTY_BIT_DESCRIPTOR_SETS);
1128     }
1129     else if (mProgramPipeline && mProgramPipeline->dirtyUniforms(getState()))
1130     {
1131         ANGLE_TRY(mProgramPipeline->updateUniforms(this));
1132         mComputeDirtyBits.set(DIRTY_BIT_DESCRIPTOR_SETS);
1133     }
1134 
1135     DirtyBits dirtyBits = mComputeDirtyBits;
1136 
1137     // Flush any relevant dirty bits.
1138     for (size_t dirtyBit : dirtyBits)
1139     {
1140         ASSERT(mComputeDirtyBitHandlers[dirtyBit]);
1141         ANGLE_TRY((this->*mComputeDirtyBitHandlers[dirtyBit])());
1142     }
1143 
1144     mComputeDirtyBits.reset();
1145 
1146     return angle::Result::Continue;
1147 }
1148 
handleDirtyGraphicsMemoryBarrier(DirtyBits::Iterator * dirtyBitsIterator,DirtyBits dirtyBitMask)1149 angle::Result ContextVk::handleDirtyGraphicsMemoryBarrier(DirtyBits::Iterator *dirtyBitsIterator,
1150                                                           DirtyBits dirtyBitMask)
1151 {
1152     return handleDirtyMemoryBarrierImpl(dirtyBitsIterator, dirtyBitMask);
1153 }
1154 
handleDirtyComputeMemoryBarrier()1155 angle::Result ContextVk::handleDirtyComputeMemoryBarrier()
1156 {
1157     return handleDirtyMemoryBarrierImpl(nullptr, {});
1158 }
1159 
1160 bool ContextVk::renderPassUsesStorageResources() const
1161 {
1162     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
1163     ASSERT(executable);
1164 
1165     // Storage images:
1166     for (size_t imageUnitIndex : executable->getActiveImagesMask())
1167     {
1168         const gl::Texture *texture = mState.getImageUnit(imageUnitIndex).texture.get();
1169         if (texture == nullptr)
1170         {
1171             continue;
1172         }
1173 
1174         TextureVk *textureVk = vk::GetImpl(texture);
1175 
1176         if (texture->getType() == gl::TextureType::Buffer)
1177         {
1178             VkDeviceSize bufferOffset = 0;
1179             vk::BufferHelper &buffer =
1180                 vk::GetImpl(textureVk->getBuffer().get())->getBufferAndOffset(&bufferOffset);
1181             if (mRenderPassCommands->usesBuffer(buffer))
1182             {
1183                 return true;
1184             }
1185         }
1186         else
1187         {
1188             vk::ImageHelper &image = textureVk->getImage();
1189             // Images only need to close the render pass if they need a layout transition.  The
1190             // outside render pass command buffer doesn't need closing, as the layout transition
1191             // barriers are recorded in sequence with the rest of the commands.
1192             if (IsRenderPassStartedAndUsesImage(*mRenderPassCommands, image))
1193             {
1194                 return true;
1195             }
1196         }
1197     }
1198 
1199     gl::ShaderMap<const gl::ProgramState *> programStates;
1200     mExecutable->fillProgramStateMap(this, &programStates);
1201 
1202     for (const gl::ShaderType shaderType : executable->getLinkedShaderStages())
1203     {
1204         const gl::ProgramState *programState = programStates[shaderType];
1205         ASSERT(programState);
1206 
1207         // Storage buffers:
1208         const std::vector<gl::InterfaceBlock> &blocks = programState->getShaderStorageBlocks();
1209 
1210         for (uint32_t bufferIndex = 0; bufferIndex < blocks.size(); ++bufferIndex)
1211         {
1212             const gl::InterfaceBlock &block = blocks[bufferIndex];
1213             const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
1214                 mState.getIndexedShaderStorageBuffer(block.binding);
1215 
1216             if (!block.isActive(shaderType) || bufferBinding.get() == nullptr)
1217             {
1218                 continue;
1219             }
1220 
1221             VkDeviceSize bufferOffset = 0;
1222             vk::BufferHelper &buffer =
1223                 vk::GetImpl(bufferBinding.get())->getBufferAndOffset(&bufferOffset);
1224             if (mRenderPassCommands->usesBuffer(buffer))
1225             {
1226                 return true;
1227             }
1228         }
1229 
1230         // Atomic counters:
1231         const std::vector<gl::AtomicCounterBuffer> &atomicCounterBuffers =
1232             programState->getAtomicCounterBuffers();
1233 
1234         for (uint32_t bufferIndex = 0; bufferIndex < atomicCounterBuffers.size(); ++bufferIndex)
1235         {
1236             uint32_t binding = atomicCounterBuffers[bufferIndex].binding;
1237             const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
1238                 mState.getIndexedAtomicCounterBuffer(binding);
1239 
1240             if (bufferBinding.get() == nullptr)
1241             {
1242                 continue;
1243             }
1244 
1245             VkDeviceSize bufferOffset = 0;
1246             vk::BufferHelper &buffer =
1247                 vk::GetImpl(bufferBinding.get())->getBufferAndOffset(&bufferOffset);
1248             if (mRenderPassCommands->usesBuffer(buffer))
1249             {
1250                 return true;
1251             }
1252         }
1253     }
1254 
1255     return false;
1256 }
1257 
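// Illustrative GL-side sequence that exercises this path (an assumed example): a render
// pass reads an SSBO that a subsequent compute dispatch writes:
//
//   glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
//   glDrawArrays(GL_TRIANGLES, 0, 3);                // render pass reads ssbo
//   glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);  // application-issued barrier
//   glDispatchCompute(1, 1, 1);                      // compute writes ssbo
//
// The open render pass must be closed for the barrier to correctly order the accesses.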
1258 angle::Result ContextVk::handleDirtyMemoryBarrierImpl(DirtyBits::Iterator *dirtyBitsIterator,
1259                                                       DirtyBits dirtyBitMask)
1260 {
1261     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
1262     ASSERT(executable);
1263 
1264     const bool hasImages         = executable->hasImages();
1265     const bool hasStorageBuffers = executable->hasStorageBuffers();
1266     const bool hasAtomicCounters = executable->hasAtomicCounterBuffers();
1267 
1268     if (!hasImages && !hasStorageBuffers && !hasAtomicCounters)
1269     {
1270         return angle::Result::Continue;
1271     }
1272 
1273     // Break the render pass if necessary.  This is only needed for write-after-read situations, and
1274     // is done by checking whether current storage buffers and images are used in the render pass.
1275     if (renderPassUsesStorageResources())
1276     {
1277         // Either set later bits (if called during handling of graphics dirty bits), or set the
1278         // dirty bits directly (if called during handling of compute dirty bits).
1279         if (dirtyBitsIterator)
1280         {
1281             return flushDirtyGraphicsRenderPass(dirtyBitsIterator, dirtyBitMask);
1282         }
1283         else
1284         {
1285             return flushCommandsAndEndRenderPass();
1286         }
1287     }
1288 
1289     // Flushing outside render pass commands is cheap.  If a memory barrier has been issued in its
1290     // lifetime, just flush it instead of wasting time trying to figure out if it's necessary.
1291     if (mOutsideRenderPassCommands->hasGLMemoryBarrierIssued())
1292     {
1293         ANGLE_TRY(flushOutsideRenderPassCommands());
1294     }
1295 
1296     return angle::Result::Continue;
1297 }
1298 
1299 angle::Result ContextVk::handleDirtyGraphicsEventLog(DirtyBits::Iterator *dirtyBitsIterator,
1300                                                      DirtyBits dirtyBitMask)
1301 {
1302     return handleDirtyEventLogImpl(mRenderPassCommandBuffer);
1303 }
1304 
1305 angle::Result ContextVk::handleDirtyComputeEventLog()
1306 {
1307     return handleDirtyEventLogImpl(&mOutsideRenderPassCommands->getCommandBuffer());
1308 }
1309 
1310 angle::Result ContextVk::handleDirtyEventLogImpl(vk::CommandBuffer *commandBuffer)
1311 {
1312     // This method is called when a draw or dispatch command is being processed.  Its purpose is
1313     // to call the vkCmd*DebugUtilsLabelEXT functions in order to communicate to debuggers
1314     // (e.g. AGI) the OpenGL ES commands that the application uses.
1315 
1316     // Exit early if no OpenGL ES commands have been logged, or if no command buffer (for a no-op
1317     // draw), or if calling the vkCmd*DebugUtilsLabelEXT functions is not enabled.
1318     if (mEventLog.empty() || commandBuffer == nullptr || !mRenderer->angleDebuggerMode())
1319     {
1320         return angle::Result::Continue;
1321     }
1322 
1323     // Insert OpenGL ES commands into debug label.  We create a 3-level cascade here for
1324     // OpenGL-ES-first debugging in AGI.  Here's the general outline of commands:
1325     // -glDrawCommand
1326     // --vkCmdBeginDebugUtilsLabelEXT() #1 for "glDrawCommand"
1327     // --OpenGL ES Commands
1328     // ---vkCmdBeginDebugUtilsLabelEXT() #2 for "OpenGL ES Commands"
1329     // ---Individual OpenGL ES Commands leading up to glDrawCommand
1330     // ----vkCmdBeginDebugUtilsLabelEXT() #3 for each individual OpenGL ES Command
1331     // ----vkCmdEndDebugUtilsLabelEXT() #3 for each individual OpenGL ES Command
1332     // ----...More Individual OGL Commands...
1333     // ----Final Individual OGL command will be the same glDrawCommand shown in #1 above
1334     // ---vkCmdEndDebugUtilsLabelEXT() #2 for "OpenGL ES Commands"
1335     // --VK SetupDraw & Draw-related commands will be embedded here under glDraw #1
1336     // --vkCmdEndDebugUtilsLabelEXT() #1 is called after each vkDraw* or vkDispatch* call
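    // Note: per VK_EXT_debug_utils, a label color whose components are all zero is
    // ignored, so no particular color is requested here.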
1337     VkDebugUtilsLabelEXT label = {VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT,
1338                                   nullptr,
1339                                   mEventLog.back().c_str(),
1340                                   {0.0f, 0.0f, 0.0f, 0.0f}};
1341     // This is #1 from comment above
1342     commandBuffer->beginDebugUtilsLabelEXT(label);
1343     std::string oglCmds = "OpenGL ES Commands";
1344     label.pLabelName    = oglCmds.c_str();
1345     // This is #2 from comment above
1346     commandBuffer->beginDebugUtilsLabelEXT(label);
1347     for (uint32_t i = 0; i < mEventLog.size(); ++i)
1348     {
1349         label.pLabelName = mEventLog[i].c_str();
1350         // NOTE: We have to use a begin/end pair here because AGI does not promote the
1351         // pLabelName from an insertDebugUtilsLabelEXT() call to the Commands panel.
1352         // Internal bug b/169243237 is tracking this and once the insert* call shows the
1353         // pLabelName similar to begin* call, we can switch these to insert* calls instead.
1354         // This is #3 from comment above.
1355         commandBuffer->beginDebugUtilsLabelEXT(label);
1356         commandBuffer->endDebugUtilsLabelEXT();
1357     }
1358     commandBuffer->endDebugUtilsLabelEXT();
1359     // The final end* call for #1 above is made in the ContextVk::draw* or
1360     //  ContextVk::dispatch* function calls.
1361 
1362     mEventLog.clear();
1363     return angle::Result::Continue;
1364 }
1365 
1366 angle::Result ContextVk::handleDirtyGraphicsDefaultAttribs(DirtyBits::Iterator *dirtyBitsIterator,
1367                                                            DirtyBits dirtyBitMask)
1368 {
1369     ASSERT(mDirtyDefaultAttribsMask.any());
1370 
1371     for (size_t attribIndex : mDirtyDefaultAttribsMask)
1372     {
1373         ANGLE_TRY(updateDefaultAttribute(attribIndex));
1374     }
1375 
1376     mDirtyDefaultAttribsMask.reset();
1377     return angle::Result::Continue;
1378 }
1379 
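// Pipeline lookup is accelerated by a transition cache: each vk::PipelineHelper remembers
// which pipeline a given pattern of desc changes (mGraphicsPipelineTransition) led to, so
// toggling a small piece of state back and forth (e.g. blending on/off) becomes a cached
// lookup instead of a hash and search over the full GraphicsPipelineDesc.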
1380 angle::Result ContextVk::handleDirtyGraphicsPipelineDesc(DirtyBits::Iterator *dirtyBitsIterator,
1381                                                          DirtyBits dirtyBitMask)
1382 {
1383     const VkPipeline previousPipeline = mCurrentGraphicsPipeline
1384                                             ? mCurrentGraphicsPipeline->getPipeline().getHandle()
1385                                             : VK_NULL_HANDLE;
1386 
1387     ASSERT(mExecutable);
1388 
1389     if (!mCurrentGraphicsPipeline)
1390     {
1391         const vk::GraphicsPipelineDesc *descPtr;
1392 
1393         // The desc's specialization constants depend on the program's
1394         // specConstUsageBits.  We need to update them if the program has changed.
1395         SpecConstUsageBits usageBits = getCurrentProgramSpecConstUsageBits();
1396         updateGraphicsPipelineDescWithSpecConstUsageBits(usageBits);
1397 
1398         // Draw call shader patching, shader compilation, and pipeline cache query.
1399         ANGLE_TRY(mExecutable->getGraphicsPipeline(
1400             this, mCurrentDrawMode, *mGraphicsPipelineDesc,
1401             mState.getProgramExecutable()->getNonBuiltinAttribLocationsMask(), &descPtr,
1402             &mCurrentGraphicsPipeline));
1403         mGraphicsPipelineTransition.reset();
1404     }
1405     else if (mGraphicsPipelineTransition.any())
1406     {
1407         ASSERT(mCurrentGraphicsPipeline->valid());
1408         if (!mCurrentGraphicsPipeline->findTransition(
1409                 mGraphicsPipelineTransition, *mGraphicsPipelineDesc, &mCurrentGraphicsPipeline))
1410         {
1411             vk::PipelineHelper *oldPipeline = mCurrentGraphicsPipeline;
1412             const vk::GraphicsPipelineDesc *descPtr;
1413 
1414             ANGLE_TRY(mExecutable->getGraphicsPipeline(
1415                 this, mCurrentDrawMode, *mGraphicsPipelineDesc,
1416                 mState.getProgramExecutable()->getNonBuiltinAttribLocationsMask(), &descPtr,
1417                 &mCurrentGraphicsPipeline));
1418 
1419             oldPipeline->addTransition(mGraphicsPipelineTransition, descPtr,
1420                                        mCurrentGraphicsPipeline);
1421         }
1422 
1423         mGraphicsPipelineTransition.reset();
1424     }
1425     // Update the queue serial for the pipeline object.
1426     ASSERT(mCurrentGraphicsPipeline && mCurrentGraphicsPipeline->valid());
1427     // TODO: https://issuetracker.google.com/issues/169788986: Need to change this so that we get
1428     // the actual serial used when this work is submitted.
1429     mCurrentGraphicsPipeline->updateSerial(getCurrentQueueSerial());
1430 
1431     const VkPipeline newPipeline = mCurrentGraphicsPipeline->getPipeline().getHandle();
1432 
1433     // If there's no change in pipeline, avoid rebinding it later.  If the rebind is due to a new
1434     // command buffer or UtilsVk, it will happen anyway with DIRTY_BIT_PIPELINE_BINDING.
1435     if (newPipeline == previousPipeline)
1436     {
1437         return angle::Result::Continue;
1438     }
1439 
1440     // VK_EXT_transform_feedback disallows binding pipelines while transform feedback is active.
1441     // If a new pipeline needs to be bound, the render pass should necessarily be broken (which
1442     // implicitly pauses transform feedback), as resuming requires a barrier on the transform
1443     // feedback counter buffer.
1444     if (mRenderPassCommands->started() && mRenderPassCommands->isTransformFeedbackActiveUnpaused())
1445     {
1446         ANGLE_TRY(flushDirtyGraphicsRenderPass(dirtyBitsIterator, dirtyBitMask));
1447 
1448         dirtyBitsIterator->setLaterBit(DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME);
1449     }
1450 
1451     // The pipeline needs to be rebound because it has changed.
1452     dirtyBitsIterator->setLaterBit(DIRTY_BIT_PIPELINE_BINDING);
1453 
1454     return angle::Result::Continue;
1455 }
1456 
1457 angle::Result ContextVk::handleDirtyGraphicsRenderPass(DirtyBits::Iterator *dirtyBitsIterator,
1458                                                        DirtyBits dirtyBitMask)
1459 {
1460     // If the render pass needs to be recreated, close it using the special mid-dirty-bit-handling
1461     // function, so later dirty bits can be set.
1462     if (mRenderPassCommands->started())
1463     {
1464         ANGLE_TRY(flushDirtyGraphicsRenderPass(dirtyBitsIterator,
1465                                                dirtyBitMask & ~DirtyBits{DIRTY_BIT_RENDER_PASS}));
1466     }
1467 
1468     gl::Rectangle scissoredRenderArea = mDrawFramebuffer->getRotatedScissoredRenderArea(this);
1469     bool renderPassDescChanged        = false;
1470 
1471     ANGLE_TRY(startRenderPass(scissoredRenderArea, nullptr, &renderPassDescChanged));
1472 
1473     // The render pass desc can change when starting the render pass, for example due to
1474     // multisampled-render-to-texture needs based on loadOps.  In that case, recreate the graphics
1475     // pipeline.
1476     if (renderPassDescChanged)
1477     {
1478         ANGLE_TRY(handleDirtyGraphicsPipelineDesc(dirtyBitsIterator, dirtyBitMask));
1479     }
1480 
1481     return angle::Result::Continue;
1482 }
1483 
1484 angle::Result ContextVk::handleDirtyGraphicsPipelineBinding(DirtyBits::Iterator *dirtyBitsIterator,
1485                                                             DirtyBits dirtyBitMask)
1486 {
1487     ASSERT(mCurrentGraphicsPipeline);
1488 
1489     mRenderPassCommandBuffer->bindGraphicsPipeline(mCurrentGraphicsPipeline->getPipeline());
1490 
1491     return angle::Result::Continue;
1492 }
1493 
1494 angle::Result ContextVk::handleDirtyComputePipelineDesc()
1495 {
1496     if (!mCurrentComputePipeline)
1497     {
1498         ASSERT(mExecutable);
1499         ANGLE_TRY(mExecutable->getComputePipeline(this, &mCurrentComputePipeline));
1500     }
1501 
1502     ASSERT(mComputeDirtyBits.test(DIRTY_BIT_PIPELINE_BINDING));
1503 
1504     return angle::Result::Continue;
1505 }
1506 
1507 angle::Result ContextVk::handleDirtyComputePipelineBinding()
1508 {
1509     ASSERT(mCurrentComputePipeline);
1510 
1511     mOutsideRenderPassCommands->getCommandBuffer().bindComputePipeline(
1512         mCurrentComputePipeline->get());
1513     // TODO: https://issuetracker.google.com/issues/169788986: Need to change this so that we get
1514     // the actual serial used when this work is submitted.
1515     mCurrentComputePipeline->updateSerial(getCurrentQueueSerial());
1516 
1517     return angle::Result::Continue;
1518 }
1519 
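// Condensed summary of the image layout selection below:
//   - also bound as a GL image           -> compute- or all-graphics-shader write layout
//   - also a render pass attachment      -> feedback-loop layout (color or depth/stencil,
//                                           fragment-only vs. all-shaders variants)
//   - any other depth/stencil texture    -> read-only depth/stencil + shader-read layout
//   - otherwise                          -> the narrowest read-only layout that covers the
//                                           stages sampling the texture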
1520 ANGLE_INLINE angle::Result ContextVk::handleDirtyTexturesImpl(
1521     vk::CommandBufferHelper *commandBufferHelper)
1522 {
1523     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
1524     ASSERT(executable);
1525     const gl::ActiveTextureMask &activeTextures = executable->getActiveSamplersMask();
1526 
1527     for (size_t textureUnit : activeTextures)
1528     {
1529         const vk::TextureUnit &unit = mActiveTextures[textureUnit];
1530         TextureVk *textureVk        = unit.texture;
1531 
1532         // If it's a texture buffer, get the attached buffer.
1533         if (textureVk->getBuffer().get() != nullptr)
1534         {
1535             BufferVk *bufferVk        = vk::GetImpl(textureVk->getBuffer().get());
1536             VkDeviceSize bufferOffset = 0;
1537             vk::BufferHelper &buffer  = bufferVk->getBufferAndOffset(&bufferOffset);
1538 
1539             gl::ShaderBitSet stages =
1540                 executable->getSamplerShaderBitsForTextureUnitIndex(textureUnit);
1541             ASSERT(stages.any());
1542 
1543             // TODO: accept multiple stages in bufferRead.  http://anglebug.com/3573
1544             for (gl::ShaderType stage : stages)
1545             {
1546                 // Note: if another range of the same buffer is simultaneously used for storage,
1547                 // such as for transform feedback output or an SSBO, unnecessary barriers can be
1548                 // generated.
1549                 commandBufferHelper->bufferRead(this, VK_ACCESS_SHADER_READ_BIT,
1550                                                 vk::GetPipelineStage(stage), &buffer);
1551             }
1552 
1553             textureVk->retainBufferViews(&mResourceUseList);
1554 
1555             continue;
1556         }
1557 
1558         vk::ImageHelper &image = textureVk->getImage();
1559 
1560         // The image should be flushed and ready to use at this point. There may still be
1561         // lingering staged updates in its staging buffer for unused texture mip levels or
1562         // layers. Therefore we can't verify it has no staged updates right here.
1563 
1564         // Select the appropriate vk::ImageLayout depending on whether the texture is also bound as
1565         // a GL image, and whether the program is a compute or graphics shader.
1566         vk::ImageLayout textureLayout;
1567         if (textureVk->hasBeenBoundAsImage())
1568         {
1569             textureLayout = executable->isCompute() ? vk::ImageLayout::ComputeShaderWrite
1570                                                     : vk::ImageLayout::AllGraphicsShadersWrite;
1571         }
1572         else
1573         {
1574             gl::ShaderBitSet remainingShaderBits =
1575                 executable->getSamplerShaderBitsForTextureUnitIndex(textureUnit);
1576             ASSERT(remainingShaderBits.any());
1577             gl::ShaderType firstShader = remainingShaderBits.first();
1578             gl::ShaderType lastShader  = remainingShaderBits.last();
1579             remainingShaderBits.reset(firstShader);
1580             remainingShaderBits.reset(lastShader);
1581 
1582             if (image.hasRenderPassUsageFlag(vk::RenderPassUsage::RenderTargetAttachment))
1583             {
1584                 // Right now we set this flag only when RenderTargetAttachment is set since we do
1585                 // not track all textures in the renderpass.
1586                 image.setRenderPassUsageFlag(vk::RenderPassUsage::TextureSampler);
1587 
1588                 if (image.isDepthOrStencil())
1589                 {
1590                     if (image.hasRenderPassUsageFlag(vk::RenderPassUsage::ReadOnlyAttachment))
1591                     {
1592                         if (firstShader == gl::ShaderType::Fragment)
1593                         {
1594                             ASSERT(remainingShaderBits.none() && lastShader == firstShader);
1595                             textureLayout = vk::ImageLayout::DSAttachmentReadAndFragmentShaderRead;
1596                         }
1597                         else
1598                         {
1599                             textureLayout = vk::ImageLayout::DSAttachmentReadAndAllShadersRead;
1600                         }
1601                     }
1602                     else
1603                     {
1604                         if (firstShader == gl::ShaderType::Fragment)
1605                         {
1606                             textureLayout = vk::ImageLayout::DSAttachmentWriteAndFragmentShaderRead;
1607                         }
1608                         else
1609                         {
1610                             textureLayout = vk::ImageLayout::DSAttachmentWriteAndAllShadersRead;
1611                         }
1612                     }
1613                 }
1614                 else
1615                 {
1616                     if (firstShader == gl::ShaderType::Fragment)
1617                     {
1618                         textureLayout = vk::ImageLayout::ColorAttachmentAndFragmentShaderRead;
1619                     }
1620                     else
1621                     {
1622                         textureLayout = vk::ImageLayout::ColorAttachmentAndAllShadersRead;
1623                     }
1624                 }
1625             }
1626             else if (image.isDepthOrStencil())
1627             {
1628                 // We always use a depth-stencil read-only layout for any depth Textures to simplify
1629                 // our implementation's handling of depth-stencil read-only mode. We don't have to
1630                 // split a RenderPass to transition a depth texture from shader-read to read-only.
1631                 // This improves performance in Manhattan. Future optimizations are likely possible
1632                 // here including using specialized barriers without breaking the RenderPass.
1633                 if (firstShader == gl::ShaderType::Fragment)
1634                 {
1635                     ASSERT(remainingShaderBits.none() && lastShader == firstShader);
1636                     textureLayout = vk::ImageLayout::DSAttachmentReadAndFragmentShaderRead;
1637                 }
1638                 else
1639                 {
1640                     textureLayout = vk::ImageLayout::DSAttachmentReadAndAllShadersRead;
1641                 }
1642             }
1643             else
1644             {
1645                 // We barrier against either:
1646                 // - Vertex only
1647                 // - Fragment only
1648                 // - Pre-fragment only (vertex, geometry and tessellation together)
1649                 if (remainingShaderBits.any() || firstShader != lastShader)
1650                 {
1651                     textureLayout = lastShader == gl::ShaderType::Fragment
1652                                         ? vk::ImageLayout::AllGraphicsShadersReadOnly
1653                                         : vk::ImageLayout::PreFragmentShadersReadOnly;
1654                 }
1655                 else
1656                 {
1657                     textureLayout = kShaderReadOnlyImageLayouts[firstShader];
1658                 }
1659             }
1660         }
1661         // Ensure the image is in the desired layout
1662         commandBufferHelper->imageRead(this, image.getAspectFlags(), textureLayout, &image);
1663 
1664         textureVk->retainImageViews(&mResourceUseList);
1665     }
1666 
1667     if (executable->hasTextures())
1668     {
1669         ANGLE_TRY(mExecutable->updateTexturesDescriptorSet(this, mActiveTexturesDesc));
1670     }
1671 
1672     return angle::Result::Continue;
1673 }
1674 
1675 angle::Result ContextVk::handleDirtyGraphicsTextures(DirtyBits::Iterator *dirtyBitsIterator,
1676                                                      DirtyBits dirtyBitMask)
1677 {
1678     return handleDirtyTexturesImpl(mRenderPassCommands);
1679 }
1680 
1681 angle::Result ContextVk::handleDirtyComputeTextures()
1682 {
1683     return handleDirtyTexturesImpl(mOutsideRenderPassCommands);
1684 }
1685 
1686 angle::Result ContextVk::handleDirtyGraphicsVertexBuffers(DirtyBits::Iterator *dirtyBitsIterator,
1687                                                           DirtyBits dirtyBitMask)
1688 {
1689     uint32_t maxAttrib = mState.getProgramExecutable()->getMaxActiveAttribLocation();
1690     const gl::AttribArray<VkBuffer> &bufferHandles = mVertexArray->getCurrentArrayBufferHandles();
1691     const gl::AttribArray<VkDeviceSize> &bufferOffsets =
1692         mVertexArray->getCurrentArrayBufferOffsets();
1693 
1694     mRenderPassCommandBuffer->bindVertexBuffers(0, maxAttrib, bufferHandles.data(),
1695                                                 bufferOffsets.data());
1696 
1697     const gl::AttribArray<vk::BufferHelper *> &arrayBufferResources =
1698         mVertexArray->getCurrentArrayBuffers();
1699 
1700     // Mark all active vertex buffers as accessed.
1701     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
1702     gl::AttributesMask attribsMask          = executable->getActiveAttribLocationsMask();
1703     for (size_t attribIndex : attribsMask)
1704     {
1705         vk::BufferHelper *arrayBuffer = arrayBufferResources[attribIndex];
1706         if (arrayBuffer)
1707         {
1708             mRenderPassCommands->bufferRead(this, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
1709                                             vk::PipelineStage::VertexInput, arrayBuffer);
1710         }
1711     }
1712 
1713     return angle::Result::Continue;
1714 }
1715 
1716 angle::Result ContextVk::handleDirtyGraphicsIndexBuffer(DirtyBits::Iterator *dirtyBitsIterator,
1717                                                         DirtyBits dirtyBitMask)
1718 {
1719     vk::BufferHelper *elementArrayBuffer = mVertexArray->getCurrentElementArrayBuffer();
1720     ASSERT(elementArrayBuffer != nullptr);
1721 
1722     VkDeviceSize offset =
1723         mVertexArray->getCurrentElementArrayBufferOffset() + mCurrentIndexBufferOffset;
1724 
1725     mRenderPassCommandBuffer->bindIndexBuffer(elementArrayBuffer->getBuffer(), offset,
1726                                               getVkIndexType(mCurrentDrawElementsType));
1727 
1728     mRenderPassCommands->bufferRead(this, VK_ACCESS_INDEX_READ_BIT, vk::PipelineStage::VertexInput,
1729                                     elementArrayBuffer);
1730 
1731     return angle::Result::Continue;
1732 }
1733 
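// Framebuffer fetch is expressed with input attachments in this backend, so reading the
// current pixel's color requires a self-dependency within the render pass.  The by-region
// barrier below orders color attachment writes against input attachment reads without
// closing the render pass.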
1734 angle::Result ContextVk::handleDirtyGraphicsFramebufferFetchBarrier(
1735     DirtyBits::Iterator *dirtyBitsIterator,
1736     DirtyBits dirtyBitMask)
1737 {
1738     VkMemoryBarrier memoryBarrier = {};
1739     memoryBarrier.sType           = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
1740     memoryBarrier.srcAccessMask   = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
1741     memoryBarrier.dstAccessMask   = VK_ACCESS_INPUT_ATTACHMENT_READ_BIT;
1742 
1743     mRenderPassCommandBuffer->pipelineBarrier(
1744         VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
1745         VK_DEPENDENCY_BY_REGION_BIT, 1, &memoryBarrier, 0, nullptr, 0, nullptr);
1746 
1747     return angle::Result::Continue;
1748 }
1749 
1750 ANGLE_INLINE angle::Result ContextVk::handleDirtyShaderResourcesImpl(
1751     vk::CommandBufferHelper *commandBufferHelper)
1752 {
1753     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
1754     ASSERT(executable);
1755 
1756     const bool hasImages = executable->hasImages();
1757     const bool hasStorageBuffers =
1758         executable->hasStorageBuffers() || executable->hasAtomicCounterBuffers();
1759     const bool hasUniformBuffers = executable->hasUniformBuffers();
1760 
1761     if (!hasUniformBuffers && !hasStorageBuffers && !hasImages &&
1762         !executable->usesFramebufferFetch())
1763     {
1764         return angle::Result::Continue;
1765     }
1766 
1767     if (hasImages)
1768     {
1769         ANGLE_TRY(updateActiveImages(commandBufferHelper));
1770     }
1771 
1772     // Process buffer barriers.
1773     gl::ShaderMap<const gl::ProgramState *> programStates;
1774     mExecutable->fillProgramStateMap(this, &programStates);
1775     for (const gl::ShaderType shaderType : executable->getLinkedShaderStages())
1776     {
1777         const gl::ProgramState &programState        = *programStates[shaderType];
1778         const std::vector<gl::InterfaceBlock> &ubos = programState.getUniformBlocks();
1779 
1780         for (const gl::InterfaceBlock &ubo : ubos)
1781         {
1782             const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
1783                 mState.getIndexedUniformBuffer(ubo.binding);
1784 
1785             if (!ubo.isActive(shaderType))
1786             {
1787                 continue;
1788             }
1789 
1790             if (bufferBinding.get() == nullptr)
1791             {
1792                 continue;
1793             }
1794 
1795             BufferVk *bufferVk             = vk::GetImpl(bufferBinding.get());
1796             VkDeviceSize bufferOffset      = 0;
1797             vk::BufferHelper &bufferHelper = bufferVk->getBufferAndOffset(&bufferOffset);
1798 
1799             commandBufferHelper->bufferRead(this, VK_ACCESS_UNIFORM_READ_BIT,
1800                                             vk::GetPipelineStage(shaderType), &bufferHelper);
1801         }
1802 
1803         const std::vector<gl::InterfaceBlock> &ssbos = programState.getShaderStorageBlocks();
1804         for (const gl::InterfaceBlock &ssbo : ssbos)
1805         {
1806             const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
1807                 mState.getIndexedShaderStorageBuffer(ssbo.binding);
1808 
1809             if (!ssbo.isActive(shaderType))
1810             {
1811                 continue;
1812             }
1813 
1814             if (bufferBinding.get() == nullptr)
1815             {
1816                 continue;
1817             }
1818 
1819             BufferVk *bufferVk             = vk::GetImpl(bufferBinding.get());
1820             VkDeviceSize bufferOffset      = 0;
1821             vk::BufferHelper &bufferHelper = bufferVk->getBufferAndOffset(&bufferOffset);
1822 
1823             // We set the SHADER_READ_BIT to be conservative.
1824             VkAccessFlags accessFlags = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
1825             commandBufferHelper->bufferWrite(this, accessFlags, vk::GetPipelineStage(shaderType),
1826                                              vk::AliasingMode::Allowed, &bufferHelper);
1827         }
1828 
1829         const std::vector<gl::AtomicCounterBuffer> &acbs = programState.getAtomicCounterBuffers();
1830         for (const gl::AtomicCounterBuffer &atomicCounterBuffer : acbs)
1831         {
1832             uint32_t binding = atomicCounterBuffer.binding;
1833             const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
1834                 mState.getIndexedAtomicCounterBuffer(binding);
1835 
1836             if (bufferBinding.get() == nullptr)
1837             {
1838                 continue;
1839             }
1840 
1841             BufferVk *bufferVk             = vk::GetImpl(bufferBinding.get());
1842             VkDeviceSize bufferOffset      = 0;
1843             vk::BufferHelper &bufferHelper = bufferVk->getBufferAndOffset(&bufferOffset);
1844 
1845             // We set SHADER_READ_BIT to be conservative.
1846             commandBufferHelper->bufferWrite(
1847                 this, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
1848                 vk::GetPipelineStage(shaderType), vk::AliasingMode::Allowed, &bufferHelper);
1849         }
1850     }
1851 
1852     ANGLE_TRY(mExecutable->updateShaderResourcesDescriptorSet(
1853         this, mDrawFramebuffer, mShaderBuffersDescriptorDesc, commandBufferHelper));
1854 
1855     // Record usage of storage buffers and images in the command buffer to aid handling of
1856     // glMemoryBarrier.
1857     if (hasImages || hasStorageBuffers)
1858     {
1859         commandBufferHelper->setHasShaderStorageOutput();
1860     }
1861 
1862     return angle::Result::Continue;
1863 }
1864 
1865 angle::Result ContextVk::handleDirtyGraphicsShaderResources(DirtyBits::Iterator *dirtyBitsIterator,
1866                                                             DirtyBits dirtyBitMask)
1867 {
1868     return handleDirtyShaderResourcesImpl(mRenderPassCommands);
1869 }
1870 
1871 angle::Result ContextVk::handleDirtyComputeShaderResources()
1872 {
1873     return handleDirtyShaderResourcesImpl(mOutsideRenderPassCommands);
1874 }
1875 
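// There are two transform feedback paths: this emulation path, where the vertex shader
// writes captured varyings to storage buffers (hence the VK_ACCESS_SHADER_WRITE_BIT
// barriers below), and the VK_EXT_transform_feedback path handled in
// handleDirtyGraphicsTransformFeedbackBuffersExtension.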
1876 angle::Result ContextVk::handleDirtyGraphicsTransformFeedbackBuffersEmulation(
1877     DirtyBits::Iterator *dirtyBitsIterator,
1878     DirtyBits dirtyBitMask)
1879 {
1880     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
1881     ASSERT(executable);
1882 
1883     if (!executable->hasTransformFeedbackOutput())
1884     {
1885         return angle::Result::Continue;
1886     }
1887 
1888     TransformFeedbackVk *transformFeedbackVk = vk::GetImpl(mState.getCurrentTransformFeedback());
1889 
1890     if (mState.isTransformFeedbackActiveUnpaused())
1891     {
1892         size_t bufferCount = executable->getTransformFeedbackBufferCount();
1893         const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &bufferHelpers =
1894             transformFeedbackVk->getBufferHelpers();
1895 
1896         for (size_t bufferIndex = 0; bufferIndex < bufferCount; ++bufferIndex)
1897         {
1898             vk::BufferHelper *bufferHelper = bufferHelpers[bufferIndex];
1899             ASSERT(bufferHelper);
1900             mRenderPassCommands->bufferWrite(this, VK_ACCESS_SHADER_WRITE_BIT,
1901                                              vk::PipelineStage::VertexShader,
1902                                              vk::AliasingMode::Disallowed, bufferHelper);
1903         }
1904     }
1905 
1906     // TODO(http://anglebug.com/3570): Need to update to handle Program Pipelines
1907     vk::BufferHelper *uniformBuffer = mDefaultUniformStorage.getCurrentBuffer();
1908     vk::UniformsAndXfbDescriptorDesc xfbBufferDesc =
1909         transformFeedbackVk->getTransformFeedbackDesc();
1910     xfbBufferDesc.updateDefaultUniformBuffer(uniformBuffer ? uniformBuffer->getBufferSerial()
1911                                                            : vk::kInvalidBufferSerial);
1912 
1913     return mProgram->getExecutable().updateTransformFeedbackDescriptorSet(
1914         mProgram->getState(), mProgram->getDefaultUniformBlocks(), uniformBuffer, this,
1915         xfbBufferDesc);
1916 }
1917 
1918 angle::Result ContextVk::handleDirtyGraphicsTransformFeedbackBuffersExtension(
1919     DirtyBits::Iterator *dirtyBitsIterator,
1920     DirtyBits dirtyBitMask)
1921 {
1922     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
1923     ASSERT(executable);
1924 
1925     if (!executable->hasTransformFeedbackOutput() || !mState.isTransformFeedbackActive())
1926     {
1927         return angle::Result::Continue;
1928     }
1929 
1930     TransformFeedbackVk *transformFeedbackVk = vk::GetImpl(mState.getCurrentTransformFeedback());
1931     size_t bufferCount                       = executable->getTransformFeedbackBufferCount();
1932 
1933     const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &buffers =
1934         transformFeedbackVk->getBufferHelpers();
1935     gl::TransformFeedbackBuffersArray<vk::BufferHelper> &counterBuffers =
1936         transformFeedbackVk->getCounterBufferHelpers();
1937 
1938     // Issue necessary barriers for the transform feedback buffers.
1939     for (size_t bufferIndex = 0; bufferIndex < bufferCount; ++bufferIndex)
1940     {
1941         vk::BufferHelper *bufferHelper = buffers[bufferIndex];
1942         ASSERT(bufferHelper);
1943         mRenderPassCommands->bufferWrite(this, VK_ACCESS_TRANSFORM_FEEDBACK_WRITE_BIT_EXT,
1944                                          vk::PipelineStage::TransformFeedback,
1945                                          vk::AliasingMode::Disallowed, bufferHelper);
1946     }
1947 
1948     // Issue necessary barriers for the transform feedback counter buffer.  Note that the barrier is
1949     // issued only on the first buffer (which uses a global memory barrier), as all the counter
1950     // buffers of the transform feedback object are used together.
1951     ASSERT(counterBuffers[0].valid());
1952     mRenderPassCommands->bufferWrite(this,
1953                                      VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_WRITE_BIT_EXT |
1954                                          VK_ACCESS_TRANSFORM_FEEDBACK_COUNTER_READ_BIT_EXT,
1955                                      vk::PipelineStage::TransformFeedback,
1956                                      vk::AliasingMode::Disallowed, &counterBuffers[0]);
1957 
1958     const gl::TransformFeedbackBuffersArray<VkBuffer> &bufferHandles =
1959         transformFeedbackVk->getBufferHandles();
1960     const gl::TransformFeedbackBuffersArray<VkDeviceSize> &bufferOffsets =
1961         transformFeedbackVk->getBufferOffsets();
1962     const gl::TransformFeedbackBuffersArray<VkDeviceSize> &bufferSizes =
1963         transformFeedbackVk->getBufferSizes();
1964 
1965     mRenderPassCommandBuffer->bindTransformFeedbackBuffers(
1966         0, static_cast<uint32_t>(bufferCount), bufferHandles.data(), bufferOffsets.data(),
1967         bufferSizes.data());
1968 
1969     if (!mState.isTransformFeedbackActiveUnpaused())
1970     {
1971         return angle::Result::Continue;
1972     }
1973 
1974     // We should have the same number of counter buffers as transform feedback buffers.
1975     const gl::TransformFeedbackBuffersArray<VkBuffer> &counterBufferHandles =
1976         transformFeedbackVk->getCounterBufferHandles();
1977 
1978     bool rebindBuffers = transformFeedbackVk->getAndResetBufferRebindState();
1979 
1980     mRenderPassCommands->beginTransformFeedback(bufferCount, counterBufferHandles.data(),
1981                                                 rebindBuffers);
1982 
1983     return angle::Result::Continue;
1984 }
1985 
1986 angle::Result ContextVk::handleDirtyGraphicsTransformFeedbackResume(
1987     DirtyBits::Iterator *dirtyBitsIterator,
1988     DirtyBits dirtyBitMask)
1989 {
1990     if (mRenderPassCommands->isTransformFeedbackStarted())
1991     {
1992         mRenderPassCommands->resumeTransformFeedback();
1993     }
1994     return angle::Result::Continue;
1995 }
1996 
1997 angle::Result ContextVk::handleDirtyGraphicsDescriptorSets(DirtyBits::Iterator *dirtyBitsIterator,
1998                                                            DirtyBits dirtyBitMask)
1999 {
2000     return handleDirtyDescriptorSetsImpl(mRenderPassCommandBuffer);
2001 }
2002 
2003 angle::Result ContextVk::handleDirtyGraphicsViewport(DirtyBits::Iterator *dirtyBitsIterator,
2004                                                      DirtyBits dirtyBitMask)
2005 {
2006     mRenderPassCommandBuffer->setViewport(0, 1, &mViewport);
2007     return angle::Result::Continue;
2008 }
2009 
2010 angle::Result ContextVk::handleDirtyGraphicsScissor(DirtyBits::Iterator *dirtyBitsIterator,
2011                                                     DirtyBits dirtyBitMask)
2012 {
2013     handleDirtyGraphicsScissorImpl(mState.isQueryActive(gl::QueryType::PrimitivesGenerated));
2014     return angle::Result::Continue;
2015 }
2016 
2017 void ContextVk::handleDirtyGraphicsScissorImpl(bool isPrimitivesGeneratedQueryActive)
2018 {
2019     // If primitives generated query and rasterizer discard are both active, but the Vulkan
2020     // implementation of the query does not support rasterizer discard, use an empty scissor to
2021     // emulate it.
2022     if (isEmulatingRasterizerDiscardDuringPrimitivesGeneratedQuery(
2023             isPrimitivesGeneratedQueryActive))
2024     {
2025         VkRect2D emptyScissor = {};
2026         mRenderPassCommandBuffer->setScissor(0, 1, &emptyScissor);
2027     }
2028     else
2029     {
2030         mRenderPassCommandBuffer->setScissor(0, 1, &mScissor);
2031     }
2032 }
2033 
2034 angle::Result ContextVk::handleDirtyComputeDescriptorSets()
2035 {
2036     return handleDirtyDescriptorSetsImpl(&mOutsideRenderPassCommands->getCommandBuffer());
2037 }
2038 
2039 angle::Result ContextVk::handleDirtyDescriptorSetsImpl(vk::CommandBuffer *commandBuffer)
2040 {
2041     return mExecutable->updateDescriptorSets(this, commandBuffer);
2042 }
2043 
2044 void ContextVk::syncObjectPerfCounters()
2045 {
2046     mPerfCounters.descriptorSetAllocations              = 0;
2047     mPerfCounters.shaderBuffersDescriptorSetCacheHits   = 0;
2048     mPerfCounters.shaderBuffersDescriptorSetCacheMisses = 0;
2049 
2050     // ContextVk's descriptor set allocations
2051     ContextVkPerfCounters contextCounters = getAndResetObjectPerfCounters();
2052     for (uint32_t count : contextCounters.descriptorSetsAllocated)
2053     {
2054         mPerfCounters.descriptorSetAllocations += count;
2055     }
2056     // UtilsVk's descriptor set allocations
2057     mPerfCounters.descriptorSetAllocations +=
2058         mUtils.getAndResetObjectPerfCounters().descriptorSetsAllocated;
2059     // ProgramExecutableVk's descriptor set allocations
2060     const gl::State &state                             = getState();
2061     const gl::ShaderProgramManager &shadersAndPrograms = state.getShaderProgramManagerForCapture();
2062     const gl::ResourceMap<gl::Program, gl::ShaderProgramID> &programs =
2063         shadersAndPrograms.getProgramsForCaptureAndPerf();
2064     for (const std::pair<GLuint, gl::Program *> &resource : programs)
2065     {
2066         gl::Program *program = resource.second;
2067         if (program->hasLinkingState())
2068         {
2069             continue;
2070         }
2071         ProgramVk *programVk = vk::GetImpl(resource.second);
2072         ProgramExecutablePerfCounters progPerfCounters =
2073             programVk->getExecutable().getAndResetObjectPerfCounters();
2074 
2075         for (uint32_t count : progPerfCounters.descriptorSetAllocations)
2076         {
2077             mPerfCounters.descriptorSetAllocations += count;
2078         }
2079 
2080         mPerfCounters.shaderBuffersDescriptorSetCacheHits +=
2081             progPerfCounters.descriptorSetCacheHits[DescriptorSetIndex::ShaderResource];
2082         mPerfCounters.shaderBuffersDescriptorSetCacheMisses +=
2083             progPerfCounters.descriptorSetCacheMisses[DescriptorSetIndex::ShaderResource];
2084     }
2085 }
2086 
2087 void ContextVk::updateOverlayOnPresent()
2088 {
2089     const gl::OverlayType *overlay = mState.getOverlay();
2090     ASSERT(overlay->isEnabled());
2091 
2092     syncObjectPerfCounters();
2093 
2094     // Update overlay if active.
2095     {
2096         gl::RunningGraphWidget *renderPassCount =
2097             overlay->getRunningGraphWidget(gl::WidgetId::VulkanRenderPassCount);
2098         renderPassCount->add(mRenderPassCommands->getAndResetCounter());
2099         renderPassCount->next();
2100     }
2101 
2102     {
2103         gl::RunningGraphWidget *writeDescriptorSetCount =
2104             overlay->getRunningGraphWidget(gl::WidgetId::VulkanWriteDescriptorSetCount);
2105         writeDescriptorSetCount->add(mPerfCounters.writeDescriptorSets);
2106         writeDescriptorSetCount->next();
2107 
2108         mPerfCounters.writeDescriptorSets = 0;
2109     }
2110 
2111     {
2112         gl::RunningGraphWidget *descriptorSetAllocationCount =
2113             overlay->getRunningGraphWidget(gl::WidgetId::VulkanDescriptorSetAllocations);
2114         descriptorSetAllocationCount->add(mPerfCounters.descriptorSetAllocations);
2115         descriptorSetAllocationCount->next();
2116     }
2117 
2118     {
2119         gl::RunningGraphWidget *shaderBufferHitRate =
2120             overlay->getRunningGraphWidget(gl::WidgetId::VulkanShaderBufferDSHitRate);
2121         size_t numCacheAccesses = mPerfCounters.shaderBuffersDescriptorSetCacheHits +
2122                                   mPerfCounters.shaderBuffersDescriptorSetCacheMisses;
2123         if (numCacheAccesses > 0)
2124         {
2125             float hitRateFloat =
2126                 static_cast<float>(mPerfCounters.shaderBuffersDescriptorSetCacheHits) /
2127                 static_cast<float>(numCacheAccesses);
2128             size_t hitRate = static_cast<size_t>(hitRateFloat * 100.0f);
2129             shaderBufferHitRate->add(hitRate);
2130             shaderBufferHitRate->next();
2131         }
2132     }
2133 
2134     {
2135         gl::RunningGraphWidget *dynamicBufferAllocations =
2136             overlay->getRunningGraphWidget(gl::WidgetId::VulkanDynamicBufferAllocations);
2137         dynamicBufferAllocations->next();
2138     }
2139 }
2140 
2141 void ContextVk::addOverlayUsedBuffersCount(vk::CommandBufferHelper *commandBuffer)
2142 {
2143     const gl::OverlayType *overlay = mState.getOverlay();
2144     if (!overlay->isEnabled())
2145     {
2146         return;
2147     }
2148 
2149     gl::RunningHistogramWidget *widget =
2150         overlay->getRunningHistogramWidget(gl::WidgetId::VulkanRenderPassBufferCount);
2151     size_t buffersCount = commandBuffer->getUsedBuffersCount();
2152     if (buffersCount > 0)
2153     {
2154         widget->add(buffersCount);
2155         widget->next();
2156     }
2157 }
2158 
2159 angle::Result ContextVk::submitFrame(const vk::Semaphore *signalSemaphore)
2160 {
2161     if (mCurrentWindowSurface)
2162     {
2163         vk::Semaphore waitSemaphore = mCurrentWindowSurface->getAcquireImageSemaphore();
2164         if (waitSemaphore.valid())
2165         {
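            // Waiting at COLOR_ATTACHMENT_OUTPUT is the usual swapchain idiom: stages
            // that don't write the swapchain image can start before the acquire
            // semaphore signals; only color output against the acquired image waits.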
2166             addWaitSemaphore(waitSemaphore.getHandle(),
2167                              VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
2168             addGarbage(&waitSemaphore);
2169         }
2170     }
2171 
2172     if (vk::CommandBufferHelper::kEnableCommandStreamDiagnostics)
2173     {
2174         dumpCommandStreamDiagnostics();
2175     }
2176 
2177     getShareGroupVk()->acquireResourceUseList(std::move(mResourceUseList));
2178     ANGLE_TRY(mRenderer->submitFrame(
2179         this, hasProtectedContent(), mContextPriority, std::move(mWaitSemaphores),
2180         std::move(mWaitSemaphoreStageMasks), signalSemaphore,
2181         getShareGroupVk()->releaseResourceUseLists(), std::move(mCurrentGarbage), &mCommandPool));
2182 
2183     onRenderPassFinished();
2184     mComputeDirtyBits |= mNewComputeCommandBufferDirtyBits;
2185 
2186     if (mGpuEventsEnabled)
2187     {
2188         ANGLE_TRY(checkCompletedGpuEvents());
2189     }
2190 
2191     return angle::Result::Continue;
2192 }
2193 
2194 angle::Result ContextVk::synchronizeCpuGpuTime()
2195 {
2196     ASSERT(mGpuEventsEnabled);
2197 
2198     angle::PlatformMethods *platform = ANGLEPlatformCurrent();
2199     ASSERT(platform);
2200 
2201     // To synchronize CPU and GPU times, we need to get the CPU timestamp as close as possible
2202     // to the GPU timestamp.  The process of getting the GPU timestamp is as follows:
2203     //
2204     //             CPU                            GPU
2205     //
2206     //     Record command buffer
2207     //     with timestamp query
2208     //
2209     //     Submit command buffer
2210     //
2211     //     Post-submission work             Begin execution
2212     //
2213     //            ????                    Write timestamp Tgpu
2214     //
2215     //            ????                       End execution
2216     //
2217     //            ????                    Return query results
2218     //
2219     //            ????
2220     //
2221     //       Get query results
2222     //
2223     // The areas of unknown work (????) on the CPU indicate that the CPU may or may not have
2224     // finished post-submission work while the GPU is executing in parallel. With no further
2225     // work, querying CPU timestamps before submission and after getting query results gives
2226     // bounds on Tgpu, which could be quite large.
2227     //
2228     // Using VkEvents, the GPU can be made to wait for the CPU and vice versa, in an effort to
2229     // reduce this range. This function implements the following procedure:
2230     //
2231     //             CPU                            GPU
2232     //
2233     //     Record command buffer
2234     //     with timestamp query
2235     //
2236     //     Submit command buffer
2237     //
2238     //     Post-submission work             Begin execution
2239     //
2240     //            ????                    Set Event GPUReady
2241     //
2242     //    Wait on Event GPUReady         Wait on Event CPUReady
2243     //
2244     //       Get CPU Time Ts             Wait on Event CPUReady
2245     //
2246     //      Set Event CPUReady           Wait on Event CPUReady
2247     //
2248     //      Get CPU Time Tcpu              Get GPU Time Tgpu
2249     //
2250     //    Wait on Event GPUDone            Set Event GPUDone
2251     //
2252     //       Get CPU Time Te                 End Execution
2253     //
2254     //            Idle                    Return query results
2255     //
2256     //      Get query results
2257     //
2258     // If Te-Ts > epsilon, a GPU or CPU interruption can be assumed and the operation can be
2259     // retried.  Once Te-Ts < epsilon, Tcpu can be taken to presumably match Tgpu.  Finding an
2260     // epsilon that's valid for all devices may be difficult, so the loop can be performed only
2261     // a limited number of times, and the Tcpu,Tgpu pair corresponding to the smallest Te-Ts is
2262     // used for calibration.
2263     //
2264     // Note: Once VK_EXT_calibrated_timestamps is ubiquitous, this should be redone.
2265 
2266     // Make sure nothing is running
2267     ASSERT(!hasRecordedCommands());
2268 
2269     ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::synchronizeCpuGpuTime");
2270 
2271     // Create a query used to receive the GPU timestamp
2272     vk::QueryHelper timestampQuery;
2273     ANGLE_TRY(mGpuEventQueryPool.allocateQuery(this, &timestampQuery, 1));
2274 
2275     // Create the three events
2276     VkEventCreateInfo eventCreateInfo = {};
2277     eventCreateInfo.sType             = VK_STRUCTURE_TYPE_EVENT_CREATE_INFO;
2278     eventCreateInfo.flags             = 0;
2279 
2280     VkDevice device = getDevice();
2281     vk::DeviceScoped<vk::Event> cpuReady(device), gpuReady(device), gpuDone(device);
2282     ANGLE_VK_TRY(this, cpuReady.get().init(device, eventCreateInfo));
2283     ANGLE_VK_TRY(this, gpuReady.get().init(device, eventCreateInfo));
2284     ANGLE_VK_TRY(this, gpuDone.get().init(device, eventCreateInfo));
2285 
2286     constexpr uint32_t kRetries = 10;
2287 
2288     // Time suffixes used are S for seconds and Cycles for cycles
2289     double tightestRangeS = 1e6f;
2290     double TcpuS          = 0;
2291     uint64_t TgpuCycles   = 0;
2292     for (uint32_t i = 0; i < kRetries; ++i)
2293     {
2294         // Reset the events
2295         ANGLE_VK_TRY(this, cpuReady.get().reset(device));
2296         ANGLE_VK_TRY(this, gpuReady.get().reset(device));
2297         ANGLE_VK_TRY(this, gpuDone.get().reset(device));
2298 
2299         // Record the command buffer
2300         vk::DeviceScoped<vk::PrimaryCommandBuffer> commandBatch(device);
2301         vk::PrimaryCommandBuffer &commandBuffer = commandBatch.get();
2302 
2303         vk::ResourceUseList scratchResourceUseList;
2304 
2305         ANGLE_TRY(mRenderer->getCommandBufferOneOff(this, hasProtectedContent(), &commandBuffer));
2306 
2307         commandBuffer.setEvent(gpuReady.get().getHandle(), VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2308         commandBuffer.waitEvents(1, cpuReady.get().ptr(), VK_PIPELINE_STAGE_HOST_BIT,
2309                                  VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, nullptr, 0, nullptr, 0,
2310                                  nullptr);
2311         timestampQuery.writeTimestampToPrimary(this, &commandBuffer);
2312         timestampQuery.retain(&scratchResourceUseList);
2313 
2314         commandBuffer.setEvent(gpuDone.get().getHandle(), VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2315 
2316         ANGLE_VK_TRY(this, commandBuffer.end());
2317 
2318         Serial throwAwaySerial;
2319         // VkEvents are externally synchronized; the work therefore needs to be submitted
2320         // before vkGetEventStatus is called.
2321         ANGLE_TRY(mRenderer->queueSubmitOneOff(
2322             this, std::move(commandBuffer), hasProtectedContent(), mContextPriority, nullptr,
2323             vk::SubmitPolicy::EnsureSubmitted, &throwAwaySerial));
2324         scratchResourceUseList.releaseResourceUsesAndUpdateSerials(throwAwaySerial);
2325 
2326         // Wait for GPU to be ready.  This is a short busy wait.
2327         VkResult result = VK_EVENT_RESET;
2328         do
2329         {
2330             result = gpuReady.get().getStatus(device);
2331             if (result != VK_EVENT_SET && result != VK_EVENT_RESET)
2332             {
2333                 ANGLE_VK_TRY(this, result);
2334             }
2335         } while (result == VK_EVENT_RESET);
2336 
2337         double TsS = platform->monotonicallyIncreasingTime(platform);
2338 
2339         // Tell the GPU to go ahead with the timestamp query.
2340         ANGLE_VK_TRY(this, cpuReady.get().set(device));
2341         double cpuTimestampS = platform->monotonicallyIncreasingTime(platform);
2342 
2343         // Wait for GPU to be done.  Another short busy wait.
2344         do
2345         {
2346             result = gpuDone.get().getStatus(device);
2347             if (result != VK_EVENT_SET && result != VK_EVENT_RESET)
2348             {
2349                 ANGLE_VK_TRY(this, result);
2350             }
2351         } while (result == VK_EVENT_RESET);
2352 
2353         double TeS = platform->monotonicallyIncreasingTime(platform);
2354 
2355         // Get the query results
2356         // Note: This LastSubmittedQueueSerial may include more work than was submitted above
2357         // if another thread has submitted work.
2358         ANGLE_TRY(finishToSerial(getLastSubmittedQueueSerial()));
2359 
2360         vk::QueryResult gpuTimestampCycles(1);
2361         ANGLE_TRY(timestampQuery.getUint64Result(this, &gpuTimestampCycles));
2362 
2363         // Use the first timestamp queried as origin.
2364         if (mGpuEventTimestampOrigin == 0)
2365         {
2366             mGpuEventTimestampOrigin =
2367                 gpuTimestampCycles.getResult(vk::QueryResult::kDefaultResultIndex);
2368         }
2369 
2370         // Keep these CPU and GPU timestamps if this attempt has a tighter confidence range.
2371         double confidenceRangeS = TeS - TsS;
2372         if (confidenceRangeS < tightestRangeS)
2373         {
2374             tightestRangeS = confidenceRangeS;
2375             TcpuS          = cpuTimestampS;
2376             TgpuCycles     = gpuTimestampCycles.getResult(vk::QueryResult::kDefaultResultIndex);
2377         }
2378     }
2379 
2380     mGpuEventQueryPool.freeQuery(this, &timestampQuery);
2381 
2382     // timestampPeriod gives nanoseconds/cycle.
2383     double TgpuS =
2384         (TgpuCycles - mGpuEventTimestampOrigin) *
2385         static_cast<double>(getRenderer()->getPhysicalDeviceProperties().limits.timestampPeriod) /
2386         1'000'000'000.0;
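    // For example (illustrative numbers): with timestampPeriod = 38.4 ns/cycle and a delta of
    // 1'000'000 cycles from the origin, TgpuS = 1e6 * 38.4 / 1e9 = 0.0384 seconds.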
2387 
2388     flushGpuEvents(TgpuS, TcpuS);
2389 
2390     mGpuClockSync.gpuTimestampS = TgpuS;
2391     mGpuClockSync.cpuTimestampS = TcpuS;
2392 
2393     return angle::Result::Continue;
2394 }
2395 
2396 angle::Result ContextVk::traceGpuEventImpl(vk::CommandBuffer *commandBuffer,
2397                                            char phase,
2398                                            const EventName &name)
2399 {
2400     ASSERT(mGpuEventsEnabled);
2401 
2402     GpuEventQuery gpuEvent;
2403     gpuEvent.name  = name;
2404     gpuEvent.phase = phase;
2405     ANGLE_TRY(mGpuEventQueryPool.allocateQuery(this, &gpuEvent.queryHelper, 1));
2406 
2407     gpuEvent.queryHelper.writeTimestamp(this, commandBuffer);
2408 
2409     mInFlightGpuEventQueries.push_back(std::move(gpuEvent));
2410     return angle::Result::Continue;
2411 }
2412 
2413 angle::Result ContextVk::checkCompletedGpuEvents()
2414 {
2415     ASSERT(mGpuEventsEnabled);
2416 
2417     angle::PlatformMethods *platform = ANGLEPlatformCurrent();
2418     ASSERT(platform);
2419 
2420     int finishedCount = 0;
2421 
2422     Serial lastCompletedSerial = getLastCompletedQueueSerial();
2423 
2424     for (GpuEventQuery &eventQuery : mInFlightGpuEventQueries)
2425     {
2426         // Only check the timestamp query if the submission has finished.
2427         if (eventQuery.queryHelper.usedInRunningCommands(lastCompletedSerial))
2428         {
2429             break;
2430         }
2431 
2432         // See if the results are available.
2433         vk::QueryResult gpuTimestampCycles(1);
2434         bool available = false;
2435         ANGLE_TRY(eventQuery.queryHelper.getUint64ResultNonBlocking(this, &gpuTimestampCycles,
2436                                                                     &available));
2437         if (!available)
2438         {
2439             break;
2440         }
2441 
2442         mGpuEventQueryPool.freeQuery(this, &eventQuery.queryHelper);
2443 
2444         GpuEvent gpuEvent;
2445         gpuEvent.gpuTimestampCycles =
2446             gpuTimestampCycles.getResult(vk::QueryResult::kDefaultResultIndex);
2447         gpuEvent.name  = eventQuery.name;
2448         gpuEvent.phase = eventQuery.phase;
2449 
2450         mGpuEvents.emplace_back(gpuEvent);
2451 
2452         ++finishedCount;
2453     }
2454 
2455     mInFlightGpuEventQueries.erase(mInFlightGpuEventQueries.begin(),
2456                                    mInFlightGpuEventQueries.begin() + finishedCount);
2457 
2458     return angle::Result::Continue;
2459 }
2460 
2461 void ContextVk::flushGpuEvents(double nextSyncGpuTimestampS, double nextSyncCpuTimestampS)
2462 {
2463     if (mGpuEvents.empty())
2464     {
2465         return;
2466     }
2467 
2468     angle::PlatformMethods *platform = ANGLEPlatformCurrent();
2469     ASSERT(platform);
2470 
2471     // Find the slope of the clock drift for adjustment
2472     double lastGpuSyncTimeS  = mGpuClockSync.gpuTimestampS;
2473     double lastGpuSyncDiffS  = mGpuClockSync.cpuTimestampS - mGpuClockSync.gpuTimestampS;
2474     double gpuSyncDriftSlope = 0;
2475 
2476     double nextGpuSyncTimeS = nextSyncGpuTimestampS;
2477     double nextGpuSyncDiffS = nextSyncCpuTimestampS - nextSyncGpuTimestampS;
2478 
2479     // No gpu trace events should have been generated before the clock sync, so if there is no
2480     // "previous" clock sync, there should be no gpu events (i.e. the function returns early
2481     // above).
2482     ASSERT(mGpuClockSync.gpuTimestampS != std::numeric_limits<double>::max() &&
2483            mGpuClockSync.cpuTimestampS != std::numeric_limits<double>::max());
2484 
2485     gpuSyncDriftSlope =
2486         (nextGpuSyncDiffS - lastGpuSyncDiffS) / (nextGpuSyncTimeS - lastGpuSyncTimeS);
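    // The drift model is linear: for a GPU timestamp t (in seconds), the equivalent CPU time is
    // approximately t + lastGpuSyncDiffS + gpuSyncDriftSlope * (t - lastGpuSyncTimeS); this is
    // the adjustment applied to each event below.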
2487 
2488     for (const GpuEvent &gpuEvent : mGpuEvents)
2489     {
2490         double gpuTimestampS =
2491             (gpuEvent.gpuTimestampCycles - mGpuEventTimestampOrigin) *
2492             static_cast<double>(
2493                 getRenderer()->getPhysicalDeviceProperties().limits.timestampPeriod) *
2494             1e-9;
2495 
2496         // Account for clock drift.
2497         gpuTimestampS += lastGpuSyncDiffS + gpuSyncDriftSlope * (gpuTimestampS - lastGpuSyncTimeS);
2498 
2499         // Generate the trace now that the GPU timestamp is available and clock drifts are
2500         // accounted for.
2501         static long long eventId = 1;
2502         static const unsigned char *categoryEnabled =
2503             TRACE_EVENT_API_GET_CATEGORY_ENABLED(platform, "gpu.angle.gpu");
2504         platform->addTraceEvent(platform, gpuEvent.phase, categoryEnabled, gpuEvent.name.data(),
2505                                 eventId++, gpuTimestampS, 0, nullptr, nullptr, nullptr,
2506                                 TRACE_EVENT_FLAG_NONE);
2507     }
2508 
2509     mGpuEvents.clear();
2510 }
2511 
2512 void ContextVk::clearAllGarbage()
2513 {
2514     ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::clearAllGarbage");
2515     for (vk::GarbageObject &garbage : mCurrentGarbage)
2516     {
2517         garbage.destroy(mRenderer);
2518     }
2519     mCurrentGarbage.clear();
2520 }
2521 
2522 void ContextVk::handleDeviceLost()
2523 {
2524     mOutsideRenderPassCommands->reset();
2525     mRenderPassCommands->reset();
2526     mRenderer->handleDeviceLost();
2527     clearAllGarbage();
2528 
2529     mRenderer->notifyDeviceLost();
2530 }
2531 
2532 angle::Result ContextVk::drawArrays(const gl::Context *context,
2533                                     gl::PrimitiveMode mode,
2534                                     GLint first,
2535                                     GLsizei count)
2536 {
2537     uint32_t clampedVertexCount = gl::GetClampedVertexCount<uint32_t>(count);
2538 
2539     if (mode == gl::PrimitiveMode::LineLoop)
2540     {
2541         uint32_t numIndices;
2542         ANGLE_TRY(setupLineLoopDraw(context, mode, first, count, gl::DrawElementsType::InvalidEnum,
2543                                     nullptr, &numIndices));
2544         vk::LineLoopHelper::Draw(numIndices, 0, mRenderPassCommandBuffer);
2545     }
2546     else
2547     {
2548         ANGLE_TRY(setupDraw(context, mode, first, count, 1, gl::DrawElementsType::InvalidEnum,
2549                             nullptr, mNonIndexedDirtyBitsMask));
2550         mRenderPassCommandBuffer->draw(clampedVertexCount, first);
2551     }
2552 
2553     return angle::Result::Continue;
2554 }
2555 
2556 angle::Result ContextVk::drawArraysInstanced(const gl::Context *context,
2557                                              gl::PrimitiveMode mode,
2558                                              GLint first,
2559                                              GLsizei count,
2560                                              GLsizei instances)
2561 {
2562     if (mode == gl::PrimitiveMode::LineLoop)
2563     {
2564         uint32_t clampedVertexCount = gl::GetClampedVertexCount<uint32_t>(count);
2565         uint32_t numIndices;
2566         ANGLE_TRY(setupLineLoopDraw(context, mode, first, clampedVertexCount,
2567                                     gl::DrawElementsType::InvalidEnum, nullptr, &numIndices));
2568         mRenderPassCommandBuffer->drawIndexedInstanced(numIndices, instances);
2569         return angle::Result::Continue;
2570     }
2571 
2572     ANGLE_TRY(setupDraw(context, mode, first, count, instances, gl::DrawElementsType::InvalidEnum,
2573                         nullptr, mNonIndexedDirtyBitsMask));
2574     mRenderPassCommandBuffer->drawInstanced(gl::GetClampedVertexCount<uint32_t>(count), instances,
2575                                             first);
2576     return angle::Result::Continue;
2577 }
2578 
2579 angle::Result ContextVk::drawArraysInstancedBaseInstance(const gl::Context *context,
2580                                                          gl::PrimitiveMode mode,
2581                                                          GLint first,
2582                                                          GLsizei count,
2583                                                          GLsizei instances,
2584                                                          GLuint baseInstance)
2585 {
2586     if (mode == gl::PrimitiveMode::LineLoop)
2587     {
2588         uint32_t clampedVertexCount = gl::GetClampedVertexCount<uint32_t>(count);
2589         uint32_t numIndices;
2590         ANGLE_TRY(setupLineLoopDraw(context, mode, first, clampedVertexCount,
2591                                     gl::DrawElementsType::InvalidEnum, nullptr, &numIndices));
2592         mRenderPassCommandBuffer->drawIndexedInstancedBaseVertexBaseInstance(numIndices, instances,
2593                                                                              0, 0, baseInstance);
2594         return angle::Result::Continue;
2595     }
2596 
2597     ANGLE_TRY(setupDraw(context, mode, first, count, instances, gl::DrawElementsType::InvalidEnum,
2598                         nullptr, mNonIndexedDirtyBitsMask));
2599     mRenderPassCommandBuffer->drawInstancedBaseInstance(gl::GetClampedVertexCount<uint32_t>(count),
2600                                                         instances, first, baseInstance);
2601     return angle::Result::Continue;
2602 }
2603 
2604 angle::Result ContextVk::drawElements(const gl::Context *context,
2605                                       gl::PrimitiveMode mode,
2606                                       GLsizei count,
2607                                       gl::DrawElementsType type,
2608                                       const void *indices)
2609 {
2610     if (mode == gl::PrimitiveMode::LineLoop)
2611     {
2612         uint32_t indexCount;
2613         ANGLE_TRY(setupLineLoopDraw(context, mode, 0, count, type, indices, &indexCount));
2614         vk::LineLoopHelper::Draw(indexCount, 0, mRenderPassCommandBuffer);
2615     }
2616     else
2617     {
2618         ANGLE_TRY(setupIndexedDraw(context, mode, count, 1, type, indices));
2619         mRenderPassCommandBuffer->drawIndexed(count);
2620     }
2621 
2622     return angle::Result::Continue;
2623 }
2624 
2625 angle::Result ContextVk::drawElementsBaseVertex(const gl::Context *context,
2626                                                 gl::PrimitiveMode mode,
2627                                                 GLsizei count,
2628                                                 gl::DrawElementsType type,
2629                                                 const void *indices,
2630                                                 GLint baseVertex)
2631 {
2632     if (mode == gl::PrimitiveMode::LineLoop)
2633     {
2634         uint32_t indexCount;
2635         ANGLE_TRY(setupLineLoopDraw(context, mode, 0, count, type, indices, &indexCount));
2636         vk::LineLoopHelper::Draw(indexCount, baseVertex, mRenderPassCommandBuffer);
2637     }
2638     else
2639     {
2640         ANGLE_TRY(setupIndexedDraw(context, mode, count, 1, type, indices));
2641         mRenderPassCommandBuffer->drawIndexedBaseVertex(count, baseVertex);
2642     }
2643 
2644     return angle::Result::Continue;
2645 }
2646 
2647 angle::Result ContextVk::drawElementsInstanced(const gl::Context *context,
2648                                                gl::PrimitiveMode mode,
2649                                                GLsizei count,
2650                                                gl::DrawElementsType type,
2651                                                const void *indices,
2652                                                GLsizei instances)
2653 {
2654     if (mode == gl::PrimitiveMode::LineLoop)
2655     {
2656         uint32_t indexCount;
2657         ANGLE_TRY(setupLineLoopDraw(context, mode, 0, count, type, indices, &indexCount));
2658         count = indexCount;
2659     }
2660     else
2661     {
2662         ANGLE_TRY(setupIndexedDraw(context, mode, count, instances, type, indices));
2663     }
2664 
2665     mRenderPassCommandBuffer->drawIndexedInstanced(count, instances);
2666     return angle::Result::Continue;
2667 }
2668 
2669 angle::Result ContextVk::drawElementsInstancedBaseVertex(const gl::Context *context,
2670                                                          gl::PrimitiveMode mode,
2671                                                          GLsizei count,
2672                                                          gl::DrawElementsType type,
2673                                                          const void *indices,
2674                                                          GLsizei instances,
2675                                                          GLint baseVertex)
2676 {
2677     if (mode == gl::PrimitiveMode::LineLoop)
2678     {
2679         uint32_t indexCount;
2680         ANGLE_TRY(setupLineLoopDraw(context, mode, 0, count, type, indices, &indexCount));
2681         count = indexCount;
2682     }
2683     else
2684     {
2685         ANGLE_TRY(setupIndexedDraw(context, mode, count, instances, type, indices));
2686     }
2687 
2688     mRenderPassCommandBuffer->drawIndexedInstancedBaseVertex(count, instances, baseVertex);
2689     return angle::Result::Continue;
2690 }
2691 
2692 angle::Result ContextVk::drawElementsInstancedBaseVertexBaseInstance(const gl::Context *context,
2693                                                                      gl::PrimitiveMode mode,
2694                                                                      GLsizei count,
2695                                                                      gl::DrawElementsType type,
2696                                                                      const void *indices,
2697                                                                      GLsizei instances,
2698                                                                      GLint baseVertex,
2699                                                                      GLuint baseInstance)
2700 {
2701     if (mode == gl::PrimitiveMode::LineLoop)
2702     {
2703         uint32_t indexCount;
2704         ANGLE_TRY(setupLineLoopDraw(context, mode, 0, count, type, indices, &indexCount));
2705         mRenderPassCommandBuffer->drawIndexedInstancedBaseVertexBaseInstance(
2706             indexCount, instances, 0, baseVertex, baseInstance);
2707         return angle::Result::Continue;
2708     }
2709 
2710     ANGLE_TRY(setupIndexedDraw(context, mode, count, instances, type, indices));
2711     mRenderPassCommandBuffer->drawIndexedInstancedBaseVertexBaseInstance(count, instances, 0,
2712                                                                          baseVertex, baseInstance);
2713     return angle::Result::Continue;
2714 }
2715 
2716 angle::Result ContextVk::drawRangeElements(const gl::Context *context,
2717                                            gl::PrimitiveMode mode,
2718                                            GLuint start,
2719                                            GLuint end,
2720                                            GLsizei count,
2721                                            gl::DrawElementsType type,
2722                                            const void *indices)
2723 {
2724     return drawElements(context, mode, count, type, indices);
2725 }
2726 
2727 angle::Result ContextVk::drawRangeElementsBaseVertex(const gl::Context *context,
2728                                                      gl::PrimitiveMode mode,
2729                                                      GLuint start,
2730                                                      GLuint end,
2731                                                      GLsizei count,
2732                                                      gl::DrawElementsType type,
2733                                                      const void *indices,
2734                                                      GLint baseVertex)
2735 {
2736     return drawElementsBaseVertex(context, mode, count, type, indices, baseVertex);
2737 }
2738 
2739 VkDevice ContextVk::getDevice() const
2740 {
2741     return mRenderer->getDevice();
2742 }
2743 
2744 angle::Result ContextVk::drawArraysIndirect(const gl::Context *context,
2745                                             gl::PrimitiveMode mode,
2746                                             const void *indirect)
2747 {
2748     gl::Buffer *indirectBuffer        = mState.getTargetBuffer(gl::BufferBinding::DrawIndirect);
2749     VkDeviceSize indirectBufferOffset = 0;
2750     vk::BufferHelper *currentIndirectBuf =
2751         &vk::GetImpl(indirectBuffer)->getBufferAndOffset(&indirectBufferOffset);
2752     VkDeviceSize currentIndirectBufOffset =
2753         indirectBufferOffset + reinterpret_cast<VkDeviceSize>(indirect);
2754 
2755     if (mVertexArray->getStreamingVertexAttribsMask().any())
2756     {
2757         // We have instanced vertex attributes that need to be emulated for Vulkan.
2758         // Invalidate any cache and map the buffer so that we can read the indirect data.
2759         // Mapping the buffer will cause a flush.
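        // For reference, VkDrawIndirectCommand is laid out as:
        //   { uint32_t vertexCount; uint32_t instanceCount;
        //     uint32_t firstVertex; uint32_t firstInstance; }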
2760         ANGLE_TRY(currentIndirectBuf->invalidate(mRenderer, 0, sizeof(VkDrawIndirectCommand)));
2761         uint8_t *buffPtr;
2762         ANGLE_TRY(currentIndirectBuf->map(this, &buffPtr));
2763         const VkDrawIndirectCommand *indirectData =
2764             reinterpret_cast<VkDrawIndirectCommand *>(buffPtr + currentIndirectBufOffset);
2765 
2766         ANGLE_TRY(drawArraysInstanced(context, mode, indirectData->firstVertex,
2767                                       indirectData->vertexCount, indirectData->instanceCount));
2768 
2769         currentIndirectBuf->unmap(mRenderer);
2770         return angle::Result::Continue;
2771     }
2772 
2773     if (mode == gl::PrimitiveMode::LineLoop)
2774     {
2775         ASSERT(indirectBuffer);
2776         vk::BufferHelper *dstIndirectBuf  = nullptr;
2777         VkDeviceSize dstIndirectBufOffset = 0;
2778 
2779         ANGLE_TRY(setupLineLoopIndirectDraw(context, mode, currentIndirectBuf,
2780                                             currentIndirectBufOffset, &dstIndirectBuf,
2781                                             &dstIndirectBufOffset));
2782 
2783         mRenderPassCommandBuffer->drawIndexedIndirect(dstIndirectBuf->getBuffer(),
2784                                                       dstIndirectBufOffset, 1, 0);
2785         return angle::Result::Continue;
2786     }
2787 
2788     ANGLE_TRY(setupIndirectDraw(context, mode, mNonIndexedDirtyBitsMask, currentIndirectBuf,
2789                                 currentIndirectBufOffset));
2790 
2791     mRenderPassCommandBuffer->drawIndirect(currentIndirectBuf->getBuffer(),
2792                                            currentIndirectBufOffset, 1, 0);
2793     return angle::Result::Continue;
2794 }
2795 
2796 angle::Result ContextVk::drawElementsIndirect(const gl::Context *context,
2797                                               gl::PrimitiveMode mode,
2798                                               gl::DrawElementsType type,
2799                                               const void *indirect)
2800 {
2801     gl::Buffer *indirectBuffer = mState.getTargetBuffer(gl::BufferBinding::DrawIndirect);
2802     ASSERT(indirectBuffer);
2803     VkDeviceSize indirectBufferOffset = 0;
2804     vk::BufferHelper *currentIndirectBuf =
2805         &vk::GetImpl(indirectBuffer)->getBufferAndOffset(&indirectBufferOffset);
2806     VkDeviceSize currentIndirectBufOffset =
2807         indirectBufferOffset + reinterpret_cast<VkDeviceSize>(indirect);
2808 
2809     if (mVertexArray->getStreamingVertexAttribsMask().any())
2810     {
2811         // We have instanced vertex attributes that need to be emulated for Vulkan.
2812         // Invalidate any cache and map the buffer so that we can read the indirect data.
2813         // Mapping the buffer will cause a flush.
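        // For reference, VkDrawIndexedIndirectCommand is laid out as:
        //   { uint32_t indexCount; uint32_t instanceCount; uint32_t firstIndex;
        //     int32_t vertexOffset; uint32_t firstInstance; }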
2814         ANGLE_TRY(
2815             currentIndirectBuf->invalidate(mRenderer, 0, sizeof(VkDrawIndexedIndirectCommand)));
2816         uint8_t *buffPtr;
2817         ANGLE_TRY(currentIndirectBuf->map(this, &buffPtr));
2818         const VkDrawIndexedIndirectCommand *indirectData =
2819             reinterpret_cast<VkDrawIndexedIndirectCommand *>(buffPtr + currentIndirectBufOffset);
2820 
2821         ANGLE_TRY(drawElementsInstanced(context, mode, indirectData->indexCount, type, nullptr,
2822                                         indirectData->instanceCount));
2823 
2824         currentIndirectBuf->unmap(mRenderer);
2825         return angle::Result::Continue;
2826     }
2827 
2828     if (shouldConvertUint8VkIndexType(type) && mGraphicsDirtyBits[DIRTY_BIT_INDEX_BUFFER])
2829     {
2830         ANGLE_PERF_WARNING(getDebug(), GL_DEBUG_SEVERITY_LOW,
2831                            "Potential inefficiency emulating uint8 index types due to lack "
2832                            "of hardware support");
2833 
2834         vk::BufferHelper *dstIndirectBuf;
2835         VkDeviceSize dstIndirectBufOffset;
2836 
2837         ANGLE_TRY(mVertexArray->convertIndexBufferIndirectGPU(
2838             this, currentIndirectBuf, currentIndirectBufOffset, &dstIndirectBuf,
2839             &dstIndirectBufOffset));
2840 
2841         currentIndirectBuf       = dstIndirectBuf;
2842         currentIndirectBufOffset = dstIndirectBufOffset;
2843     }
2844 
2845     if (mode == gl::PrimitiveMode::LineLoop)
2846     {
2847         vk::BufferHelper *dstIndirectBuf;
2848         VkDeviceSize dstIndirectBufOffset;
2849 
2850         ANGLE_TRY(setupLineLoopIndexedIndirectDraw(context, mode, type, currentIndirectBuf,
2851                                                    currentIndirectBufOffset, &dstIndirectBuf,
2852                                                    &dstIndirectBufOffset));
2853 
2854         currentIndirectBuf       = dstIndirectBuf;
2855         currentIndirectBufOffset = dstIndirectBufOffset;
2856     }
2857     else
2858     {
2859         ANGLE_TRY(setupIndexedIndirectDraw(context, mode, type, currentIndirectBuf,
2860                                            currentIndirectBufOffset));
2861     }
2862 
2863     mRenderPassCommandBuffer->drawIndexedIndirect(currentIndirectBuf->getBuffer(),
2864                                                   currentIndirectBufOffset, 1, 0);
2865     return angle::Result::Continue;
2866 }
2867 
2868 angle::Result ContextVk::multiDrawArrays(const gl::Context *context,
2869                                          gl::PrimitiveMode mode,
2870                                          const GLint *firsts,
2871                                          const GLsizei *counts,
2872                                          GLsizei drawcount)
2873 {
2874     return rx::MultiDrawArraysGeneral(this, context, mode, firsts, counts, drawcount);
2875 }
2876 
2877 angle::Result ContextVk::multiDrawArraysInstanced(const gl::Context *context,
2878                                                   gl::PrimitiveMode mode,
2879                                                   const GLint *firsts,
2880                                                   const GLsizei *counts,
2881                                                   const GLsizei *instanceCounts,
2882                                                   GLsizei drawcount)
2883 {
2884     return rx::MultiDrawArraysInstancedGeneral(this, context, mode, firsts, counts, instanceCounts,
2885                                                drawcount);
2886 }
2887 
2888 angle::Result ContextVk::multiDrawElements(const gl::Context *context,
2889                                            gl::PrimitiveMode mode,
2890                                            const GLsizei *counts,
2891                                            gl::DrawElementsType type,
2892                                            const GLvoid *const *indices,
2893                                            GLsizei drawcount)
2894 {
2895     return rx::MultiDrawElementsGeneral(this, context, mode, counts, type, indices, drawcount);
2896 }
2897 
2898 angle::Result ContextVk::multiDrawElementsInstanced(const gl::Context *context,
2899                                                     gl::PrimitiveMode mode,
2900                                                     const GLsizei *counts,
2901                                                     gl::DrawElementsType type,
2902                                                     const GLvoid *const *indices,
2903                                                     const GLsizei *instanceCounts,
2904                                                     GLsizei drawcount)
2905 {
2906     return rx::MultiDrawElementsInstancedGeneral(this, context, mode, counts, type, indices,
2907                                                  instanceCounts, drawcount);
2908 }
2909 
2910 angle::Result ContextVk::multiDrawArraysInstancedBaseInstance(const gl::Context *context,
2911                                                               gl::PrimitiveMode mode,
2912                                                               const GLint *firsts,
2913                                                               const GLsizei *counts,
2914                                                               const GLsizei *instanceCounts,
2915                                                               const GLuint *baseInstances,
2916                                                               GLsizei drawcount)
2917 {
2918     return rx::MultiDrawArraysInstancedBaseInstanceGeneral(
2919         this, context, mode, firsts, counts, instanceCounts, baseInstances, drawcount);
2920 }
2921 
2922 angle::Result ContextVk::multiDrawElementsInstancedBaseVertexBaseInstance(
2923     const gl::Context *context,
2924     gl::PrimitiveMode mode,
2925     const GLsizei *counts,
2926     gl::DrawElementsType type,
2927     const GLvoid *const *indices,
2928     const GLsizei *instanceCounts,
2929     const GLint *baseVertices,
2930     const GLuint *baseInstances,
2931     GLsizei drawcount)
2932 {
2933     return rx::MultiDrawElementsInstancedBaseVertexBaseInstanceGeneral(
2934         this, context, mode, counts, type, indices, instanceCounts, baseVertices, baseInstances,
2935         drawcount);
2936 }
2937 
2938 void ContextVk::optimizeRenderPassForPresent(VkFramebuffer framebufferHandle)
2939 {
2940     if (!mRenderPassCommands->started())
2941     {
2942         return;
2943     }
2944 
2945     if (framebufferHandle != mRenderPassCommands->getFramebufferHandle())
2946     {
2947         return;
2948     }
2949 
2950     RenderTargetVk *color0RenderTarget = mDrawFramebuffer->getColorDrawRenderTarget(0);
2951     if (!color0RenderTarget)
2952     {
2953         return;
2954     }
2955 
2956     // EGL 1.5 spec: the contents of ancillary buffers are always undefined after calling
2957     // eglSwapBuffers.
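    // Dropping these store ops lets tile-based GPUs skip writing the depth/stencil attachment
    // back to memory when the render pass ends.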
2958     RenderTargetVk *depthStencilRenderTarget = mDrawFramebuffer->getDepthStencilRenderTarget();
2959     if (depthStencilRenderTarget)
2960     {
2961         // Change the depth/stencil attachment storeOp to DONT_CARE
2962         const gl::DepthStencilState &dsState = mState.getDepthStencilState();
2963         mRenderPassCommands->invalidateRenderPassStencilAttachment(
2964             dsState, mRenderPassCommands->getRenderArea());
2965         mRenderPassCommands->invalidateRenderPassDepthAttachment(
2966             dsState, mRenderPassCommands->getRenderArea());
2967         // Mark the contents as invalid so that they are not loaded in the next render pass
2968         depthStencilRenderTarget->invalidateEntireContent(this);
2969         depthStencilRenderTarget->invalidateEntireStencilContent(this);
2970     }
2971 
2972     // Use finalLayout instead of an extra barrier for the layout change to present
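    // (The render pass's finalLayout performs the transition to PRESENT_SRC_KHR as the pass
    // ends, avoiding a separate vkCmdPipelineBarrier.)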
2973     vk::ImageHelper &image = color0RenderTarget->getImageForWrite();
2974     mRenderPassCommands->setImageOptimizeForPresent(&image);
2975 }
2976 
2977 gl::GraphicsResetStatus ContextVk::getResetStatus()
2978 {
2979     if (mRenderer->isDeviceLost())
2980     {
2981         // TODO(geofflang): It may be possible to track which context caused the device lost and
2982         // return either GL_GUILTY_CONTEXT_RESET or GL_INNOCENT_CONTEXT_RESET.
2983         // http://anglebug.com/2787
2984         return gl::GraphicsResetStatus::UnknownContextReset;
2985     }
2986 
2987     return gl::GraphicsResetStatus::NoError;
2988 }
2989 
2990 angle::Result ContextVk::insertEventMarker(GLsizei length, const char *marker)
2991 {
2992     if (!mRenderer->enableDebugUtils() && !mRenderer->angleDebuggerMode())
2993     {
2994         return angle::Result::Continue;
2995     }
2996 
2997     VkDebugUtilsLabelEXT label;
2998     vk::MakeDebugUtilsLabel(GL_DEBUG_SOURCE_APPLICATION, marker, &label);
2999     mOutsideRenderPassCommands->getCommandBuffer().insertDebugUtilsLabelEXT(label);
3000 
3001     return angle::Result::Continue;
3002 }
3003 
3004 angle::Result ContextVk::pushGroupMarker(GLsizei length, const char *marker)
3005 {
3006     if (!mRenderer->enableDebugUtils() && !mRenderer->angleDebuggerMode())
3007     {
3008         return angle::Result::Continue;
3009     }
3010 
3011     VkDebugUtilsLabelEXT label;
3012     vk::MakeDebugUtilsLabel(GL_DEBUG_SOURCE_APPLICATION, marker, &label);
3013     mOutsideRenderPassCommands->getCommandBuffer().beginDebugUtilsLabelEXT(label);
3014 
3015     return angle::Result::Continue;
3016 }
3017 
3018 angle::Result ContextVk::popGroupMarker()
3019 {
3020     if (!mRenderer->enableDebugUtils() && !mRenderer->angleDebuggerMode())
3021     {
3022         return angle::Result::Continue;
3023     }
3024 
3025     mOutsideRenderPassCommands->getCommandBuffer().endDebugUtilsLabelEXT();
3026 
3027     return angle::Result::Continue;
3028 }
3029 
3030 angle::Result ContextVk::pushDebugGroup(const gl::Context *context,
3031                                         GLenum source,
3032                                         GLuint id,
3033                                         const std::string &message)
3034 {
3035     if (!mRenderer->enableDebugUtils() && !mRenderer->angleDebuggerMode())
3036     {
3037         return angle::Result::Continue;
3038     }
3039 
3040     VkDebugUtilsLabelEXT label;
3041     vk::MakeDebugUtilsLabel(source, message.c_str(), &label);
3042     mOutsideRenderPassCommands->getCommandBuffer().beginDebugUtilsLabelEXT(label);
3043 
3044     return angle::Result::Continue;
3045 }
3046 
3047 angle::Result ContextVk::popDebugGroup(const gl::Context *context)
3048 {
3049     if (!mRenderer->enableDebugUtils() && !mRenderer->angleDebuggerMode())
3050     {
3051         return angle::Result::Continue;
3052     }
3053 
3054     mOutsideRenderPassCommands->getCommandBuffer().endDebugUtilsLabelEXT();
3055 
3056     return angle::Result::Continue;
3057 }
3058 
3059 void ContextVk::logEvent(const char *eventString)
3060 {
3061     if (!mRenderer->angleDebuggerMode())
3062     {
3063         return;
3064     }
3065 
3066     // Save this event (about an OpenGL ES command being called).
3067     mEventLog.push_back(eventString);
3068 
3069     // Set a dirty bit in order to stay off the "hot path" when not logging.
3070     mGraphicsDirtyBits.set(DIRTY_BIT_EVENT_LOG);
3071     mComputeDirtyBits.set(DIRTY_BIT_EVENT_LOG);
3072 }
3073 
3074 void ContextVk::endEventLog(angle::EntryPoint entryPoint, PipelineType pipelineType)
3075 {
3076     if (!mRenderer->angleDebuggerMode())
3077     {
3078         return;
3079     }
3080 
3081     if (pipelineType == PipelineType::Graphics)
3082     {
3083         ASSERT(mRenderPassCommands);
3084         mRenderPassCommands->getCommandBuffer().endDebugUtilsLabelEXT();
3085     }
3086     else
3087     {
3088         ASSERT(pipelineType == PipelineType::Compute);
3089         ASSERT(mOutsideRenderPassCommands);
3090         mOutsideRenderPassCommands->getCommandBuffer().endDebugUtilsLabelEXT();
3091     }
3092 }
3093 void ContextVk::endEventLogForClearOrQuery()
3094 {
3095     if (!mRenderer->angleDebuggerMode())
3096     {
3097         return;
3098     }
3099 
3100     vk::CommandBuffer *commandBuffer = nullptr;
3101     switch (mQueryEventType)
3102     {
3103         case GraphicsEventCmdBuf::InOutsideCmdBufQueryCmd:
3104             ASSERT(mOutsideRenderPassCommands);
3105             commandBuffer = &mOutsideRenderPassCommands->getCommandBuffer();
3106             break;
3107         case GraphicsEventCmdBuf::InRenderPassCmdBufQueryCmd:
3108             ASSERT(mRenderPassCommands);
3109             commandBuffer = &mRenderPassCommands->getCommandBuffer();
3110             break;
3111         case GraphicsEventCmdBuf::NotInQueryCmd:
3112             // The glClear* or gl*Query* command was no-op'd or otherwise ended early.  We could
3113             // call handleDirtyEventLogImpl() to start the hierarchy, but it isn't clear which (if
3114             // any) command buffer to use.  We'll just skip processing this command (other than to
3115             // let it stay queued for the next time handleDirtyEventLogImpl() is called).
3116             return;
3117         default:
3118             UNREACHABLE();
3119     }
3120     commandBuffer->endDebugUtilsLabelEXT();
3121 
3122     mQueryEventType = GraphicsEventCmdBuf::NotInQueryCmd;
3123 }
3124 
3125 angle::Result ContextVk::handleNoopDrawEvent()
3126 {
3127     // Even though this draw call is being no-op'd, we must still handle the dirty event log.
3128     return handleDirtyEventLogImpl(mRenderPassCommandBuffer);
3129 }
3130 
3131 angle::Result ContextVk::handleGraphicsEventLog(GraphicsEventCmdBuf queryEventType)
3132 {
3133     ASSERT(mQueryEventType == GraphicsEventCmdBuf::NotInQueryCmd);
3134     if (!mRenderer->angleDebuggerMode())
3135     {
3136         return angle::Result::Continue;
3137     }
3138 
3139     mQueryEventType = queryEventType;
3140 
3141     vk::CommandBuffer *commandBuffer = nullptr;
3142     switch (mQueryEventType)
3143     {
3144         case GraphicsEventCmdBuf::InOutsideCmdBufQueryCmd:
3145             ASSERT(mOutsideRenderPassCommands);
3146             commandBuffer = &mOutsideRenderPassCommands->getCommandBuffer();
3147             break;
3148         case GraphicsEventCmdBuf::InRenderPassCmdBufQueryCmd:
3149             ASSERT(mRenderPassCommands);
3150             commandBuffer = &mRenderPassCommands->getCommandBuffer();
3151             break;
3152         default:
3153             UNREACHABLE();
3154     }
3155     return handleDirtyEventLogImpl(commandBuffer);
3156 }
3157 
3158 bool ContextVk::isViewportFlipEnabledForDrawFBO() const
3159 {
3160     return mFlipViewportForDrawFramebuffer && mFlipYForCurrentSurface;
3161 }
3162 
3163 bool ContextVk::isViewportFlipEnabledForReadFBO() const
3164 {
3165     return mFlipViewportForReadFramebuffer;
3166 }
3167 
3168 bool ContextVk::isRotatedAspectRatioForDrawFBO() const
3169 {
3170     return IsRotatedAspectRatio(mCurrentRotationDrawFramebuffer);
3171 }
3172 
3173 bool ContextVk::isRotatedAspectRatioForReadFBO() const
3174 {
3175     return IsRotatedAspectRatio(mCurrentRotationReadFramebuffer);
3176 }
3177 
3178 SurfaceRotation ContextVk::getRotationDrawFramebuffer() const
3179 {
3180     return mCurrentRotationDrawFramebuffer;
3181 }
3182 
3183 SurfaceRotation ContextVk::getRotationReadFramebuffer() const
3184 {
3185     return mCurrentRotationReadFramebuffer;
3186 }
3187 
3188 void ContextVk::updateColorMasks(const gl::BlendStateExt &blendStateExt)
3189 {
3190     mClearColorMasks = blendStateExt.mColorMask;
3191 
3192     FramebufferVk *framebufferVk = vk::GetImpl(mState.getDrawFramebuffer());
3193     mGraphicsPipelineDesc->updateColorWriteMasks(&mGraphicsPipelineTransition, mClearColorMasks,
3194                                                  framebufferVk->getEmulatedAlphaAttachmentMask(),
3195                                                  framebufferVk->getState().getEnabledDrawBuffers());
3196 }
3197 
3198 void ContextVk::updateSampleMaskWithRasterizationSamples(const uint32_t rasterizationSamples)
3199 {
3200     // FramebufferVk::syncState could have been the origin of this call, at which point the
3201     // draw FBO may have changed; retrieve the latest draw FBO.
3202     FramebufferVk *drawFramebuffer = vk::GetImpl(mState.getDrawFramebuffer());
3203 
3204     // If sample coverage is enabled, emulate it by generating and applying a mask on top of the
3205     // sample mask.
3206     uint32_t coverageSampleCount = GetCoverageSampleCount(mState, drawFramebuffer);
3207 
3208     static_assert(sizeof(uint32_t) == sizeof(GLbitfield), "Vulkan assumes 32-bit sample masks");
3209     for (uint32_t maskNumber = 0; maskNumber < mState.getMaxSampleMaskWords(); ++maskNumber)
3210     {
3211         uint32_t mask = mState.isSampleMaskEnabled() && rasterizationSamples > 1
3212                             ? mState.getSampleMaskWord(maskNumber)
3213                             : std::numeric_limits<uint32_t>::max();
3214 
3215         ApplySampleCoverage(mState, coverageSampleCount, maskNumber, &mask);
3216 
3217         mGraphicsPipelineDesc->updateSampleMask(&mGraphicsPipelineTransition, maskNumber, mask);
3218     }
3219 }
3220 
3221 gl::Rectangle ContextVk::getCorrectedViewport(const gl::Rectangle &viewport) const
3222 {
3223     const gl::Caps &caps                   = getCaps();
3224     const VkPhysicalDeviceLimits &limitsVk = mRenderer->getPhysicalDeviceProperties().limits;
3225     const int viewportBoundsRangeLow       = static_cast<int>(limitsVk.viewportBoundsRange[0]);
3226     const int viewportBoundsRangeHigh      = static_cast<int>(limitsVk.viewportBoundsRange[1]);
3227 
3228     // Clamp the viewport values to what Vulkan specifies
3229 
3230     // width must be greater than 0.0 and less than or equal to
3231     // VkPhysicalDeviceLimits::maxViewportDimensions[0]
3232     int correctedWidth = std::min<int>(viewport.width, caps.maxViewportWidth);
3233     correctedWidth     = std::max<int>(correctedWidth, 0);
3234     // height must be greater than 0.0 and less than or equal to
3235     // VkPhysicalDeviceLimits::maxViewportDimensions[1]
3236     int correctedHeight = std::min<int>(viewport.height, caps.maxViewportHeight);
3237     correctedHeight     = std::max<int>(correctedHeight, 0);
3238     // x and y must each be between viewportBoundsRange[0] and viewportBoundsRange[1], inclusive.
3239     // The viewport size cannot be 0, so ensure there is always room for a 1x1 viewport.
3240     int correctedX = std::min<int>(viewport.x, viewportBoundsRangeHigh - 1);
3241     correctedX     = std::max<int>(correctedX, viewportBoundsRangeLow);
3242     int correctedY = std::min<int>(viewport.y, viewportBoundsRangeHigh - 1);
3243     correctedY     = std::max<int>(correctedY, viewportBoundsRangeLow);
3244     // x + width must be less than or equal to viewportBoundsRange[1]
3245     if ((correctedX + correctedWidth) > viewportBoundsRangeHigh)
3246     {
3247         correctedWidth = viewportBoundsRangeHigh - correctedX;
3248     }
3249     // y + height must be less than or equal to viewportBoundsRange[1]
3250     if ((correctedY + correctedHeight) > viewportBoundsRangeHigh)
3251     {
3252         correctedHeight = viewportBoundsRangeHigh - correctedY;
3253     }
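    // Example (illustrative limits): with viewportBoundsRange = [-32768, 32767], a viewport of
    // x = 32700 and width = 100 is clamped to width = 67 so that x + width <= 32767.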
3254 
3255     return gl::Rectangle(correctedX, correctedY, correctedWidth, correctedHeight);
3256 }
3257 
3258 void ContextVk::updateViewport(FramebufferVk *framebufferVk,
3259                                const gl::Rectangle &viewport,
3260                                float nearPlane,
3261                                float farPlane)
3262 {
3263 
3264     gl::Box fbDimensions        = framebufferVk->getState().getDimensions();
3265     gl::Rectangle correctedRect = getCorrectedViewport(viewport);
3266     gl::Rectangle rotatedRect;
3267     RotateRectangle(getRotationDrawFramebuffer(), false, fbDimensions.width, fbDimensions.height,
3268                     correctedRect, &rotatedRect);
3269 
3270     bool invertViewport =
3271         isViewportFlipEnabledForDrawFBO() && getFeatures().supportsNegativeViewport.enabled;
3272 
3273     gl_vk::GetViewport(
3274         rotatedRect, nearPlane, farPlane, invertViewport,
3275         // If clip space origin is upper left, viewport origin's y value will be offset by the
3276         // height of the viewport when clip space is mapped into screen space.
3277         mState.getClipSpaceOrigin() == gl::ClipSpaceOrigin::UpperLeft,
3278         // If the surface is rotated 90/270 degrees, use the framebuffer's width instead of the
3279         // height for calculating the final viewport.
3280         isRotatedAspectRatioForDrawFBO() ? fbDimensions.width : fbDimensions.height, &mViewport);
3281 
3282     // Ensure viewport is within Vulkan requirements
3283     vk::ClampViewport(&mViewport);
3284 
3285     invalidateGraphicsDriverUniforms();
3286     mGraphicsDirtyBits.set(DIRTY_BIT_VIEWPORT);
3287 }
3288 
3289 void ContextVk::updateDepthRange(float nearPlane, float farPlane)
3290 {
3291     // GLES 2.0, Section 2.12.1: Each of n and f is clamped to lie within [0, 1], as are all
3292     // arguments of type clampf.
3293     ASSERT(nearPlane >= 0.0f && nearPlane <= 1.0f);
3294     ASSERT(farPlane >= 0.0f && farPlane <= 1.0f);
3295     mViewport.minDepth = nearPlane;
3296     mViewport.maxDepth = farPlane;
3297 
3298     invalidateGraphicsDriverUniforms();
3299     mGraphicsDirtyBits.set(DIRTY_BIT_VIEWPORT);
3300 }
3301 
3302 void ContextVk::updateScissor(const gl::State &glState)
3303 {
3304     FramebufferVk *framebufferVk = vk::GetImpl(glState.getDrawFramebuffer());
3305     gl::Rectangle renderArea     = framebufferVk->getNonRotatedCompleteRenderArea();
3306 
3307     // Clip the render area to the viewport.
3308     gl::Rectangle viewportClippedRenderArea;
3309     if (!gl::ClipRectangle(renderArea, getCorrectedViewport(glState.getViewport()),
3310                            &viewportClippedRenderArea))
3311     {
3312         viewportClippedRenderArea = gl::Rectangle();
3313     }
3314 
3315     gl::Rectangle scissoredArea = ClipRectToScissor(getState(), viewportClippedRenderArea, false);
3316     gl::Rectangle rotatedScissoredArea;
3317     RotateRectangle(getRotationDrawFramebuffer(), isViewportFlipEnabledForDrawFBO(),
3318                     renderArea.width, renderArea.height, scissoredArea, &rotatedScissoredArea);
3319     mScissor = gl_vk::GetRect(rotatedScissoredArea);
3320     mGraphicsDirtyBits.set(DIRTY_BIT_SCISSOR);
3321 
3322     // If the scissor has grown beyond the previous scissoredRenderArea, grow the render pass render
3323     // area.  The only undesirable effect this may have is that if the render area does not cover a
3324     // previously invalidated area, that invalidate will have to be discarded.
3325     if (mRenderPassCommandBuffer &&
3326         !mRenderPassCommands->getRenderArea().encloses(rotatedScissoredArea))
3327     {
3328         ASSERT(mRenderPassCommands->started());
3329         mRenderPassCommands->growRenderArea(this, rotatedScissoredArea);
3330     }
3331 }
3332 
3333 void ContextVk::updateDepthStencil(const gl::State &glState)
3334 {
3335     const gl::DepthStencilState depthStencilState = glState.getDepthStencilState();
3336 
3337     gl::Framebuffer *drawFramebuffer = mState.getDrawFramebuffer();
3338     mGraphicsPipelineDesc->updateDepthTestEnabled(&mGraphicsPipelineTransition, depthStencilState,
3339                                                   drawFramebuffer);
3340     mGraphicsPipelineDesc->updateDepthWriteEnabled(&mGraphicsPipelineTransition, depthStencilState,
3341                                                    drawFramebuffer);
3342     mGraphicsPipelineDesc->updateStencilTestEnabled(&mGraphicsPipelineTransition, depthStencilState,
3343                                                     drawFramebuffer);
3344     mGraphicsPipelineDesc->updateStencilFrontWriteMask(&mGraphicsPipelineTransition,
3345                                                        depthStencilState, drawFramebuffer);
3346     mGraphicsPipelineDesc->updateStencilBackWriteMask(&mGraphicsPipelineTransition,
3347                                                       depthStencilState, drawFramebuffer);
3348 }
3349 
3350 // If the target is single-sampled, sample shading should be disabled so that the Bresenham
3351 // line rasterization feature can be used.
3352 void ContextVk::updateSampleShadingWithRasterizationSamples(const uint32_t rasterizationSamples)
3353 {
3354     bool sampleShadingEnable =
3355         (rasterizationSamples <= 1 ? false : mState.isSampleShadingEnabled());
3356 
3357     mGraphicsPipelineDesc->updateSampleShading(&mGraphicsPipelineTransition, sampleShadingEnable,
3358                                                mState.getMinSampleShading());
3359 }
3360 
3361 // If the target switches between single-sampled and multisampled, state that depends on the
3362 // rasterization sample count must be updated.
3363 void ContextVk::updateRasterizationSamples(const uint32_t rasterizationSamples)
3364 {
3365     mGraphicsPipelineDesc->updateRasterizationSamples(&mGraphicsPipelineTransition,
3366                                                       rasterizationSamples);
3367     updateSampleShadingWithRasterizationSamples(rasterizationSamples);
3368     updateSampleMaskWithRasterizationSamples(rasterizationSamples);
3369 }
3370 
3371 void ContextVk::updateRasterizerDiscardEnabled(bool isPrimitivesGeneratedQueryActive)
3372 {
3373     // On some devices, when rasterizerDiscardEnable is enabled, the
3374     // VK_EXT_primitives_generated_query as well as the pipeline statistics query used to emulate it
3375     // are non-functional.  For VK_EXT_primitives_generated_query there's a feature bit but not for
3376     // pipeline statistics query.  If the primitives generated query is active (and rasterizer
3377     // discard is not supported), rasterizerDiscardEnable is set to false and the functionality
3378     // is otherwise emulated (by using an empty scissor).
3379 
3380     // If the primitives generated query implementation supports rasterizer discard, just set
3381     // rasterizer discard as requested.  Otherwise disable it.
3382     bool isRasterizerDiscardEnabled   = mState.isRasterizerDiscardEnabled();
3383     bool isEmulatingRasterizerDiscard = isEmulatingRasterizerDiscardDuringPrimitivesGeneratedQuery(
3384         isPrimitivesGeneratedQueryActive);
3385 
3386     mGraphicsPipelineDesc->updateRasterizerDiscardEnabled(
3387         &mGraphicsPipelineTransition, isRasterizerDiscardEnabled && !isEmulatingRasterizerDiscard);
3388 
3389     invalidateCurrentGraphicsPipeline();
3390 
3391     if (!isEmulatingRasterizerDiscard)
3392     {
3393         return;
3394     }
3395 
3396     // If we are emulating rasterizer discard, update the scissor if in render pass.  If not in
3397     // render pass, DIRTY_BIT_SCISSOR will be set when the render pass next starts.
3398     if (hasStartedRenderPass())
3399     {
3400         handleDirtyGraphicsScissorImpl(isPrimitivesGeneratedQueryActive);
3401     }
3402 }
3403 
3404 void ContextVk::invalidateProgramBindingHelper(const gl::State &glState)
3405 {
3406     mProgram         = nullptr;
3407     mProgramPipeline = nullptr;
3408     mExecutable      = nullptr;
3409 
3410     if (glState.getProgram())
3411     {
3412         mProgram    = vk::GetImpl(glState.getProgram());
3413         mExecutable = &mProgram->getExecutable();
3414     }
3415 
3416     if (glState.getProgramPipeline())
3417     {
3418         mProgramPipeline = vk::GetImpl(glState.getProgramPipeline());
3419         if (!mExecutable)
3420         {
3421             // A bound program always overrides a program pipeline
3422             mExecutable = &mProgramPipeline->getExecutable();
3423         }
3424     }
3425 
3426     if (mProgram)
3427     {
3428         mProgram->onProgramBind();
3429     }
3430     else if (mProgramPipeline)
3431     {
3432         mProgramPipeline->onProgramBind(this);
3433     }
3434 }
3435 
3436 angle::Result ContextVk::invalidateProgramExecutableHelper(const gl::Context *context)
3437 {
3438     const gl::State &glState                = context->getState();
3439     const gl::ProgramExecutable *executable = glState.getProgramExecutable();
3440 
3441     if (glState.getProgramExecutable()->isCompute())
3442     {
3443         invalidateCurrentComputePipeline();
3444     }
3445     else
3446     {
3447         invalidateCurrentGraphicsPipeline();
3448         // No additional work is needed here. We will update the pipeline desc
3449         // later.
3450         invalidateDefaultAttributes(context->getStateCache().getActiveDefaultAttribsMask());
3451         invalidateVertexAndIndexBuffers();
3452         bool useVertexBuffer = (executable->getMaxActiveAttribLocation() > 0);
3453         mNonIndexedDirtyBitsMask.set(DIRTY_BIT_VERTEX_BUFFERS, useVertexBuffer);
3454         mIndexedDirtyBitsMask.set(DIRTY_BIT_VERTEX_BUFFERS, useVertexBuffer);
3455         mCurrentGraphicsPipeline = nullptr;
3456         mGraphicsPipelineTransition.reset();
3457 
3458         ASSERT(mExecutable);
3459         mExecutable->updateEarlyFragmentTestsOptimization(this);
3460 
3461         if (mLastProgramUsesFramebufferFetch != executable->usesFramebufferFetch())
3462         {
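            // Switching framebuffer fetch on/off changes how color attachments are used (input
            // attachment vs. plain color attachment), so the current render pass must be ended.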
3463             mLastProgramUsesFramebufferFetch = executable->usesFramebufferFetch();
3464             ANGLE_TRY(flushCommandsAndEndRenderPass());
3465 
3466             ASSERT(mDrawFramebuffer);
3467             mDrawFramebuffer->onSwitchProgramFramebufferFetch(this,
3468                                                               executable->usesFramebufferFetch());
3469         }
3470     }
3471 
3472     return angle::Result::Continue;
3473 }
3474 
3475 angle::Result ContextVk::syncState(const gl::Context *context,
3476                                    const gl::State::DirtyBits &dirtyBits,
3477                                    const gl::State::DirtyBits &bitMask)
3478 {
3479     const gl::State &glState                       = context->getState();
3480     const gl::ProgramExecutable *programExecutable = glState.getProgramExecutable();
3481 
3482     if ((dirtyBits & mPipelineDirtyBitsMask).any() &&
3483         (programExecutable == nullptr || !programExecutable->isCompute()))
3484     {
3485         invalidateCurrentGraphicsPipeline();
3486     }
3487 
3488     for (auto iter = dirtyBits.begin(), endIter = dirtyBits.end(); iter != endIter; ++iter)
3489     {
3490         size_t dirtyBit = *iter;
3491         switch (dirtyBit)
3492         {
3493             case gl::State::DIRTY_BIT_SCISSOR_TEST_ENABLED:
3494             case gl::State::DIRTY_BIT_SCISSOR:
3495                 updateScissor(glState);
3496                 break;
3497             case gl::State::DIRTY_BIT_VIEWPORT:
3498             {
3499                 FramebufferVk *framebufferVk = vk::GetImpl(glState.getDrawFramebuffer());
3500                 updateViewport(framebufferVk, glState.getViewport(), glState.getNearPlane(),
3501                                glState.getFarPlane());
3502                 // Update the scissor, which will be constrained to the viewport
3503                 updateScissor(glState);
3504                 break;
3505             }
3506             case gl::State::DIRTY_BIT_DEPTH_RANGE:
3507                 updateDepthRange(glState.getNearPlane(), glState.getFarPlane());
3508                 break;
3509             case gl::State::DIRTY_BIT_BLEND_ENABLED:
3510                 mGraphicsPipelineDesc->updateBlendEnabled(&mGraphicsPipelineTransition,
3511                                                           glState.getBlendStateExt().mEnabledMask);
3512                 break;
3513             case gl::State::DIRTY_BIT_BLEND_COLOR:
3514                 mGraphicsPipelineDesc->updateBlendColor(&mGraphicsPipelineTransition,
3515                                                         glState.getBlendColor());
3516                 break;
3517             case gl::State::DIRTY_BIT_BLEND_FUNCS:
3518                 mGraphicsPipelineDesc->updateBlendFuncs(&mGraphicsPipelineTransition,
3519                                                         glState.getBlendStateExt());
3520                 break;
3521             case gl::State::DIRTY_BIT_BLEND_EQUATIONS:
3522                 mGraphicsPipelineDesc->updateBlendEquations(&mGraphicsPipelineTransition,
3523                                                             glState.getBlendStateExt());
3524                 break;
3525             case gl::State::DIRTY_BIT_COLOR_MASK:
3526                 updateColorMasks(glState.getBlendStateExt());
3527                 break;
3528             case gl::State::DIRTY_BIT_SAMPLE_ALPHA_TO_COVERAGE_ENABLED:
3529                 mGraphicsPipelineDesc->updateAlphaToCoverageEnable(
3530                     &mGraphicsPipelineTransition, glState.isSampleAlphaToCoverageEnabled());
3531                 static_assert(gl::State::DIRTY_BIT_PROGRAM_EXECUTABLE >
3532                                   gl::State::DIRTY_BIT_SAMPLE_ALPHA_TO_COVERAGE_ENABLED,
3533                               "Dirty bit order");
3534                 iter.setLaterBit(gl::State::DIRTY_BIT_PROGRAM_EXECUTABLE);
3535                 break;
3536             case gl::State::DIRTY_BIT_SAMPLE_COVERAGE_ENABLED:
3537                 updateSampleMaskWithRasterizationSamples(mDrawFramebuffer->getSamples());
3538                 break;
3539             case gl::State::DIRTY_BIT_SAMPLE_COVERAGE:
3540                 updateSampleMaskWithRasterizationSamples(mDrawFramebuffer->getSamples());
3541                 break;
3542             case gl::State::DIRTY_BIT_SAMPLE_MASK_ENABLED:
3543                 updateSampleMaskWithRasterizationSamples(mDrawFramebuffer->getSamples());
3544                 break;
3545             case gl::State::DIRTY_BIT_SAMPLE_MASK:
3546                 updateSampleMaskWithRasterizationSamples(mDrawFramebuffer->getSamples());
3547                 break;
3548             case gl::State::DIRTY_BIT_DEPTH_TEST_ENABLED:
3549             {
3550                 mGraphicsPipelineDesc->updateDepthTestEnabled(&mGraphicsPipelineTransition,
3551                                                               glState.getDepthStencilState(),
3552                                                               glState.getDrawFramebuffer());
3553                 ANGLE_TRY(updateRenderPassDepthStencilAccess());
3554                 break;
3555             }
3556             case gl::State::DIRTY_BIT_DEPTH_FUNC:
3557                 mGraphicsPipelineDesc->updateDepthFunc(&mGraphicsPipelineTransition,
3558                                                        glState.getDepthStencilState());
3559                 break;
3560             case gl::State::DIRTY_BIT_DEPTH_MASK:
3561             {
3562                 mGraphicsPipelineDesc->updateDepthWriteEnabled(&mGraphicsPipelineTransition,
3563                                                                glState.getDepthStencilState(),
3564                                                                glState.getDrawFramebuffer());
3565                 ANGLE_TRY(updateRenderPassDepthStencilAccess());
3566                 break;
3567             }
3568             case gl::State::DIRTY_BIT_STENCIL_TEST_ENABLED:
3569             {
3570                 mGraphicsPipelineDesc->updateStencilTestEnabled(&mGraphicsPipelineTransition,
3571                                                                 glState.getDepthStencilState(),
3572                                                                 glState.getDrawFramebuffer());
3573                 ANGLE_TRY(updateRenderPassDepthStencilAccess());
3574                 break;
3575             }
3576             case gl::State::DIRTY_BIT_STENCIL_FUNCS_FRONT:
3577                 mGraphicsPipelineDesc->updateStencilFrontFuncs(&mGraphicsPipelineTransition,
3578                                                                glState.getStencilRef(),
3579                                                                glState.getDepthStencilState());
3580                 break;
3581             case gl::State::DIRTY_BIT_STENCIL_FUNCS_BACK:
3582                 mGraphicsPipelineDesc->updateStencilBackFuncs(&mGraphicsPipelineTransition,
3583                                                               glState.getStencilBackRef(),
3584                                                               glState.getDepthStencilState());
3585                 break;
3586             case gl::State::DIRTY_BIT_STENCIL_OPS_FRONT:
3587                 mGraphicsPipelineDesc->updateStencilFrontOps(&mGraphicsPipelineTransition,
3588                                                              glState.getDepthStencilState());
3589                 break;
3590             case gl::State::DIRTY_BIT_STENCIL_OPS_BACK:
3591                 mGraphicsPipelineDesc->updateStencilBackOps(&mGraphicsPipelineTransition,
3592                                                             glState.getDepthStencilState());
3593                 break;
3594             case gl::State::DIRTY_BIT_STENCIL_WRITEMASK_FRONT:
3595                 mGraphicsPipelineDesc->updateStencilFrontWriteMask(&mGraphicsPipelineTransition,
3596                                                                    glState.getDepthStencilState(),
3597                                                                    glState.getDrawFramebuffer());
3598                 break;
3599             case gl::State::DIRTY_BIT_STENCIL_WRITEMASK_BACK:
3600                 mGraphicsPipelineDesc->updateStencilBackWriteMask(&mGraphicsPipelineTransition,
3601                                                                   glState.getDepthStencilState(),
3602                                                                   glState.getDrawFramebuffer());
3603                 break;
3604             case gl::State::DIRTY_BIT_CULL_FACE_ENABLED:
3605             case gl::State::DIRTY_BIT_CULL_FACE:
3606                 mGraphicsPipelineDesc->updateCullMode(&mGraphicsPipelineTransition,
3607                                                       glState.getRasterizerState());
3608                 break;
3609             case gl::State::DIRTY_BIT_FRONT_FACE:
3610                 mGraphicsPipelineDesc->updateFrontFace(&mGraphicsPipelineTransition,
3611                                                        glState.getRasterizerState(),
3612                                                        isYFlipEnabledForDrawFBO());
3613                 break;
3614             case gl::State::DIRTY_BIT_POLYGON_OFFSET_FILL_ENABLED:
3615                 mGraphicsPipelineDesc->updatePolygonOffsetFillEnabled(
3616                     &mGraphicsPipelineTransition, glState.isPolygonOffsetFillEnabled());
3617                 break;
3618             case gl::State::DIRTY_BIT_POLYGON_OFFSET:
3619                 mGraphicsPipelineDesc->updatePolygonOffset(&mGraphicsPipelineTransition,
3620                                                            glState.getRasterizerState());
3621                 break;
3622             case gl::State::DIRTY_BIT_RASTERIZER_DISCARD_ENABLED:
3623                 updateRasterizerDiscardEnabled(
3624                     mState.isQueryActive(gl::QueryType::PrimitivesGenerated));
3625                 break;
3626             case gl::State::DIRTY_BIT_LINE_WIDTH:
3627                 mGraphicsPipelineDesc->updateLineWidth(&mGraphicsPipelineTransition,
3628                                                        glState.getLineWidth());
3629                 break;
3630             case gl::State::DIRTY_BIT_PRIMITIVE_RESTART_ENABLED:
3631                 mGraphicsPipelineDesc->updatePrimitiveRestartEnabled(
3632                     &mGraphicsPipelineTransition, glState.isPrimitiveRestartEnabled());
3633                 break;
3634             case gl::State::DIRTY_BIT_CLEAR_COLOR:
3635                 mClearColorValue.color.float32[0] = glState.getColorClearValue().red;
3636                 mClearColorValue.color.float32[1] = glState.getColorClearValue().green;
3637                 mClearColorValue.color.float32[2] = glState.getColorClearValue().blue;
3638                 mClearColorValue.color.float32[3] = glState.getColorClearValue().alpha;
3639                 break;
3640             case gl::State::DIRTY_BIT_CLEAR_DEPTH:
3641                 mClearDepthStencilValue.depthStencil.depth = glState.getDepthClearValue();
3642                 break;
3643             case gl::State::DIRTY_BIT_CLEAR_STENCIL:
3644                 mClearDepthStencilValue.depthStencil.stencil =
3645                     static_cast<uint32_t>(glState.getStencilClearValue());
3646                 break;
            case gl::State::DIRTY_BIT_UNPACK_STATE:
                // This is a no-op; it's only important to use the right unpack state when we
                // call setImage or setSubImage in TextureVk, which is plumbed through the
                // frontend call.
                break;
            case gl::State::DIRTY_BIT_UNPACK_BUFFER_BINDING:
                break;
            case gl::State::DIRTY_BIT_PACK_STATE:
                // This is a no-op; it's only important to use the right pack state when we
                // call readPixels later on.
                break;
            case gl::State::DIRTY_BIT_PACK_BUFFER_BINDING:
                break;
            case gl::State::DIRTY_BIT_DITHER_ENABLED:
                break;
            case gl::State::DIRTY_BIT_READ_FRAMEBUFFER_BINDING:
                updateFlipViewportReadFramebuffer(context->getState());
                updateSurfaceRotationReadFramebuffer(glState);
                break;
            case gl::State::DIRTY_BIT_DRAW_FRAMEBUFFER_BINDING:
            {
                // FramebufferVk::syncState signals that we should start a new command buffer.
                // But changing the binding can skip FramebufferVk::syncState if the Framebuffer
                // has no dirty bits. Thus we need to explicitly clear the current command
                // buffer to ensure we start a new one. We don't actually close the render pass here
                // as some optimizations in non-draw commands require the render pass to remain
                // open, such as invalidate or blit. Note that we always start a new command buffer
                // because we currently can only support one open RenderPass at a time.
                onRenderPassFinished();
                if (mRenderer->getFeatures().preferSubmitAtFBOBoundary.enabled)
                {
                    // This will behave as if the user called glFlush, but the actual flush will be
                    // triggered at endRenderPass time.
                    mHasDeferredFlush = true;
                }
                gl::Framebuffer *drawFramebuffer = glState.getDrawFramebuffer();
                mDrawFramebuffer                 = vk::GetImpl(drawFramebuffer);
                mDrawFramebuffer->setReadOnlyDepthFeedbackLoopMode(false);
                updateFlipViewportDrawFramebuffer(glState);
                updateSurfaceRotationDrawFramebuffer(glState);
                SpecConstUsageBits usageBits = getCurrentProgramSpecConstUsageBits();
                updateGraphicsPipelineDescWithSpecConstUsageBits(usageBits);
                updateViewport(mDrawFramebuffer, glState.getViewport(), glState.getNearPlane(),
                               glState.getFarPlane());
                updateColorMasks(glState.getBlendStateExt());
                updateRasterizationSamples(mDrawFramebuffer->getSamples());
                updateRasterizerDiscardEnabled(
                    mState.isQueryActive(gl::QueryType::PrimitivesGenerated));

                mGraphicsPipelineDesc->updateFrontFace(&mGraphicsPipelineTransition,
                                                       glState.getRasterizerState(),
                                                       isYFlipEnabledForDrawFBO());
                updateScissor(glState);
                updateDepthStencil(glState);
                mGraphicsPipelineDesc->resetSubpass(&mGraphicsPipelineTransition);
                onDrawFramebufferRenderPassDescChange(mDrawFramebuffer, nullptr);
                break;
            }
            case gl::State::DIRTY_BIT_RENDERBUFFER_BINDING:
                break;
            case gl::State::DIRTY_BIT_VERTEX_ARRAY_BINDING:
            {
                mVertexArray = vk::GetImpl(glState.getVertexArray());
                invalidateDefaultAttributes(context->getStateCache().getActiveDefaultAttribsMask());
                ANGLE_TRY(mVertexArray->updateActiveAttribInfo(this));
                ANGLE_TRY(onIndexBufferChange(mVertexArray->getCurrentElementArrayBuffer()));
                break;
            }
            case gl::State::DIRTY_BIT_DRAW_INDIRECT_BUFFER_BINDING:
                break;
            case gl::State::DIRTY_BIT_DISPATCH_INDIRECT_BUFFER_BINDING:
                break;
            case gl::State::DIRTY_BIT_PROGRAM_BINDING:
                invalidateProgramBindingHelper(glState);
                break;
            case gl::State::DIRTY_BIT_PROGRAM_EXECUTABLE:
            {
                ASSERT(programExecutable);
                invalidateCurrentDefaultUniforms();
                static_assert(
                    gl::State::DIRTY_BIT_TEXTURE_BINDINGS > gl::State::DIRTY_BIT_PROGRAM_EXECUTABLE,
                    "Dirty bit order");
                iter.setLaterBit(gl::State::DIRTY_BIT_TEXTURE_BINDINGS);
                static_assert(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING >
                                  gl::State::DIRTY_BIT_PROGRAM_EXECUTABLE,
                              "Dirty bit order");
                iter.setLaterBit(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING);
                ANGLE_TRY(invalidateProgramExecutableHelper(context));
                break;
            }
            case gl::State::DIRTY_BIT_SAMPLER_BINDINGS:
            {
                static_assert(
                    gl::State::DIRTY_BIT_TEXTURE_BINDINGS > gl::State::DIRTY_BIT_SAMPLER_BINDINGS,
                    "Dirty bit order");
                iter.setLaterBit(gl::State::DIRTY_BIT_TEXTURE_BINDINGS);
                break;
            }
            case gl::State::DIRTY_BIT_TEXTURE_BINDINGS:
                ANGLE_TRY(invalidateCurrentTextures(context));
                break;
            case gl::State::DIRTY_BIT_TRANSFORM_FEEDBACK_BINDING:
                // Nothing to do.
                break;
            case gl::State::DIRTY_BIT_IMAGE_BINDINGS:
                static_assert(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING >
                                  gl::State::DIRTY_BIT_IMAGE_BINDINGS,
                              "Dirty bit order");
                iter.setLaterBit(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING);
                break;
            case gl::State::DIRTY_BIT_SHADER_STORAGE_BUFFER_BINDING:
                static_assert(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING >
                                  gl::State::DIRTY_BIT_SHADER_STORAGE_BUFFER_BINDING,
                              "Dirty bit order");
                iter.setLaterBit(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING);
                break;
            case gl::State::DIRTY_BIT_UNIFORM_BUFFER_BINDINGS:
                static_assert(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING >
                                  gl::State::DIRTY_BIT_UNIFORM_BUFFER_BINDINGS,
                              "Dirty bit order");
                iter.setLaterBit(gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING);
                break;
            case gl::State::DIRTY_BIT_ATOMIC_COUNTER_BUFFER_BINDING:
                ANGLE_TRY(invalidateCurrentShaderResources());
                invalidateDriverUniforms();
                break;
            case gl::State::DIRTY_BIT_MULTISAMPLING:
                // TODO(syoussefi): this should configure the pipeline to render as if
                // single-sampled, and write the results to all samples of a pixel regardless of
                // coverage. See EXT_multisample_compatibility.  http://anglebug.com/3204
                break;
            case gl::State::DIRTY_BIT_SAMPLE_ALPHA_TO_ONE:
                // TODO(syoussefi): this is part of EXT_multisample_compatibility.  The
                // alphaToOne Vulkan feature should be enabled to support this extension.
                // http://anglebug.com/3204
                mGraphicsPipelineDesc->updateAlphaToOneEnable(&mGraphicsPipelineTransition,
                                                              glState.isSampleAlphaToOneEnabled());
                break;
            case gl::State::DIRTY_BIT_SAMPLE_SHADING:
                updateSampleShadingWithRasterizationSamples(mDrawFramebuffer->getSamples());
                break;
            case gl::State::DIRTY_BIT_COVERAGE_MODULATION:
                break;
            case gl::State::DIRTY_BIT_FRAMEBUFFER_SRGB_WRITE_CONTROL_MODE:
                break;
            case gl::State::DIRTY_BIT_CURRENT_VALUES:
            {
                invalidateDefaultAttributes(glState.getAndResetDirtyCurrentValues());
                break;
            }
            case gl::State::DIRTY_BIT_PROVOKING_VERTEX:
                break;
            case gl::State::DIRTY_BIT_EXTENDED:
            {
                gl::State::ExtendedDirtyBits extendedDirtyBits =
                    glState.getAndResetExtendedDirtyBits();
                for (size_t extendedDirtyBit : extendedDirtyBits)
                {
                    switch (extendedDirtyBit)
                    {
                        case gl::State::ExtendedDirtyBitType::EXTENDED_DIRTY_BIT_CLIP_CONTROL:
                            updateViewport(vk::GetImpl(glState.getDrawFramebuffer()),
                                           glState.getViewport(), glState.getNearPlane(),
                                           glState.getFarPlane());
                            // Since we are flipping the y coordinate, update front face state
                            mGraphicsPipelineDesc->updateFrontFace(&mGraphicsPipelineTransition,
                                                                   glState.getRasterizerState(),
                                                                   isYFlipEnabledForDrawFBO());
                            updateScissor(glState);

                            // Nothing is needed for depth correction for EXT_clip_control;
                            // glState will be used to toggle the control path of the depth
                            // correction code in the SPIR-V transform options.
                            break;
                        case gl::State::ExtendedDirtyBitType::EXTENDED_DIRTY_BIT_CLIP_DISTANCES:
                            invalidateGraphicsDriverUniforms();
                            break;
                        case gl::State::ExtendedDirtyBitType::
                            EXTENDED_DIRTY_BIT_MIPMAP_GENERATION_HINT:
                            break;
                        case gl::State::ExtendedDirtyBitType::
                            EXTENDED_DIRTY_BIT_SHADER_DERIVATIVE_HINT:
                            break;
                        default:
                            UNREACHABLE();
                    }
                }
                break;
            }
            case gl::State::DIRTY_BIT_PATCH_VERTICES:
                mGraphicsPipelineDesc->updatePatchVertices(&mGraphicsPipelineTransition,
                                                           glState.getPatchVertices());
                break;
            default:
                UNREACHABLE();
                break;
        }
    }

    return angle::Result::Continue;
}
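
// Illustrative note (an addition, not part of the original source): the
// static_assert/setLaterBit pairs in syncState above rely on dirty bit
// ordering to forward work to a bit that has not been visited yet.  For
// example, when DIRTY_BIT_SAMPLER_BINDINGS is processed it sets
// DIRTY_BIT_TEXTURE_BINDINGS, so the texture descriptor update runs exactly
// once later in the same loop, regardless of how many triggers requested it.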

GLint ContextVk::getGPUDisjoint()
{
    // No extension seems to be available to query this information.
    return 0;
}

GLint64 ContextVk::getTimestamp()
{
    // This function should only be called if timestamp queries are available.
    ASSERT(mRenderer->getQueueFamilyProperties().timestampValidBits > 0);

    uint64_t timestamp = 0;

    (void)getTimestamp(&timestamp);

    return static_cast<GLint64>(timestamp);
}

angle::Result ContextVk::onMakeCurrent(const gl::Context *context)
{
    mRenderer->reloadVolkIfNeeded();

    // Flip viewports if the user did not request that the surface is flipped.
    egl::Surface *drawSurface = context->getCurrentDrawSurface();
    mFlipYForCurrentSurface =
        drawSurface != nullptr &&
        !IsMaskFlagSet(drawSurface->getOrientation(), EGL_SURFACE_ORIENTATION_INVERT_Y_ANGLE);

    if (drawSurface && drawSurface->getType() == EGL_WINDOW_BIT)
    {
        mCurrentWindowSurface = GetImplAs<WindowSurfaceVk>(drawSurface);
    }
    else
    {
        mCurrentWindowSurface = nullptr;
    }

    const gl::State &glState = context->getState();
    updateFlipViewportDrawFramebuffer(glState);
    updateFlipViewportReadFramebuffer(glState);
    updateSurfaceRotationDrawFramebuffer(glState);
    updateSurfaceRotationReadFramebuffer(glState);

    if (getFeatures().forceDriverUniformOverSpecConst.enabled)
    {
        invalidateDriverUniforms();
    }
    else
    {
        // Force update mGraphicsPipelineDesc
        mCurrentGraphicsPipeline = nullptr;
        invalidateCurrentGraphicsPipeline();
    }

    const gl::ProgramExecutable *executable = mState.getProgramExecutable();
    if (executable && executable->hasTransformFeedbackOutput() &&
        mState.isTransformFeedbackActive())
    {
        onTransformFeedbackStateChanged();
        if (getFeatures().supportsTransformFeedbackExtension.enabled)
        {
            mGraphicsDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME);
        }
    }

    return angle::Result::Continue;
}

angle::Result ContextVk::onUnMakeCurrent(const gl::Context *context)
{
    ANGLE_TRY(flushImpl(nullptr));
    mCurrentWindowSurface = nullptr;
    return angle::Result::Continue;
}

void ContextVk::updateFlipViewportDrawFramebuffer(const gl::State &glState)
{
    // The default framebuffer (originating from the swapchain) is rendered upside-down due to the
    // difference in the coordinate systems of Vulkan and GLES.  Rendering upside-down has the
    // effect that rendering is done the same way as OpenGL.  The VK_KHR_maintenance1 extension is
    // subsequently enabled to allow negative viewports.  We invert rendering to the backbuffer by
    // reversing the height of the viewport and increasing Y by the height.  So if the viewport was
    // (0, 0, width, height), it becomes (0, height, width, -height).  Unfortunately, when we start
    // doing this, we also need to adjust a number of places since the rendering now happens
    // upside-down.  Affected places so far:
    //
    // - readPixels
    // - copyTexImage
    // - framebuffer blit
    // - generating mipmaps
    // - Point sprites tests
    // - texStorage
    gl::Framebuffer *drawFramebuffer = glState.getDrawFramebuffer();
    mFlipViewportForDrawFramebuffer  = drawFramebuffer->isDefault();
}
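
// A minimal sketch (an addition, not part of the original source) of the
// flipped-viewport derivation described above, using a hypothetical helper:
//
//     VkViewport FlipViewportY(const VkViewport &vp)
//     {
//         VkViewport flipped = vp;
//         flipped.y          = vp.y + vp.height;  // move the origin down by height
//         flipped.height     = -vp.height;        // negative height flips Y
//         return flipped;
//     }
//
// With vp == {0, 0, width, height, 0.0f, 1.0f}, this yields
// {0, height, width, -height, 0.0f, 1.0f}, matching the comment above.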

void ContextVk::updateFlipViewportReadFramebuffer(const gl::State &glState)
{
    gl::Framebuffer *readFramebuffer = glState.getReadFramebuffer();
    mFlipViewportForReadFramebuffer  = readFramebuffer->isDefault();
}

SpecConstUsageBits ContextVk::getCurrentProgramSpecConstUsageBits() const
{
    SpecConstUsageBits usageBits;
    if (mState.getProgram())
    {
        usageBits = mState.getProgram()->getState().getSpecConstUsageBits();
    }
    else if (mState.getProgramPipeline())
    {
        usageBits = mState.getProgramPipeline()->getState().getSpecConstUsageBits();
    }
    return usageBits;
}

void ContextVk::updateGraphicsPipelineDescWithSpecConstUsageBits(SpecConstUsageBits usageBits)
{
    SurfaceRotation rotationAndFlip = mCurrentRotationDrawFramebuffer;
    ASSERT(ToUnderlying(rotationAndFlip) < ToUnderlying(SurfaceRotation::FlippedIdentity));
    bool yFlipped =
        isViewportFlipEnabledForDrawFBO() && (usageBits.test(sh::vk::SpecConstUsage::YFlip) ||
                                              !getFeatures().supportsNegativeViewport.enabled);

    // usageBits are only set when specialization constants are used.  With gl_Position pre-rotation
    // handled by the SPIR-V transformer, we need to have this information even when the driver
    // uniform path is taken to pre-rotate everything else.
    const bool programUsesRotation = usageBits.test(sh::vk::SpecConstUsage::Rotation) ||
                                     getFeatures().forceDriverUniformOverSpecConst.enabled;

    // If the program is not using rotation at all, we force it to use the Identity or
    // FlippedIdentity slot to improve the program cache hit rate.
    if (!programUsesRotation)
    {
        rotationAndFlip = yFlipped ? SurfaceRotation::FlippedIdentity : SurfaceRotation::Identity;
    }
    else if (yFlipped)
    {
        // DetermineSurfaceRotation() does not encode yflip information. Shader code uses the
        // SurfaceRotation specialization constant to determine yflip as well. We add yflip
        // information to the SurfaceRotation here so the shader does yflip properly.
        rotationAndFlip = static_cast<SurfaceRotation>(
            ToUnderlying(SurfaceRotation::FlippedIdentity) + ToUnderlying(rotationAndFlip));
    }
    else
    {
        // If the program is not using yflip, we just use the non-flipped slot to increase the
        // chance of a pipeline program cache hit, even if the drawable is yflipped.
    }

    if (rotationAndFlip != mGraphicsPipelineDesc->getSurfaceRotation())
    {
        // Surface rotation is a specialization constant, which affects program compilation. When
        // the rotation changes, we need to update GraphicsPipelineDesc so that the correct pipeline
        // program object will be retrieved.
        mGraphicsPipelineDesc->updateSurfaceRotation(&mGraphicsPipelineTransition, rotationAndFlip);
    }

    if (usageBits.test(sh::vk::SpecConstUsage::DrawableSize))
    {
        const gl::Box &dimensions = getState().getDrawFramebuffer()->getDimensions();
        mGraphicsPipelineDesc->updateDrawableSize(&mGraphicsPipelineTransition, dimensions.width,
                                                  dimensions.height);
    }
    else
    {
        // Always set the specialization constant to 1x1 if it is not used, so that pipeline
        // programs that differ only in drawable size can be reused.
        mGraphicsPipelineDesc->updateDrawableSize(&mGraphicsPipelineTransition, 1, 1);
    }
}
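
// Worked example (an addition, not part of the original source), assuming the
// SurfaceRotation enum lists the four flipped rotations directly after the
// four non-flipped ones: with mCurrentRotationDrawFramebuffer ==
// SurfaceRotation::Rotated90Degrees and yFlipped == true, the arithmetic
// above computes ToUnderlying(FlippedIdentity) + ToUnderlying(Rotated90Degrees),
// i.e. SurfaceRotation::FlippedRotated90Degrees, so a single specialization
// constant encodes both the rotation and the flip.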

void ContextVk::updateSurfaceRotationDrawFramebuffer(const gl::State &glState)
{
    gl::Framebuffer *drawFramebuffer = glState.getDrawFramebuffer();
    mCurrentRotationDrawFramebuffer =
        DetermineSurfaceRotation(drawFramebuffer, mCurrentWindowSurface);
}

void ContextVk::updateSurfaceRotationReadFramebuffer(const gl::State &glState)
{
    gl::Framebuffer *readFramebuffer = glState.getReadFramebuffer();
    mCurrentRotationReadFramebuffer =
        DetermineSurfaceRotation(readFramebuffer, mCurrentWindowSurface);
}

gl::Caps ContextVk::getNativeCaps() const
{
    return mRenderer->getNativeCaps();
}

const gl::TextureCapsMap &ContextVk::getNativeTextureCaps() const
{
    return mRenderer->getNativeTextureCaps();
}

const gl::Extensions &ContextVk::getNativeExtensions() const
{
    return mRenderer->getNativeExtensions();
}

const gl::Limitations &ContextVk::getNativeLimitations() const
{
    return mRenderer->getNativeLimitations();
}

CompilerImpl *ContextVk::createCompiler()
{
    return new CompilerVk();
}

ShaderImpl *ContextVk::createShader(const gl::ShaderState &state)
{
    return new ShaderVk(state);
}

ProgramImpl *ContextVk::createProgram(const gl::ProgramState &state)
{
    return new ProgramVk(state);
}

FramebufferImpl *ContextVk::createFramebuffer(const gl::FramebufferState &state)
{
    return FramebufferVk::CreateUserFBO(mRenderer, state);
}

TextureImpl *ContextVk::createTexture(const gl::TextureState &state)
{
    return new TextureVk(state, mRenderer);
}

RenderbufferImpl *ContextVk::createRenderbuffer(const gl::RenderbufferState &state)
{
    return new RenderbufferVk(state);
}

BufferImpl *ContextVk::createBuffer(const gl::BufferState &state)
{
    return new BufferVk(state);
}

VertexArrayImpl *ContextVk::createVertexArray(const gl::VertexArrayState &state)
{
    return new VertexArrayVk(this, state);
}

QueryImpl *ContextVk::createQuery(gl::QueryType type)
{
    return new QueryVk(type);
}

FenceNVImpl *ContextVk::createFenceNV()
{
    return new FenceNVVk();
}

SyncImpl *ContextVk::createSync()
{
    return new SyncVk();
}

TransformFeedbackImpl *ContextVk::createTransformFeedback(const gl::TransformFeedbackState &state)
{
    return new TransformFeedbackVk(state);
}

SamplerImpl *ContextVk::createSampler(const gl::SamplerState &state)
{
    return new SamplerVk(state);
}

ProgramPipelineImpl *ContextVk::createProgramPipeline(const gl::ProgramPipelineState &state)
{
    return new ProgramPipelineVk(state);
}

MemoryObjectImpl *ContextVk::createMemoryObject()
{
    return new MemoryObjectVk();
}

SemaphoreImpl *ContextVk::createSemaphore()
{
    return new SemaphoreVk();
}

OverlayImpl *ContextVk::createOverlay(const gl::OverlayState &state)
{
    return new OverlayVk(state);
}

void ContextVk::invalidateCurrentDefaultUniforms()
{
    const gl::ProgramExecutable *executable = mState.getProgramExecutable();
    ASSERT(executable);

    if (executable->hasDefaultUniforms())
    {
        mGraphicsDirtyBits.set(DIRTY_BIT_DESCRIPTOR_SETS);
        mComputeDirtyBits.set(DIRTY_BIT_DESCRIPTOR_SETS);
    }
}

angle::Result ContextVk::invalidateCurrentTextures(const gl::Context *context)
{
    const gl::ProgramExecutable *executable = mState.getProgramExecutable();
    ASSERT(executable);

    if (executable->hasTextures())
    {
        mGraphicsDirtyBits |= kTexturesAndDescSetDirtyBits;
        mComputeDirtyBits |= kTexturesAndDescSetDirtyBits;

        ANGLE_TRY(updateActiveTextures(context));

        // Take care of read-after-write hazards that require implicit synchronization.
        if (executable->isCompute())
        {
            ANGLE_TRY(endRenderPassIfComputeReadAfterAttachmentWrite());
        }
    }

    return angle::Result::Continue;
}

angle::Result ContextVk::invalidateCurrentShaderResources()
{
    const gl::ProgramExecutable *executable = mState.getProgramExecutable();
    ASSERT(executable);

    const bool hasImages = executable->hasImages();
    const bool hasStorageBuffers =
        executable->hasStorageBuffers() || executable->hasAtomicCounterBuffers();
    const bool hasUniformBuffers = executable->hasUniformBuffers();

    if (hasUniformBuffers || hasStorageBuffers || hasImages || executable->usesFramebufferFetch())
    {
        mGraphicsDirtyBits |= kResourcesAndDescSetDirtyBits;
        mComputeDirtyBits |= kResourcesAndDescSetDirtyBits;
    }

    // Take care of read-after-write hazards that require implicit synchronization.
    if (hasUniformBuffers && executable->isCompute())
    {
        ANGLE_TRY(endRenderPassIfComputeReadAfterTransformFeedbackWrite());
    }

    // If a memory barrier has been issued but the command buffers haven't been flushed, make sure
    // they get a chance to do so if necessary on program and storage buffer/image binding changes.
    const bool hasGLMemoryBarrierIssuedInCommandBuffers =
        mOutsideRenderPassCommands->hasGLMemoryBarrierIssued() ||
        mRenderPassCommands->hasGLMemoryBarrierIssued();

    if ((hasStorageBuffers || hasImages) && hasGLMemoryBarrierIssuedInCommandBuffers)
    {
        mGraphicsDirtyBits.set(DIRTY_BIT_MEMORY_BARRIER);
        mComputeDirtyBits.set(DIRTY_BIT_MEMORY_BARRIER);
    }

    if (hasUniformBuffers || hasStorageBuffers)
    {
        mShaderBuffersDescriptorDesc.reset();

        ProgramExecutableVk *executableVk = nullptr;
        if (mState.getProgram())
        {
            ProgramVk *programVk = vk::GetImpl(mState.getProgram());
            executableVk         = &programVk->getExecutable();
        }
        else
        {
            ASSERT(mState.getProgramPipeline());
            ProgramPipelineVk *pipelineVk = vk::GetImpl(mState.getProgramPipeline());
            executableVk                  = &pipelineVk->getExecutable();
        }

        const gl::BufferVector &uniformBuffers = mState.getOffsetBindingPointerUniformBuffers();
        AppendBufferVectorToDesc(&mShaderBuffersDescriptorDesc, uniformBuffers,
                                 mState.getUniformBuffersMask(),
                                 !executableVk->usesDynamicUniformBufferDescriptors());

        const gl::BufferVector &shaderStorageBuffers =
            mState.getOffsetBindingPointerShaderStorageBuffers();
        AppendBufferVectorToDesc(&mShaderBuffersDescriptorDesc, shaderStorageBuffers,
                                 mState.getShaderStorageBuffersMask(), true);

        const gl::BufferVector &atomicCounterBuffers =
            mState.getOffsetBindingPointerAtomicCounterBuffers();
        AppendBufferVectorToDesc(&mShaderBuffersDescriptorDesc, atomicCounterBuffers,
                                 mState.getAtomicCounterBuffersMask(), true);
    }

    return angle::Result::Continue;
}

void ContextVk::invalidateGraphicsDriverUniforms()
{
    mGraphicsDirtyBits |= kDriverUniformsAndBindingDirtyBits;
}

void ContextVk::invalidateDriverUniforms()
{
    mGraphicsDirtyBits |= kDriverUniformsAndBindingDirtyBits;
    mComputeDirtyBits |= kDriverUniformsAndBindingDirtyBits;
}

angle::Result ContextVk::onFramebufferChange(FramebufferVk *framebufferVk)
{
    // This is called from FramebufferVk::syncState.  Skip these updates if the framebuffer being
    // synced is the read framebuffer (which is not equal to the draw framebuffer).
    if (framebufferVk != vk::GetImpl(mState.getDrawFramebuffer()))
    {
        return angle::Result::Continue;
    }

    // Ensure that the pipeline description is updated.
    if (mGraphicsPipelineDesc->getRasterizationSamples() !=
        static_cast<uint32_t>(framebufferVk->getSamples()))
    {
        updateRasterizationSamples(framebufferVk->getSamples());
    }

    // Update scissor.
    updateScissor(mState);

    // Update depth and stencil.
    updateDepthStencil(mState);

    if (mState.getProgramExecutable())
    {
        ANGLE_TRY(invalidateCurrentShaderResources());
    }

    onDrawFramebufferRenderPassDescChange(framebufferVk, nullptr);

    return angle::Result::Continue;
}

void ContextVk::onDrawFramebufferRenderPassDescChange(FramebufferVk *framebufferVk,
                                                      bool *renderPassDescChangedOut)
{
    mGraphicsPipelineDesc->updateRenderPassDesc(&mGraphicsPipelineTransition,
                                                framebufferVk->getRenderPassDesc());
    const gl::Box &dimensions = framebufferVk->getState().getDimensions();
    mGraphicsPipelineDesc->updateDrawableSize(&mGraphicsPipelineTransition, dimensions.width,
                                              dimensions.height);

    if (renderPassDescChangedOut)
    {
        // If the render pass desc has changed while processing the dirty bits, notify the caller.
        *renderPassDescChangedOut = true;
    }
    else
    {
        // Otherwise mark the pipeline as dirty.
        invalidateCurrentGraphicsPipeline();
    }
}

void ContextVk::invalidateCurrentTransformFeedbackBuffers()
{
    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        mGraphicsDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS);
    }
    else if (getFeatures().emulateTransformFeedback.enabled)
    {
        mGraphicsDirtyBits |= kXfbBuffersAndDescSetDirtyBits;
    }
}

void ContextVk::onTransformFeedbackStateChanged()
{
    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        mGraphicsDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS);
    }
    else if (getFeatures().emulateTransformFeedback.enabled)
    {
        invalidateGraphicsDriverUniforms();
        invalidateCurrentTransformFeedbackBuffers();
    }
}

angle::Result ContextVk::onBeginTransformFeedback(
    size_t bufferCount,
    const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &buffers,
    const gl::TransformFeedbackBuffersArray<vk::BufferHelper> &counterBuffers)
{
    onTransformFeedbackStateChanged();

    bool shouldEndRenderPass = false;

    // If any of the buffers were previously used in the render pass, break the render pass as a
    // barrier is needed.
    for (size_t bufferIndex = 0; bufferIndex < bufferCount; ++bufferIndex)
    {
        const vk::BufferHelper *buffer = buffers[bufferIndex];
        if (mRenderPassCommands->usesBuffer(*buffer))
        {
            shouldEndRenderPass = true;
            break;
        }
    }

    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        // Break the render pass if the counter buffers are used too.  Note that Vulkan requires a
        // barrier on the counter buffer between pause and resume, so it cannot be resumed in the
        // same render pass.  Note additionally that we don't need to test all counters being used
        // in the render pass, as outside of the transform feedback object these buffers are
        // inaccessible and are therefore always used together.
        if (!shouldEndRenderPass && mRenderPassCommands->usesBuffer(counterBuffers[0]))
        {
            shouldEndRenderPass = true;
        }

        mGraphicsDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME);
    }

    if (shouldEndRenderPass)
    {
        ANGLE_TRY(flushCommandsAndEndRenderPass());
    }

    populateTransformFeedbackBufferSet(bufferCount, buffers);

    return angle::Result::Continue;
}

void ContextVk::populateTransformFeedbackBufferSet(
    size_t bufferCount,
    const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &buffers)
{
    for (size_t bufferIndex = 0; bufferIndex < bufferCount; ++bufferIndex)
    {
        vk::BufferHelper *buffer = buffers[bufferIndex];
        if (!mCurrentTransformFeedbackBuffers.contains(buffer))
        {
            mCurrentTransformFeedbackBuffers.insert(buffer);
        }
    }
}

void ContextVk::onEndTransformFeedback()
{
    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        if (mRenderPassCommands->isTransformFeedbackStarted())
        {
            mRenderPassCommands->endTransformFeedback();
        }
    }
    else if (getFeatures().emulateTransformFeedback.enabled)
    {
        onTransformFeedbackStateChanged();
    }
}

angle::Result ContextVk::onPauseTransformFeedback()
{
    if (getFeatures().supportsTransformFeedbackExtension.enabled)
    {
        // If transform feedback was already active on this render pass, break it.  This
        // is for simplicity to avoid tracking multiple simultaneously active transform feedback
        // settings in the render pass.
        if (mRenderPassCommands->isTransformFeedbackActiveUnpaused())
        {
            return flushCommandsAndEndRenderPass();
        }
    }
    else if (getFeatures().emulateTransformFeedback.enabled)
    {
        invalidateCurrentTransformFeedbackBuffers();
    }
    return angle::Result::Continue;
}

void ContextVk::invalidateGraphicsPipelineBinding()
{
    mGraphicsDirtyBits.set(DIRTY_BIT_PIPELINE_BINDING);
}

void ContextVk::invalidateComputePipelineBinding()
{
    mComputeDirtyBits.set(DIRTY_BIT_PIPELINE_BINDING);
}

void ContextVk::invalidateGraphicsDescriptorSet(DescriptorSetIndex usedDescriptorSet)
{
    // UtilsVk currently only uses set 0
    ASSERT(usedDescriptorSet == DescriptorSetIndex::Internal);
    if (mDriverUniforms[PipelineType::Graphics].descriptorSet != VK_NULL_HANDLE)
    {
        mGraphicsDirtyBits.set(DIRTY_BIT_DRIVER_UNIFORMS_BINDING);
    }
}

void ContextVk::invalidateComputeDescriptorSet(DescriptorSetIndex usedDescriptorSet)
{
    // UtilsVk currently only uses set 0
    ASSERT(usedDescriptorSet == DescriptorSetIndex::Internal);
    if (mDriverUniforms[PipelineType::Compute].descriptorSet != VK_NULL_HANDLE)
    {
        mComputeDirtyBits.set(DIRTY_BIT_DRIVER_UNIFORMS_BINDING);
    }
}

void ContextVk::invalidateViewportAndScissor()
{
    mGraphicsDirtyBits.set(DIRTY_BIT_VIEWPORT);
    mGraphicsDirtyBits.set(DIRTY_BIT_SCISSOR);
}

angle::Result ContextVk::dispatchCompute(const gl::Context *context,
                                         GLuint numGroupsX,
                                         GLuint numGroupsY,
                                         GLuint numGroupsZ)
{
    ANGLE_TRY(setupDispatch(context));

    mOutsideRenderPassCommands->getCommandBuffer().dispatch(numGroupsX, numGroupsY, numGroupsZ);

    return angle::Result::Continue;
}

angle::Result ContextVk::dispatchComputeIndirect(const gl::Context *context, GLintptr indirect)
{
    gl::Buffer *glBuffer      = getState().getTargetBuffer(gl::BufferBinding::DispatchIndirect);
    VkDeviceSize bufferOffset = 0;
    vk::BufferHelper &buffer  = vk::GetImpl(glBuffer)->getBufferAndOffset(&bufferOffset);

    // Break the render pass if the indirect buffer was previously used as the output from transform
    // feedback.
    if (mCurrentTransformFeedbackBuffers.contains(&buffer))
    {
        ANGLE_TRY(flushCommandsAndEndRenderPass());
    }

    ANGLE_TRY(setupDispatch(context));

    // Process indirect buffer after command buffer has started.
    mOutsideRenderPassCommands->bufferRead(this, VK_ACCESS_INDIRECT_COMMAND_READ_BIT,
                                           vk::PipelineStage::DrawIndirect, &buffer);

    mOutsideRenderPassCommands->getCommandBuffer().dispatchIndirect(buffer.getBuffer(),
                                                                    bufferOffset + indirect);

    return angle::Result::Continue;
}
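
// Illustrative note (an addition, not part of the original source):
// |bufferOffset| above is the offset of the GL buffer's suballocation within
// the backing VkBuffer, so the GL-level |indirect| byte offset must be added
// on top of it when recording the indirect dispatch; e.g. a suballocation at
// byte 4096 combined with glDispatchComputeIndirect(256) dispatches from
// byte 4352 of the VkBuffer.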

angle::Result ContextVk::memoryBarrier(const gl::Context *context, GLbitfield barriers)
{
    // First, turn GL_ALL_BARRIER_BITS into a mask that has only the valid barriers set.
    constexpr GLbitfield kCoreBarrierBits =
        GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT | GL_ELEMENT_ARRAY_BARRIER_BIT | GL_UNIFORM_BARRIER_BIT |
        GL_TEXTURE_FETCH_BARRIER_BIT | GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_COMMAND_BARRIER_BIT |
        GL_PIXEL_BUFFER_BARRIER_BIT | GL_TEXTURE_UPDATE_BARRIER_BIT | GL_BUFFER_UPDATE_BARRIER_BIT |
        GL_FRAMEBUFFER_BARRIER_BIT | GL_TRANSFORM_FEEDBACK_BARRIER_BIT |
        GL_ATOMIC_COUNTER_BARRIER_BIT | GL_SHADER_STORAGE_BARRIER_BIT;
    constexpr GLbitfield kExtensionBarrierBits = GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT;

    barriers &= kCoreBarrierBits | kExtensionBarrierBits;

    // GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT specifies that a fence sync or glFinish must be used
    // after the barrier for the CPU to see the shader writes.  Since host-visible buffer writes
    // always issue a barrier automatically for the sake of glMapBuffer() (see
    // comment on |mIsAnyHostVisibleBufferWritten|), there's nothing to do for
    // GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT.
    barriers &= ~GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT_EXT;

    // If no other barrier, early out.
    if (barriers == 0)
    {
        return angle::Result::Continue;
    }

    // glMemoryBarrier for barrier bit X_BARRIER_BIT implies:
    //
    // - An execution+memory barrier: shader writes are made visible to subsequent X accesses
    //
    // Additionally, SHADER_IMAGE_ACCESS_BARRIER_BIT and SHADER_STORAGE_BARRIER_BIT imply:
    //
    // - An execution+memory barrier: all accesses are finished before image/buffer writes
    //
    // For the first barrier, we can simplify the implementation by assuming that prior writes are
    // expected to be used right after this barrier, so we can close the render pass or flush the
    // outside render pass commands right away if they have had any writes.
    //
    // It's noteworthy that some barrier bits affect draw/dispatch calls only, while others affect
    // other commands.  For the latter, since storage buffers and images are not tracked in command
    // buffers, we can't rely on the command buffers being flushed in the usual way when recording
    // these commands (i.e. through |getOutsideRenderPassCommandBuffer()| and
    // |vk::CommandBufferAccess|).  Conservatively flushing command buffers with any storage output
    // simplifies this use case.  If this needs to be avoided in the future,
    // |getOutsideRenderPassCommandBuffer()| can be modified to flush the command buffers if they
    // have had any storage output.
    //
    // For the second barrier, we need to defer closing the render pass until there's a draw or
    // dispatch call that uses storage buffers or images that were previously used in the render
    // pass.  This allows the render pass to remain open in scenarios such as this:
    //
    // - Draw using resource X
    // - glMemoryBarrier
    // - Draw/dispatch with storage buffer/image Y
    //
    // To achieve this, a dirty bit is added that breaks the render pass if any storage
    // buffer/images are used in it.  Until the render pass breaks, changing the program or storage
    // buffer/image bindings should set this dirty bit again.

    if (mRenderPassCommands->hasShaderStorageOutput())
    {
        // Break the render pass if necessary as future non-draw commands can't know if they should.
        ANGLE_TRY(flushCommandsAndEndRenderPass());
    }
    else if (mOutsideRenderPassCommands->hasShaderStorageOutput())
    {
        // Otherwise flush the outside render pass commands if necessary.
        ANGLE_TRY(flushOutsideRenderPassCommands());
    }

    constexpr GLbitfield kWriteAfterAccessBarriers =
        GL_SHADER_IMAGE_ACCESS_BARRIER_BIT | GL_SHADER_STORAGE_BARRIER_BIT;

    if ((barriers & kWriteAfterAccessBarriers) == 0)
    {
        return angle::Result::Continue;
    }

    // Defer flushing the command buffers until a draw/dispatch with storage buffer/image is
    // encountered.
    mGraphicsDirtyBits.set(DIRTY_BIT_MEMORY_BARRIER);
    mComputeDirtyBits.set(DIRTY_BIT_MEMORY_BARRIER);

    // Make sure a memory barrier is issued for future usages of storage buffers and images even if
    // there's no binding change.
    mGraphicsDirtyBits.set(DIRTY_BIT_SHADER_RESOURCES);
    mComputeDirtyBits.set(DIRTY_BIT_SHADER_RESOURCES);

    // Mark the command buffers as affected by glMemoryBarrier, so future program and storage
    // buffer/image binding changes can set DIRTY_BIT_MEMORY_BARRIER again.
    mOutsideRenderPassCommands->setGLMemoryBarrierIssued();
    mRenderPassCommands->setGLMemoryBarrierIssued();

    return angle::Result::Continue;
}
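
// Illustrative GL-side sequence (an addition, not part of the original source)
// showing why DIRTY_BIT_MEMORY_BARRIER is deferred instead of breaking the
// render pass immediately:
//
//     glDrawArrays(...);                 // draw using resource X (render pass opens)
//     glMemoryBarrier(GL_SHADER_STORAGE_BARRIER_BIT);  // only sets dirty bits
//     glDrawArrays(...);                 // no storage access; render pass stays open
//     glDrawArrays(...);                 // uses a storage image previously written in
//                                        // the pass; the dirty bit now breaks it
//
// The render pass is closed only at the first draw/dispatch that actually
// touches storage resources.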

angle::Result ContextVk::memoryBarrierByRegion(const gl::Context *context, GLbitfield barriers)
{
    // Note: memoryBarrierByRegion is expected to affect only the fragment pipeline, but is
    // otherwise similar to memoryBarrier in function.
    //
    // TODO: Optimize memoryBarrierByRegion by issuing an in-subpass pipeline barrier instead of
    // breaking the render pass.  http://anglebug.com/5132
    return memoryBarrier(context, barriers);
}

void ContextVk::framebufferFetchBarrier()
{
    mGraphicsDirtyBits.set(DIRTY_BIT_FRAMEBUFFER_FETCH_BARRIER);
}

getQueryPool(gl::QueryType queryType)4611 vk::DynamicQueryPool *ContextVk::getQueryPool(gl::QueryType queryType)
4612 {
4613     ASSERT(queryType == gl::QueryType::AnySamples ||
4614            queryType == gl::QueryType::AnySamplesConservative ||
4615            queryType == gl::QueryType::PrimitivesGenerated ||
4616            queryType == gl::QueryType::TransformFeedbackPrimitivesWritten ||
4617            queryType == gl::QueryType::Timestamp || queryType == gl::QueryType::TimeElapsed);
4618 
4619     // For PrimitivesGenerated queries:
4620     //
4621     // - If VK_EXT_primitives_generated_query is supported, use that.
4622     //   TODO: http://anglebug.com/5430
4623     // - Otherwise, if pipelineStatisticsQuery is supported, use that,
4624     // - Otherwise, use the same pool as TransformFeedbackPrimitivesWritten and share the query as
4625     //   the Vulkan transform feedback query produces both results.  This option is non-conformant
4626     //   as the primitives generated query will not be functional without transform feedback.
4627     //
4628     if (queryType == gl::QueryType::PrimitivesGenerated &&
4629         !getFeatures().supportsPipelineStatisticsQuery.enabled)
4630     {
4631         queryType = gl::QueryType::TransformFeedbackPrimitivesWritten;
4632     }
4633 
4634     // Assert that timestamp extension is available if needed.
4635     ASSERT((queryType != gl::QueryType::Timestamp && queryType != gl::QueryType::TimeElapsed) ||
4636            mRenderer->getQueueFamilyProperties().timestampValidBits > 0);
4637     ASSERT(mQueryPools[queryType].isValid());
4638     return &mQueryPools[queryType];
4639 }
4640 
4641 const VkClearValue &ContextVk::getClearColorValue() const
4642 {
4643     return mClearColorValue;
4644 }
4645 
4646 const VkClearValue &ContextVk::getClearDepthStencilValue() const
4647 {
4648     return mClearDepthStencilValue;
4649 }
4650 
4651 gl::BlendStateExt::ColorMaskStorage::Type ContextVk::getClearColorMasks() const
4652 {
4653     return mClearColorMasks;
4654 }
4655 
4656 void ContextVk::writeAtomicCounterBufferDriverUniformOffsets(uint32_t *offsetsOut,
4657                                                              size_t offsetsSize)
4658 {
4659     const VkDeviceSize offsetAlignment =
4660         mRenderer->getPhysicalDeviceProperties().limits.minStorageBufferOffsetAlignment;
4661     size_t atomicCounterBufferCount = mState.getAtomicCounterBufferCount();
4662 
4663     ASSERT(atomicCounterBufferCount <= offsetsSize * 4);
4664 
4665     for (uint32_t bufferIndex = 0; bufferIndex < atomicCounterBufferCount; ++bufferIndex)
4666     {
4667         uint32_t offsetDiff = 0;
4668 
4669         const gl::OffsetBindingPointer<gl::Buffer> *atomicCounterBuffer =
4670             &mState.getIndexedAtomicCounterBuffer(bufferIndex);
4671         if (atomicCounterBuffer->get())
4672         {
4673             VkDeviceSize offset        = atomicCounterBuffer->getOffset();
4674             VkDeviceSize alignedOffset = (offset / offsetAlignment) * offsetAlignment;
4675 
4676             // GL requires the atomic counter buffer offset to be aligned with uint.
4677             ASSERT((offset - alignedOffset) % sizeof(uint32_t) == 0);
4678             offsetDiff = static_cast<uint32_t>((offset - alignedOffset) / sizeof(uint32_t));
4679 
4680             // We expect offsetDiff to fit in an 8-bit value.  The maximum difference is
4681             // minStorageBufferOffsetAlignment / 4, where minStorageBufferOffsetAlignment
4682             // currently has a maximum value of 256 on any device.
4683             ASSERT(offsetDiff < (1 << 8));
4684         }
4685 
4686         // The output array is already cleared prior to this call.
4687         ASSERT(bufferIndex % 4 != 0 || offsetsOut[bufferIndex / 4] == 0);
4688 
4689         offsetsOut[bufferIndex / 4] |= static_cast<uint8_t>(offsetDiff) << ((bufferIndex % 4) * 8);
4690     }
4691 }
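// A worked example of the packing above, assuming minStorageBufferOffsetAlignment is 256:
//
//     offset        = 260   (uint-aligned per GL, but not Vulkan-aligned)
//     alignedOffset = (260 / 256) * 256 = 256
//     offsetDiff    = (260 - 256) / sizeof(uint32_t) = 1
//
// For bufferIndex == 2, that difference lands in byte lane 2 of offsetsOut[0]:
//
//     offsetsOut[0] |= static_cast<uint8_t>(1) << (2 * 8);  // offsetsOut[0] == 0x00010000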
4692 
4693 void ContextVk::pauseTransformFeedbackIfActiveUnpaused()
4694 {
4695     if (mRenderPassCommands->isTransformFeedbackActiveUnpaused())
4696     {
4697         ASSERT(getFeatures().supportsTransformFeedbackExtension.enabled);
4698         mRenderPassCommands->pauseTransformFeedback();
4699 
4700         // Note that this function is called when render pass break is imminent
4701         // (flushCommandsAndEndRenderPass(), or UtilsVk::clearFramebuffer which will close the
4702         // render pass after the clear).  This dirty bit allows transform feedback to resume
4703         // automatically on next render pass.
4704         mGraphicsDirtyBits.set(DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME);
4705     }
4706 }
4707 
4708 angle::Result ContextVk::handleDirtyGraphicsDriverUniforms(DirtyBits::Iterator *dirtyBitsIterator,
4709                                                            DirtyBits dirtyBitMask)
4710 {
4711     // Allocate a new region in the dynamic buffer.
4712     bool useGraphicsDriverUniformsExtended = getFeatures().forceDriverUniformOverSpecConst.enabled;
4713     uint8_t *ptr;
4714     bool newBuffer;
4715     GraphicsDriverUniforms *driverUniforms;
4716     size_t driverUniformSize;
4717 
4718     if (useGraphicsDriverUniformsExtended)
4719     {
4720         driverUniformSize = sizeof(GraphicsDriverUniformsExtended);
4721     }
4722     else
4723     {
4724         driverUniformSize = sizeof(GraphicsDriverUniforms);
4725     }
4726 
4727     ANGLE_TRY(allocateDriverUniforms(driverUniformSize, &mDriverUniforms[PipelineType::Graphics],
4728                                      &ptr, &newBuffer));
4729 
4730     if (useGraphicsDriverUniformsExtended)
4731     {
4732         float halfRenderAreaWidth =
4733             static_cast<float>(mDrawFramebuffer->getState().getDimensions().width) * 0.5f;
4734         float halfRenderAreaHeight =
4735             static_cast<float>(mDrawFramebuffer->getState().getDimensions().height) * 0.5f;
4736 
4737         float flipX = 1.0f;
4738         float flipY = -1.0f;
4739         // Y-axis flipping only comes into play with the default framebuffer (i.e. a swapchain
4740         // image).  For 0-degree rotation, an FBO or pbuffer could be the draw framebuffer, so we
4741         // must check whether flipY should be positive or negative.  All other rotations apply
4742         // only to the default framebuffer, so the value of isViewportFlipEnabledForDrawFBO() is
4743         // assumed true; the appropriate flipY value is chosen such that gl_FragCoord is
4744         // positioned at the lower-left corner of the window.
4745         switch (mCurrentRotationDrawFramebuffer)
4746         {
4747             case SurfaceRotation::Identity:
4748                 flipX = 1.0f;
4749                 flipY = isViewportFlipEnabledForDrawFBO() ? -1.0f : 1.0f;
4750                 break;
4751             case SurfaceRotation::Rotated90Degrees:
4752                 ASSERT(isViewportFlipEnabledForDrawFBO());
4753                 flipX = 1.0f;
4754                 flipY = 1.0f;
4755                 std::swap(halfRenderAreaWidth, halfRenderAreaHeight);
4756                 break;
4757             case SurfaceRotation::Rotated180Degrees:
4758                 ASSERT(isViewportFlipEnabledForDrawFBO());
4759                 flipX = -1.0f;
4760                 flipY = 1.0f;
4761                 break;
4762             case SurfaceRotation::Rotated270Degrees:
4763                 ASSERT(isViewportFlipEnabledForDrawFBO());
4764                 flipX = -1.0f;
4765                 flipY = -1.0f;
4766                 break;
4767             default:
4768                 UNREACHABLE();
4769                 break;
4770         }
4771 
4772         GraphicsDriverUniformsExtended *driverUniformsExt =
4773             reinterpret_cast<GraphicsDriverUniformsExtended *>(ptr);
4774         driverUniformsExt->halfRenderArea = {halfRenderAreaWidth, halfRenderAreaHeight};
4775         driverUniformsExt->flipXY         = {flipX, flipY};
4776         driverUniformsExt->negFlipXY      = {flipX, -flipY};
4777         memcpy(&driverUniformsExt->fragRotation,
4778                &kFragRotationMatrices[mCurrentRotationDrawFramebuffer],
4779                sizeof(PreRotationMatrixValues));
4780         driverUniforms = &driverUniformsExt->common;
4781     }
4782     else
4783     {
4784         driverUniforms = reinterpret_cast<GraphicsDriverUniforms *>(ptr);
4785     }
4786 
4787     gl::Rectangle glViewport = mState.getViewport();
4788     if (isRotatedAspectRatioForDrawFBO())
4789     {
4790         // The surface is rotated 90/270 degrees.  This changes the aspect ratio of the surface.
4791         std::swap(glViewport.x, glViewport.y);
4792         std::swap(glViewport.width, glViewport.height);
4793     }
4794 
4795     uint32_t xfbActiveUnpaused = mState.isTransformFeedbackActiveUnpaused();
4796 
4797     float depthRangeNear = mState.getNearPlane();
4798     float depthRangeFar  = mState.getFarPlane();
4799     float depthRangeDiff = depthRangeFar - depthRangeNear;
4800     int32_t numSamples   = mDrawFramebuffer->getSamples();
4801 
4802     // Copy and flush to the device.
4803     *driverUniforms = {
4804         {static_cast<float>(glViewport.x), static_cast<float>(glViewport.y),
4805          static_cast<float>(glViewport.width), static_cast<float>(glViewport.height)},
4806         mState.getEnabledClipDistances().bits(),
4807         xfbActiveUnpaused,
4808         static_cast<int32_t>(mXfbVertexCountPerInstance),
4809         numSamples,
4810         {},
4811         {},
4812         {depthRangeNear, depthRangeFar, depthRangeDiff, 0.0f}};
4813 
4814     if (xfbActiveUnpaused)
4815     {
4816         TransformFeedbackVk *transformFeedbackVk =
4817             vk::GetImpl(mState.getCurrentTransformFeedback());
4818         transformFeedbackVk->getBufferOffsets(this, mXfbBaseVertex,
4819                                               driverUniforms->xfbBufferOffsets.data(),
4820                                               driverUniforms->xfbBufferOffsets.size());
4821     }
4822 
4823     if (mState.hasValidAtomicCounterBuffer())
4824     {
4825         writeAtomicCounterBufferDriverUniformOffsets(driverUniforms->acbBufferOffsets.data(),
4826                                                      driverUniforms->acbBufferOffsets.size());
4827     }
4828 
4829     return updateDriverUniformsDescriptorSet(newBuffer, driverUniformSize,
4830                                              &mDriverUniforms[PipelineType::Graphics]);
4831 }
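// Summarizing the flip selection above for a default framebuffer (viewport flip enabled):
//
//     rotation             flipXY      notes
//     Identity             (+1, -1)
//     Rotated90Degrees     (+1, +1)    halfRenderArea width/height swapped
//     Rotated180Degrees    (-1, +1)
//     Rotated270Degrees    (-1, -1)
//
// negFlipXY is always (flipX, -flipY).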
4832 
4833 angle::Result ContextVk::handleDirtyComputeDriverUniforms()
4834 {
4835     // Allocate a new region in the dynamic buffer.
4836     uint8_t *ptr;
4837     bool newBuffer;
4838     ANGLE_TRY(allocateDriverUniforms(sizeof(ComputeDriverUniforms),
4839                                      &mDriverUniforms[PipelineType::Compute], &ptr, &newBuffer));
4840 
4841     // Copy and flush to the device.
4842     ComputeDriverUniforms *driverUniforms = reinterpret_cast<ComputeDriverUniforms *>(ptr);
4843     *driverUniforms                       = {};
4844 
4845     if (mState.hasValidAtomicCounterBuffer())
4846     {
4847         writeAtomicCounterBufferDriverUniformOffsets(driverUniforms->acbBufferOffsets.data(),
4848                                                      driverUniforms->acbBufferOffsets.size());
4849     }
4850 
4851     return updateDriverUniformsDescriptorSet(newBuffer, sizeof(ComputeDriverUniforms),
4852                                              &mDriverUniforms[PipelineType::Compute]);
4853 }
4854 
4855 void ContextVk::handleDirtyDriverUniformsBindingImpl(vk::CommandBuffer *commandBuffer,
4856                                                      VkPipelineBindPoint bindPoint,
4857                                                      DriverUniformsDescriptorSet *driverUniforms)
4858 {
4859     // The descriptor pool that this descriptor set was allocated from needs to be retained when the
4860     // descriptor set is used in a new command. Since the descriptor pools are specific to each
4861     // ContextVk, we only need to retain them once to ensure the reference count and Serial are
4862     // updated correctly.
4863     if (!driverUniforms->descriptorPoolBinding.get().usedInRecordedCommands())
4864     {
4865         driverUniforms->descriptorPoolBinding.get().retain(&mResourceUseList);
4866     }
4867 
4868     commandBuffer->bindDescriptorSets(
4869         mExecutable->getPipelineLayout(), bindPoint, DescriptorSetIndex::Internal, 1,
4870         &driverUniforms->descriptorSet, 1, &driverUniforms->dynamicOffset);
4871 }
4872 
4873 angle::Result ContextVk::handleDirtyGraphicsDriverUniformsBinding(
4874     DirtyBits::Iterator *dirtyBitsIterator,
4875     DirtyBits dirtyBitMask)
4876 {
4877     // Bind the driver descriptor set.
4878     handleDirtyDriverUniformsBindingImpl(mRenderPassCommandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
4879                                          &mDriverUniforms[PipelineType::Graphics]);
4880     return angle::Result::Continue;
4881 }
4882 
4883 angle::Result ContextVk::handleDirtyComputeDriverUniformsBinding()
4884 {
4885     // Bind the driver descriptor set.
4886     handleDirtyDriverUniformsBindingImpl(&mOutsideRenderPassCommands->getCommandBuffer(),
4887                                          VK_PIPELINE_BIND_POINT_COMPUTE,
4888                                          &mDriverUniforms[PipelineType::Compute]);
4889     return angle::Result::Continue;
4890 }
4891 
4892 angle::Result ContextVk::allocateDriverUniforms(size_t driverUniformsSize,
4893                                                 DriverUniformsDescriptorSet *driverUniforms,
4894                                                 uint8_t **ptrOut,
4895                                                 bool *newBufferOut)
4896 {
4897     // Allocate a new region in the dynamic buffer. The allocate call may put the buffer into the
4898     // dynamic buffer's mInflightBuffers. At command submission time, these in-flight buffers are
4899     // added to the context's mResourceUseList, which ensures they get tagged with a queue serial
4900     // number before being moved to the free list.
4901     VkDeviceSize offset;
4902     ANGLE_TRY(driverUniforms->dynamicBuffer.allocate(this, driverUniformsSize, ptrOut, nullptr,
4903                                                      &offset, newBufferOut));
4904 
4905     driverUniforms->dynamicOffset = static_cast<uint32_t>(offset);
4906 
4907     return angle::Result::Continue;
4908 }
4909 
4910 angle::Result ContextVk::updateDriverUniformsDescriptorSet(
4911     bool newBuffer,
4912     size_t driverUniformsSize,
4913     DriverUniformsDescriptorSet *driverUniforms)
4914 {
4915     ANGLE_TRY(driverUniforms->dynamicBuffer.flush(this));
4916 
4917     if (!newBuffer)
4918     {
4919         return angle::Result::Continue;
4920     }
4921 
4922     const vk::BufferHelper *buffer = driverUniforms->dynamicBuffer.getCurrentBuffer();
4923     vk::BufferSerial bufferSerial  = buffer->getBufferSerial();
4924     // Look up in the cache first
4925     if (driverUniforms->descriptorSetCache.get(bufferSerial.getValue(),
4926                                                &driverUniforms->descriptorSet))
4927     {
4928         // The descriptor pool that this descriptor set was allocated from needs to be retained each
4929         // time the descriptor set is used in a new command.
4930         driverUniforms->descriptorPoolBinding.get().retain(&mResourceUseList);
4931         return angle::Result::Continue;
4932     }
4933 
4934     // Allocate a new descriptor set.
4935     bool isCompute            = getState().getProgramExecutable()->isCompute();
4936     PipelineType pipelineType = isCompute ? PipelineType::Compute : PipelineType::Graphics;
4937     bool newPoolAllocated;
4938     ANGLE_TRY(mDriverUniformsDescriptorPools[pipelineType].allocateSetsAndGetInfo(
4939         this, driverUniforms->descriptorSetLayout.get().ptr(), 1,
4940         &driverUniforms->descriptorPoolBinding, &driverUniforms->descriptorSet, &newPoolAllocated));
4941     mContextPerfCounters.descriptorSetsAllocated[pipelineType]++;
4942 
4943     // Clear descriptor set cache. It may no longer be valid.
4944     if (newPoolAllocated)
4945     {
4946         driverUniforms->descriptorSetCache.clear();
4947     }
4948 
4949     // Update the driver uniform descriptor set.
4950     VkDescriptorBufferInfo &bufferInfo = allocDescriptorBufferInfo();
4951     bufferInfo.buffer                  = buffer->getBuffer().getHandle();
4952     bufferInfo.offset                  = 0;
4953     bufferInfo.range                   = driverUniformsSize;
4954 
4955     VkWriteDescriptorSet &writeInfo = allocWriteDescriptorSet();
4956     writeInfo.sType                 = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
4957     writeInfo.dstSet                = driverUniforms->descriptorSet;
4958     writeInfo.dstBinding            = 0;
4959     writeInfo.dstArrayElement       = 0;
4960     writeInfo.descriptorCount       = 1;
4961     writeInfo.descriptorType        = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
4962     writeInfo.pImageInfo            = nullptr;
4963     writeInfo.pTexelBufferView      = nullptr;
4964     writeInfo.pBufferInfo           = &bufferInfo;
4965 
4966     // Add into descriptor set cache
4967     driverUniforms->descriptorSetCache.insert(bufferSerial.getValue(),
4968                                               driverUniforms->descriptorSet);
4969 
4970     return angle::Result::Continue;
4971 }
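// Net effect of the caching above: a lookup hit keyed on the buffer serial reuses the
// previously written descriptor set as-is; a miss allocates a new set (clearing the cache
// if that required a fresh pool, since the old sets may be freed with it) and records a
// single dynamic uniform-buffer write before caching the set.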
4972 
4973 void ContextVk::handleError(VkResult errorCode,
4974                             const char *file,
4975                             const char *function,
4976                             unsigned int line)
4977 {
4978     ASSERT(errorCode != VK_SUCCESS);
4979 
4980     GLenum glErrorCode = DefaultGLErrorCode(errorCode);
4981 
4982     std::stringstream errorStream;
4983     errorStream << "Internal Vulkan error (" << errorCode << "): " << VulkanResultString(errorCode)
4984                 << ".";
4985 
4986     if (errorCode == VK_ERROR_DEVICE_LOST)
4987     {
4988         WARN() << errorStream.str();
4989         handleDeviceLost();
4990     }
4991 
4992     mErrors->handleError(glErrorCode, errorStream.str().c_str(), file, function, line);
4993 }
4994 
4995 angle::Result ContextVk::updateActiveTextures(const gl::Context *context)
4996 {
4997     const gl::ProgramExecutable *executable = mState.getProgramExecutable();
4998     ASSERT(executable);
4999 
5000     uint32_t prevMaxIndex = mActiveTexturesDesc.getMaxIndex();
5001     memset(mActiveTextures.data(), 0, sizeof(mActiveTextures[0]) * prevMaxIndex);
5002     mActiveTexturesDesc.reset();
5003 
5004     const gl::ActiveTexturesCache &textures        = mState.getActiveTexturesCache();
5005     const gl::ActiveTextureMask &activeTextures    = executable->getActiveSamplersMask();
5006     const gl::ActiveTextureTypeArray &textureTypes = executable->getActiveSamplerTypes();
5007 
5008     bool recreatePipelineLayout                     = false;
5009     FormatIndexMap<uint64_t> externalFormatIndexMap = {};
5010     FormatIndexMap<VkFormat> vkFormatIndexMap       = {};
5011     for (size_t textureUnit : activeTextures)
5012     {
5013         gl::Texture *texture        = textures[textureUnit];
5014         gl::TextureType textureType = textureTypes[textureUnit];
5015         ASSERT(textureType != gl::TextureType::InvalidEnum);
5016 
5017         const bool isIncompleteTexture = texture == nullptr;
5018 
5019         // Null textures represent incomplete textures.
5020         if (isIncompleteTexture)
5021         {
5022             ANGLE_TRY(getIncompleteTexture(
5023                 context, textureType, executable->getSamplerFormatForTextureUnitIndex(textureUnit),
5024                 &texture));
5025         }
5026 
5027         TextureVk *textureVk = vk::GetImpl(texture);
5028         ASSERT(textureVk != nullptr);
5029 
5030         vk::TextureUnit &activeTexture = mActiveTextures[textureUnit];
5031 
5032         // Special handling of texture buffers.  They have a buffer attached instead of an image.
5033         if (textureType == gl::TextureType::Buffer)
5034         {
5035             activeTexture.texture = textureVk;
5036             mActiveTexturesDesc.update(textureUnit, textureVk->getBufferViewSerial(),
5037                                        vk::SamplerSerial());
5038 
5039             continue;
5040         }
5041 
5042         if (!isIncompleteTexture && texture->isDepthOrStencil() &&
5043             shouldSwitchToReadOnlyDepthFeedbackLoopMode(context, texture))
5044         {
5045             // Special handling for deferred clears.
5046             ANGLE_TRY(mDrawFramebuffer->flushDeferredClears(this));
5047 
5048             if (hasStartedRenderPass())
5049             {
5050                 if (!textureVk->getImage().hasRenderPassUsageFlag(
5051                         vk::RenderPassUsage::ReadOnlyAttachment))
5052                 {
5053                     // To enter the depth feedback loop, we must flush and start a new render pass.
5054                     // Otherwise the image stays in a writable layout and causes a validation error.
5055                     ANGLE_TRY(flushCommandsAndEndRenderPass());
5056                 }
5057                 else
5058                 {
5059                     mDrawFramebuffer->updateRenderPassReadOnlyDepthMode(this, mRenderPassCommands);
5060                 }
5061             }
5062 
5063             mDrawFramebuffer->setReadOnlyDepthFeedbackLoopMode(true);
5064         }
5065 
5066         gl::Sampler *sampler       = mState.getSampler(static_cast<uint32_t>(textureUnit));
5067         const SamplerVk *samplerVk = sampler ? vk::GetImpl(sampler) : nullptr;
5068 
5069         const vk::SamplerHelper &samplerHelper =
5070             samplerVk ? samplerVk->getSampler() : textureVk->getSampler();
5071         const gl::SamplerState &samplerState =
5072             sampler ? sampler->getSamplerState() : texture->getSamplerState();
5073         activeTexture.texture    = textureVk;
5074         activeTexture.sampler    = &samplerHelper;
5075         activeTexture.srgbDecode = samplerState.getSRGBDecode();
5076 
5077         if (activeTexture.srgbDecode == GL_SKIP_DECODE_EXT)
5078         {
5079             // Make sure we use the MUTABLE bit for the storage.  Because "skip decode" is a
5080             // sampler state, we might not have caught this setting in TextureVk::syncState.
5081             ANGLE_TRY(textureVk->ensureMutable(this));
5082         }
5083 
5084         vk::ImageOrBufferViewSubresourceSerial imageViewSerial =
5085             textureVk->getImageViewSubresourceSerial(samplerState);
5086         mActiveTexturesDesc.update(textureUnit, imageViewSerial, samplerHelper.getSamplerSerial());
5087 
5088         if (textureVk->getImage().hasImmutableSampler())
5089         {
5090             uint64_t externalFormat = textureVk->getImage().getExternalFormat();
5091             VkFormat vkFormat       = textureVk->getImage().getFormat().actualImageVkFormat();
5092             if (externalFormat != 0)
5093             {
5094                 externalFormatIndexMap[externalFormat] = static_cast<uint32_t>(textureUnit);
5095             }
5096             else
5097             {
5098                 ASSERT(vkFormat != 0);
5099                 vkFormatIndexMap[vkFormat] = static_cast<uint32_t>(textureUnit);
5100             }
5101         }
5102 
5103         recreatePipelineLayout =
5104             textureVk->getAndResetImmutableSamplerDirtyState() || recreatePipelineLayout;
5105     }
5106 
5107     if (!mExecutable->isImmutableSamplerFormatCompatible(externalFormatIndexMap, vkFormatIndexMap))
5108     {
5109         recreatePipelineLayout = true;
5110     }
5111 
5112     // Recreate the pipeline layout, if necessary.
5113     if (recreatePipelineLayout)
5114     {
5115         ANGLE_TRY(mExecutable->createPipelineLayout(context, &mActiveTextures));
5116 
5117         // The default uniforms descriptor set was reset during createPipelineLayout(), so mark them
5118         // dirty to get everything reallocated/rebound before the next draw.
5119         if (executable->hasDefaultUniforms())
5120         {
5121             if (mProgram)
5122             {
5123                 mProgram->setAllDefaultUniformsDirty();
5124             }
5125             else if (mProgramPipeline)
5126             {
5127                 mProgramPipeline->setAllDefaultUniformsDirty(context->getState());
5128             }
5129         }
5130     }
5131 
5132     return angle::Result::Continue;
5133 }
5134 
5135 angle::Result ContextVk::updateActiveImages(vk::CommandBufferHelper *commandBufferHelper)
5136 {
5137     const gl::State &glState                = mState;
5138     const gl::ProgramExecutable *executable = glState.getProgramExecutable();
5139     ASSERT(executable);
5140 
5141     mActiveImages.fill(nullptr);
5142 
5143     const gl::ActiveTextureMask &activeImages = executable->getActiveImagesMask();
5144     const gl::ActiveTextureArray<gl::ShaderBitSet> &activeImageShaderBits =
5145         executable->getActiveImageShaderBits();
5146 
5147     // Note: currently, the image layout is transitioned entirely even if only one level or layer is
5148     // used.  This is an issue if one subresource of the image is used as framebuffer attachment and
5149     // the other as image.  This is a similar issue to http://anglebug.com/2914.  Another issue
5150     // however is if multiple subresources of the same image are used at the same time.
5151     // Inefficiencies aside, setting write dependency on the same image multiple times is not
5152     // supported.  The following makes sure write dependencies are set only once per image.
5153     std::set<vk::ImageHelper *> alreadyProcessed;
5154 
5155     for (size_t imageUnitIndex : activeImages)
5156     {
5157         const gl::ImageUnit &imageUnit = glState.getImageUnit(imageUnitIndex);
5158         const gl::Texture *texture     = imageUnit.texture.get();
5159         if (texture == nullptr)
5160         {
5161             continue;
5162         }
5163 
5164         TextureVk *textureVk          = vk::GetImpl(texture);
5165         mActiveImages[imageUnitIndex] = textureVk;
5166 
5167         // The image should be flushed and ready to use at this point. There may still be
5168         // lingering staged updates in its staging buffer for unused texture mip levels or
5169         // layers. Therefore we can't verify it has no staged updates right here.
5170         gl::ShaderBitSet shaderStages = activeImageShaderBits[imageUnitIndex];
5171         ASSERT(shaderStages.any());
5172 
5173         // Special handling of texture buffers.  They have a buffer attached instead of an image.
5174         if (texture->getType() == gl::TextureType::Buffer)
5175         {
5176             BufferVk *bufferVk        = vk::GetImpl(textureVk->getBuffer().get());
5177             VkDeviceSize bufferOffset = 0;
5178             vk::BufferHelper &buffer  = bufferVk->getBufferAndOffset(&bufferOffset);
5179 
5180             // TODO: accept multiple stages in bufferWrite.  http://anglebug.com/3573
5181             for (gl::ShaderType stage : shaderStages)
5182             {
5183                 commandBufferHelper->bufferWrite(
5184                     this, VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
5185                     vk::GetPipelineStage(stage), vk::AliasingMode::Disallowed, &buffer);
5186             }
5187 
5188             textureVk->retainBufferViews(&mResourceUseList);
5189 
5190             continue;
5191         }
5192 
5193         vk::ImageHelper *image = &textureVk->getImage();
5194 
5195         if (alreadyProcessed.find(image) != alreadyProcessed.end())
5196         {
5197             continue;
5198         }
5199         alreadyProcessed.insert(image);
5200 
5201         vk::ImageLayout imageLayout;
5202         gl::ShaderType firstShader = shaderStages.first();
5203         gl::ShaderType lastShader  = shaderStages.last();
5204         shaderStages.reset(firstShader);
5205         shaderStages.reset(lastShader);
5206         // We barrier against either:
5207         // - Vertex only
5208         // - Fragment only
5209         // - Pre-fragment only (vertex, geometry and tessellation together)
5210         if (shaderStages.any() || firstShader != lastShader)
5211         {
5212             imageLayout = lastShader == gl::ShaderType::Fragment
5213                               ? vk::ImageLayout::AllGraphicsShadersWrite
5214                               : vk::ImageLayout::PreFragmentShadersWrite;
5215         }
5216         else
5217         {
5218             imageLayout = kShaderWriteImageLayouts[firstShader];
5219         }
5220 
5221         VkImageAspectFlags aspectFlags = image->getAspectFlags();
5222 
5223         uint32_t layerStart = 0;
5224         uint32_t layerCount = image->getLayerCount();
5225         if (!imageUnit.layered)
5226         {
5227             layerStart = imageUnit.layer;  // a non-layered binding accesses a single layer
5228             layerCount = 1;
5229         }
5230 
5231         commandBufferHelper->imageWrite(
5232             this, gl::LevelIndex(static_cast<uint32_t>(imageUnit.level)), layerStart, layerCount,
5233             aspectFlags, imageLayout, vk::AliasingMode::Allowed, image);
5234     }
5235 
5236     return angle::Result::Continue;
5237 }
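// A few examples of the layout selection above (stage sets are hypothetical):
//
//     {Vertex}            -> kShaderWriteImageLayouts[Vertex]    (single stage)
//     {Fragment}          -> kShaderWriteImageLayouts[Fragment]  (single stage)
//     {Vertex, Geometry}  -> PreFragmentShadersWrite             (no fragment use)
//     {Vertex, Fragment}  -> AllGraphicsShadersWrite             (fragment + pre-fragment)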
5238 
5239 bool ContextVk::hasRecordedCommands()
5240 {
5241     ASSERT(mOutsideRenderPassCommands && mRenderPassCommands);
5242     return !mOutsideRenderPassCommands->empty() || mRenderPassCommands->started();
5243 }
5244 
5245 angle::Result ContextVk::flushImpl(const vk::Semaphore *signalSemaphore)
5246 {
5247     ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::flushImpl");
5248 
5249     // We must set this to false before calling flushCommandsAndEndRenderPass to prevent it from
5250     // calling back to flushImpl.
5251     mHasDeferredFlush = false;
5252     getShareGroupVk()->clearSyncObjectPendingFlush();
5253 
5254     ANGLE_TRY(flushCommandsAndEndRenderPass());
5255 
5256     if (mIsAnyHostVisibleBufferWritten)
5257     {
5258         // Make sure all writes to host-visible buffers are flushed.  We have no way of knowing
5259         // whether any buffer will be mapped for readback in the future, and we can't afford to
5260         // flush and wait on a one-pipeline-barrier command buffer on every map().
5261         VkMemoryBarrier memoryBarrier = {};
5262         memoryBarrier.sType           = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
5263         memoryBarrier.srcAccessMask   = VK_ACCESS_MEMORY_WRITE_BIT;
5264         memoryBarrier.dstAccessMask   = VK_ACCESS_HOST_READ_BIT | VK_ACCESS_HOST_WRITE_BIT;
5265 
5266         mOutsideRenderPassCommands->getCommandBuffer().memoryBarrier(
5267             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_HOST_BIT, &memoryBarrier);
5268         mIsAnyHostVisibleBufferWritten = false;
5269     }
5270 
5271     if (mGpuEventsEnabled)
5272     {
5273         EventName eventName = GetTraceEventName("Primary", mPerfCounters.primaryBuffers);
5274         ANGLE_TRY(traceGpuEvent(&mOutsideRenderPassCommands->getCommandBuffer(),
5275                                 TRACE_EVENT_PHASE_END, eventName));
5276     }
5277     ANGLE_TRY(flushOutsideRenderPassCommands());
5278 
5279     // We must add the per-context dynamic buffers to mResourceUseList before submission so that
5280     // they are retained properly until the GPU completes. We do not add the current buffer to
5281     // mResourceUseList since it is never reused or freed until the context is destroyed, at which
5282     // point we always wait for the GPU to finish before destroying the dynamic buffers.
5283     for (DriverUniformsDescriptorSet &driverUniform : mDriverUniforms)
5284     {
5285         driverUniform.dynamicBuffer.releaseInFlightBuffersToResourceUseList(this);
5286     }
5287     mDefaultUniformStorage.releaseInFlightBuffersToResourceUseList(this);
5288     mStagingBuffer.releaseInFlightBuffersToResourceUseList(this);
5289 
5290     ANGLE_TRY(submitFrame(signalSemaphore));
5291 
5292     mPerfCounters.renderPasses                           = 0;
5293     mPerfCounters.writeDescriptorSets                    = 0;
5294     mPerfCounters.flushedOutsideRenderPassCommandBuffers = 0;
5295     mPerfCounters.resolveImageCommands                   = 0;
5296 
5297     ASSERT(mWaitSemaphores.empty());
5298     ASSERT(mWaitSemaphoreStageMasks.empty());
5299 
5300     mPerfCounters.primaryBuffers++;
5301 
5302     if (mGpuEventsEnabled)
5303     {
5304         EventName eventName = GetTraceEventName("Primary", mPerfCounters.primaryBuffers);
5305         ANGLE_TRY(traceGpuEvent(&mOutsideRenderPassCommands->getCommandBuffer(),
5306                                 TRACE_EVENT_PHASE_BEGIN, eventName));
5307     }
5308 
5309     return angle::Result::Continue;
5310 }
5311 
5312 angle::Result ContextVk::finishImpl()
5313 {
5314     ANGLE_TRACE_EVENT0("gpu.angle", "ContextVk::finishImpl");
5315 
5316     ANGLE_TRY(flushImpl(nullptr));
5317     ANGLE_TRY(mRenderer->finish(this, hasProtectedContent()));
5318 
5319     clearAllGarbage();
5320 
5321     if (mGpuEventsEnabled)
5322     {
5323         // This loop should in practice execute once since the queue is already idle.
5324         while (mInFlightGpuEventQueries.size() > 0)
5325         {
5326             ANGLE_TRY(checkCompletedGpuEvents());
5327         }
5328         // Recalculate the CPU/GPU time difference to account for clock drifting.  Avoid
5329         // unnecessary synchronization if there is no event to be adjusted (happens when
5330         // finish() gets called multiple times towards the end of the application).
5331         if (mGpuEvents.size() > 0)
5332         {
5333             ANGLE_TRY(synchronizeCpuGpuTime());
5334         }
5335     }
5336 
5337     return angle::Result::Continue;
5338 }
5339 
5340 void ContextVk::addWaitSemaphore(VkSemaphore semaphore, VkPipelineStageFlags stageMask)
5341 {
5342     mWaitSemaphores.push_back(semaphore);
5343     mWaitSemaphoreStageMasks.push_back(stageMask);
5344 }
5345 
5346 const vk::CommandPool &ContextVk::getCommandPool() const
5347 {
5348     return mCommandPool;
5349 }
5350 
5351 bool ContextVk::isSerialInUse(Serial serial) const
5352 {
5353     return serial > getLastCompletedQueueSerial();
5354 }
5355 
5356 angle::Result ContextVk::checkCompletedCommands()
5357 {
5358     return mRenderer->checkCompletedCommands(this);
5359 }
5360 
5361 angle::Result ContextVk::finishToSerial(Serial serial)
5362 {
5363     return mRenderer->finishToSerial(this, serial);
5364 }
5365 
5366 angle::Result ContextVk::getCompatibleRenderPass(const vk::RenderPassDesc &desc,
5367                                                  vk::RenderPass **renderPassOut)
5368 {
5369     // Note: Each context has its own RenderPassCache, so no locking is needed.
5370     return mRenderPassCache.getCompatibleRenderPass(this, desc, renderPassOut);
5371 }
5372 
5373 angle::Result ContextVk::getRenderPassWithOps(const vk::RenderPassDesc &desc,
5374                                               const vk::AttachmentOpsArray &ops,
5375                                               vk::RenderPass **renderPassOut)
5376 {
5377     // Note: Each context has its own RenderPassCache, so no locking is needed.
5378     return mRenderPassCache.getRenderPassWithOps(this, desc, ops, renderPassOut);
5379 }
5380 
5381 angle::Result ContextVk::getTimestamp(uint64_t *timestampOut)
5382 {
5383     // The intent of this function is to query the timestamp without stalling the GPU.
5384     // Currently, that seems impossible, so instead, we are going to make a small submission
5385     // with just a timestamp query.  First, the disjoint timer query extension says:
5386     //
5387     // > This will return the GL time after all previous commands have reached the GL server but
5388     // have not yet necessarily executed.
5389     //
5390     // The previous commands may be deferred at the moment and not yet flushed. The wording allows
5391     // us to make a submission to get the timestamp without flushing.
5392     //
5393     // Second:
5394     //
5395     // > By using a combination of this synchronous get command and the asynchronous timestamp
5396     // query object target, applications can measure the latency between when commands reach the
5397     // GL server and when they are realized in the framebuffer.
5398     //
5399     // This fits with the above strategy as well, although we are possibly
5400     // introducing a GPU bubble.  This function directly generates a command buffer and submits
5401     // it instead of using the other member functions.  This is to avoid changing any state,
5402     // such as the queue serial.
5403 
5404     // Create a query used to receive the GPU timestamp
5405     VkDevice device = getDevice();
5406     vk::DeviceScoped<vk::DynamicQueryPool> timestampQueryPool(device);
5407     vk::QueryHelper timestampQuery;
5408     ANGLE_TRY(timestampQueryPool.get().init(this, VK_QUERY_TYPE_TIMESTAMP, 1));
5409     ANGLE_TRY(timestampQueryPool.get().allocateQuery(this, &timestampQuery, 1));
5410 
5411     vk::ResourceUseList scratchResourceUseList;
5412 
5413     // Record the command buffer
5414     vk::DeviceScoped<vk::PrimaryCommandBuffer> commandBatch(device);
5415     vk::PrimaryCommandBuffer &commandBuffer = commandBatch.get();
5416 
5417     ANGLE_TRY(mRenderer->getCommandBufferOneOff(this, hasProtectedContent(), &commandBuffer));
5418 
5419     timestampQuery.writeTimestampToPrimary(this, &commandBuffer);
5420     timestampQuery.retain(&scratchResourceUseList);
5421     ANGLE_VK_TRY(this, commandBuffer.end());
5422 
5423     // Create fence for the submission
5424     VkFenceCreateInfo fenceInfo = {};
5425     fenceInfo.sType             = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
5426     fenceInfo.flags             = 0;
5427 
5428     vk::DeviceScoped<vk::Fence> fence(device);
5429     ANGLE_VK_TRY(this, fence.get().init(device, fenceInfo));
5430 
5431     Serial throwAwaySerial;
5432     ANGLE_TRY(mRenderer->queueSubmitOneOff(this, std::move(commandBuffer), hasProtectedContent(),
5433                                            mContextPriority, &fence.get(),
5434                                            vk::SubmitPolicy::EnsureSubmitted, &throwAwaySerial));
5435 
5436     // Wait for the submission to finish.  Given no semaphores, there is hope that it would execute
5437     // in parallel with what's already running on the GPU.
5438     ANGLE_VK_TRY(this, fence.get().wait(device, mRenderer->getMaxFenceWaitTimeNs()));
5439     scratchResourceUseList.releaseResourceUsesAndUpdateSerials(throwAwaySerial);
5440 
5441     // Get the query results
5442     vk::QueryResult result(1);
5443     ANGLE_TRY(timestampQuery.getUint64Result(this, &result));
5444     *timestampOut = result.getResult(vk::QueryResult::kDefaultResultIndex);
5445     timestampQueryPool.get().freeQuery(this, &timestampQuery);
5446 
5447     // Convert results to nanoseconds.
5448     *timestampOut = static_cast<uint64_t>(
5449         *timestampOut *
5450         static_cast<double>(getRenderer()->getPhysicalDeviceProperties().limits.timestampPeriod));
5451 
5452     return angle::Result::Continue;
5453 }
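// As a numeric sketch of the conversion above: with a hypothetical timestampPeriod of
// 52.08 ns per tick, a raw result of 1000000 ticks is returned as
// 1000000 * 52.08 = 52080000 ns (about 52 ms).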
5454 
5455 void ContextVk::invalidateDefaultAttribute(size_t attribIndex)
5456 {
5457     mDirtyDefaultAttribsMask.set(attribIndex);
5458     mGraphicsDirtyBits.set(DIRTY_BIT_DEFAULT_ATTRIBS);
5459 }
5460 
5461 void ContextVk::invalidateDefaultAttributes(const gl::AttributesMask &dirtyMask)
5462 {
5463     if (dirtyMask.any())
5464     {
5465         mDirtyDefaultAttribsMask |= dirtyMask;
5466         mGraphicsDirtyBits.set(DIRTY_BIT_DEFAULT_ATTRIBS);
5467     }
5468 }
5469 
5470 angle::Result ContextVk::updateDefaultAttribute(size_t attribIndex)
5471 {
5472     vk::DynamicBuffer &defaultBuffer = mDefaultAttribBuffers[attribIndex];
5473 
5474     defaultBuffer.releaseInFlightBuffers(this);
5475 
5476     uint8_t *ptr;
5477     VkBuffer bufferHandle = VK_NULL_HANDLE;
5478     VkDeviceSize offset   = 0;
5479     ANGLE_TRY(
5480         defaultBuffer.allocate(this, kDefaultValueSize, &ptr, &bufferHandle, &offset, nullptr));
5481 
5482     const gl::State &glState = mState;
5483     const gl::VertexAttribCurrentValueData &defaultValue =
5484         glState.getVertexAttribCurrentValues()[attribIndex];
5485     memcpy(ptr, &defaultValue.Values, kDefaultValueSize);
5486     ASSERT(!defaultBuffer.isCoherent());
5487     ANGLE_TRY(defaultBuffer.flush(this));
5488 
5489     return mVertexArray->updateDefaultAttrib(this, attribIndex, bufferHandle,
5490                                              defaultBuffer.getCurrentBuffer(),
5491                                              static_cast<uint32_t>(offset));
5492 }
5493 
5494 vk::DescriptorSetLayoutDesc ContextVk::getDriverUniformsDescriptorSetDesc(
5495     VkShaderStageFlags shaderStages) const
5496 {
5497     vk::DescriptorSetLayoutDesc desc;
5498     desc.update(0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1, shaderStages, nullptr);
5499     return desc;
5500 }
5501 
5502 bool ContextVk::shouldEmulateSeamfulCubeMapSampling() const
5503 {
5504     // Only allow seamful cube map sampling in non-WebGL ES2.
5505     if (mState.getClientMajorVersion() != 2 || mState.isWebGL())
5506     {
5507         return false;
5508     }
5509 
5510     if (mRenderer->getFeatures().disallowSeamfulCubeMapEmulation.enabled)
5511     {
5512         return false;
5513     }
5514 
5515     return true;
5516 }
5517 
5518 angle::Result ContextVk::onBufferReleaseToExternal(const vk::BufferHelper &buffer)
5519 {
5520     if (mRenderPassCommands->usesBuffer(buffer))
5521     {
5522         return flushCommandsAndEndRenderPass();
5523     }
5524     return angle::Result::Continue;
5525 }
5526 
5527 angle::Result ContextVk::onImageReleaseToExternal(const vk::ImageHelper &image)
5528 {
5529     if (IsRenderPassStartedAndUsesImage(*mRenderPassCommands, image))
5530     {
5531         return flushCommandsAndEndRenderPass();
5532     }
5533     return angle::Result::Continue;
5534 }
5535 
5536 angle::Result ContextVk::beginNewRenderPass(
5537     const vk::Framebuffer &framebuffer,
5538     const gl::Rectangle &renderArea,
5539     const vk::RenderPassDesc &renderPassDesc,
5540     const vk::AttachmentOpsArray &renderPassAttachmentOps,
5541     const vk::PackedAttachmentCount colorAttachmentCount,
5542     const vk::PackedAttachmentIndex depthStencilAttachmentIndex,
5543     const vk::PackedClearValuesArray &clearValues,
5544     vk::CommandBuffer **commandBufferOut)
5545 {
5546     // First, end any currently outstanding render pass.
5547     ANGLE_TRY(flushCommandsAndEndRenderPass());
5548 
5549     mRenderPassCommands->beginRenderPass(
5550         framebuffer, renderArea, renderPassDesc, renderPassAttachmentOps, colorAttachmentCount,
5551         depthStencilAttachmentIndex, clearValues, commandBufferOut);
5552     mPerfCounters.renderPasses++;
5553 
5554     return angle::Result::Continue;
5555 }
5556 
5557 angle::Result ContextVk::startRenderPass(gl::Rectangle renderArea,
5558                                          vk::CommandBuffer **commandBufferOut,
5559                                          bool *renderPassDescChangedOut)
5560 {
5561     ANGLE_TRY(mDrawFramebuffer->startNewRenderPass(this, renderArea, &mRenderPassCommandBuffer,
5562                                                    renderPassDescChangedOut));
5563 
5564     // Make sure the render pass is not restarted if it is started by UtilsVk (as opposed to
5565     // setupDraw(), which clears this bit automatically).
5566     mGraphicsDirtyBits.reset(DIRTY_BIT_RENDER_PASS);
5567 
5568     ANGLE_TRY(resumeRenderPassQueriesIfActive());
5569 
5570     const gl::DepthStencilState &dsState = mState.getDepthStencilState();
5571     vk::ResourceAccess depthAccess       = GetDepthAccess(dsState);
5572     vk::ResourceAccess stencilAccess     = GetStencilAccess(dsState);
5573     mRenderPassCommands->onDepthAccess(depthAccess);
5574     mRenderPassCommands->onStencilAccess(stencilAccess);
5575 
5576     mDrawFramebuffer->updateRenderPassReadOnlyDepthMode(this, mRenderPassCommands);
5577 
5578     if (commandBufferOut)
5579     {
5580         *commandBufferOut = mRenderPassCommandBuffer;
5581     }
5582 
5583     return angle::Result::Continue;
5584 }
5585 
5586 void ContextVk::startNextSubpass()
5587 {
5588     ASSERT(hasStartedRenderPass());
5589 
5590     mRenderPassCommands->getCommandBuffer().nextSubpass(VK_SUBPASS_CONTENTS_INLINE);
5591 
5592     // The graphics pipelines are bound to a subpass, so update the subpass as well.
5593     mGraphicsPipelineDesc->nextSubpass(&mGraphicsPipelineTransition);
5594 }
5595 
5596 void ContextVk::restoreFinishedRenderPass(vk::Framebuffer *framebuffer)
5597 {
5598     if (mRenderPassCommandBuffer != nullptr)
5599     {
5600         // The render pass isn't finished yet, so nothing to restore.
5601         return;
5602     }
5603 
5604     if (mRenderPassCommands->started() &&
5605         mRenderPassCommands->getFramebufferHandle() == framebuffer->getHandle())
5606     {
5607         // There is already a render pass open for this framebuffer, so just restore the
5608         // pointer rather than starting a whole new render pass. One possible path here
5609         // is if the draw framebuffer binding has changed from FBO A -> B -> A, without
5610         // any commands that started a new render pass for FBO B (such as a clear being
5611         // issued that was deferred).
5612         mRenderPassCommandBuffer = &mRenderPassCommands->getCommandBuffer();
5613         ASSERT(hasStartedRenderPass());
5614     }
5615 }
5616 
5617 uint32_t ContextVk::getCurrentSubpassIndex() const
5618 {
5619     return mGraphicsPipelineDesc->getSubpass();
5620 }
5621 
5622 uint32_t ContextVk::getCurrentViewCount() const
5623 {
5624     ASSERT(mDrawFramebuffer);
5625     return mDrawFramebuffer->getRenderPassDesc().viewCount();
5626 }
5627 
5628 angle::Result ContextVk::flushCommandsAndEndRenderPassImpl()
5629 {
5630     // Ensure we flush the RenderPass *after* the prior commands.
5631     ANGLE_TRY(flushOutsideRenderPassCommands());
5632     ASSERT(mOutsideRenderPassCommands->empty());
5633 
5634     if (!mRenderPassCommands->started())
5635     {
5636         onRenderPassFinished();
5637         return angle::Result::Continue;
5638     }
5639 
5640     mCurrentTransformFeedbackBuffers.clear();
5641 
5642     // Reset serials for XFB if active.
5643     if (mState.isTransformFeedbackActiveUnpaused())
5644     {
5645         const gl::ProgramExecutable *executable = mState.getProgramExecutable();
5646         ASSERT(executable);
5647         size_t xfbBufferCount = executable->getTransformFeedbackBufferCount();
5648 
5649         TransformFeedbackVk *transformFeedbackVk =
5650             vk::GetImpl(mState.getCurrentTransformFeedback());
5651 
5652         populateTransformFeedbackBufferSet(xfbBufferCount, transformFeedbackVk->getBufferHelpers());
5653     }
5654 
5655     onRenderPassFinished();
5656 
5657     if (mGpuEventsEnabled)
5658     {
5659         EventName eventName = GetTraceEventName("RP", mPerfCounters.renderPasses);
5660         ANGLE_TRY(traceGpuEvent(&mOutsideRenderPassCommands->getCommandBuffer(),
5661                                 TRACE_EVENT_PHASE_BEGIN, eventName));
5662         ANGLE_TRY(flushOutsideRenderPassCommands());
5663     }
5664 
5665     addOverlayUsedBuffersCount(mRenderPassCommands);
5666 
5667     pauseTransformFeedbackIfActiveUnpaused();
5668 
5669     mRenderPassCommands->endRenderPass(this);
5670 
5671     if (vk::CommandBufferHelper::kEnableCommandStreamDiagnostics)
5672     {
5673         mRenderPassCommands->addCommandDiagnostics(this);
5674     }
5675 
5676     vk::RenderPass *renderPass = nullptr;
5677     ANGLE_TRY(getRenderPassWithOps(mRenderPassCommands->getRenderPassDesc(),
5678                                    mRenderPassCommands->getAttachmentOps(), &renderPass));
5679 
5680     ANGLE_TRY(mRenderer->flushRenderPassCommands(this, hasProtectedContent(), *renderPass,
5681                                                  &mRenderPassCommands));
5682 
5683     if (mGpuEventsEnabled)
5684     {
5685         EventName eventName = GetTraceEventName("RP", mPerfCounters.renderPasses);
5686         ANGLE_TRY(traceGpuEvent(&mOutsideRenderPassCommands->getCommandBuffer(),
5687                                 TRACE_EVENT_PHASE_END, eventName));
5688         ANGLE_TRY(flushOutsideRenderPassCommands());
5689     }
5690 
5691     if (mHasDeferredFlush)
5692     {
5693         // If a glFlush call was deferred in the middle of the render pass, perform it now.
5694         ANGLE_TRY(flushImpl(nullptr));
5695     }
5696 
5697     return angle::Result::Continue;
5698 }
5699 
5700 angle::Result ContextVk::flushCommandsAndEndRenderPass()
5701 {
5702     bool isRenderPassStarted = mRenderPassCommands->started();
5703 
5704     ANGLE_TRY(flushCommandsAndEndRenderPassImpl());
5705 
5706     // Set dirty bits if render pass was open (and thus has been closed).
5707     if (isRenderPassStarted)
5708     {
5709         mGraphicsDirtyBits |= mNewGraphicsCommandBufferDirtyBits;
5710 
5711         // Restart at subpass 0.
5712         mGraphicsPipelineDesc->resetSubpass(&mGraphicsPipelineTransition);
5713     }
5714 
5715     return angle::Result::Continue;
5716 }
5717 
5718 angle::Result ContextVk::flushDirtyGraphicsRenderPass(DirtyBits::Iterator *dirtyBitsIterator,
5719                                                       DirtyBits dirtyBitMask)
5720 {
5721     ASSERT(mRenderPassCommands->started());
5722 
5723     ANGLE_TRY(flushCommandsAndEndRenderPassImpl());
5724 
5725     // Set dirty bits that need processing on new render pass on the dirty bits iterator that's
5726     // being processed right now.
5727     dirtyBitsIterator->setLaterBits(mNewGraphicsCommandBufferDirtyBits & dirtyBitMask);
5728 
5729     // Additionally, make sure any dirty bits not included in the mask are left for future
5730     // processing.  Note that |dirtyBitMask| is removed from |mNewGraphicsCommandBufferDirtyBits|
5731     // after dirty bits are iterated, so there's no need to mask them out.
5732     mGraphicsDirtyBits |= mNewGraphicsCommandBufferDirtyBits;
5733 
5734     // Restart at subpass 0.
5735     mGraphicsPipelineDesc->resetSubpass(&mGraphicsPipelineTransition);
5736 
5737     return angle::Result::Continue;
5738 }
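// A hypothetical walk-through of the split above: if the render pass breaks mid-draw and
// mNewGraphicsCommandBufferDirtyBits contains DIRTY_BIT_RENDER_PASS and
// DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME, the bits covered by |dirtyBitMask| are re-queued on
// the current iterator through setLaterBits() and handled within this same draw, while any
// bits outside the mask stay in mGraphicsDirtyBits for the next draw call.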
5739 
5740 angle::Result ContextVk::syncExternalMemory()
5741 {
5742     VkMemoryBarrier memoryBarrier = {};
5743     memoryBarrier.sType           = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
5744     memoryBarrier.srcAccessMask   = VK_ACCESS_MEMORY_WRITE_BIT;
5745     memoryBarrier.dstAccessMask   = VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT;
5746 
5747     mOutsideRenderPassCommands->getCommandBuffer().memoryBarrier(
5748         VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, &memoryBarrier);
5749     return angle::Result::Continue;
5750 }
5751 
5752 void ContextVk::addCommandBufferDiagnostics(const std::string &commandBufferDiagnostics)
5753 {
5754     mCommandBufferDiagnostics.push_back(commandBufferDiagnostics);
5755 }
5756 
5757 void ContextVk::dumpCommandStreamDiagnostics()
5758 {
5759     std::ostream &out = std::cout;
5760 
5761     if (mCommandBufferDiagnostics.empty())
5762         return;
5763 
5764     out << "digraph {\n"
5765         << "  node [shape=plaintext fontname=\"Consolas\"]\n";
5766 
5767     for (size_t index = 0; index < mCommandBufferDiagnostics.size(); ++index)
5768     {
5769         const std::string &payload = mCommandBufferDiagnostics[index];
5770         out << "  cb" << index << " [label =\"" << payload << "\"];\n";
5771     }
5772 
5773     for (size_t index = 0; index < mCommandBufferDiagnostics.size() - 1; ++index)
5774     {
5775         out << "  cb" << index << " -> cb" << index + 1 << "\n";
5776     }
5777 
5778     mCommandBufferDiagnostics.clear();
5779 
5780     out << "}\n";
5781 }
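// The stream written above is Graphviz DOT; with two recorded command buffers the output
// would look roughly like this (label payloads abbreviated):
//
//     digraph {
//       node [shape=plaintext fontname="Consolas"]
//       cb0 [label ="..."];
//       cb1 [label ="..."];
//       cb0 -> cb1
//     }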
5782 
5783 void ContextVk::initIndexTypeMap()
5784 {
5785     // Init gles-vulkan index type map
5786     mIndexTypeMap[gl::DrawElementsType::UnsignedByte] =
5787         mRenderer->getFeatures().supportsIndexTypeUint8.enabled ? VK_INDEX_TYPE_UINT8_EXT
5788                                                                 : VK_INDEX_TYPE_UINT16;
5789     mIndexTypeMap[gl::DrawElementsType::UnsignedShort] = VK_INDEX_TYPE_UINT16;
5790     mIndexTypeMap[gl::DrawElementsType::UnsignedInt]   = VK_INDEX_TYPE_UINT32;
5791 }
5792 
5793 VkIndexType ContextVk::getVkIndexType(gl::DrawElementsType glIndexType) const
5794 {
5795     return mIndexTypeMap[glIndexType];
5796 }
5797 
5798 size_t ContextVk::getVkIndexTypeSize(gl::DrawElementsType glIndexType) const
5799 {
5800     gl::DrawElementsType elementsType = shouldConvertUint8VkIndexType(glIndexType)
5801                                             ? gl::DrawElementsType::UnsignedShort
5802                                             : glIndexType;
5803     ASSERT(elementsType < gl::DrawElementsType::EnumCount);
5804 
5805     // Use GetDrawElementsTypeSize() to get the size
5806     return static_cast<size_t>(gl::GetDrawElementsTypeSize(elementsType));
5807 }
5808 
5809 bool ContextVk::shouldConvertUint8VkIndexType(gl::DrawElementsType glIndexType) const
5810 {
5811     return (glIndexType == gl::DrawElementsType::UnsignedByte &&
5812             !mRenderer->getFeatures().supportsIndexTypeUint8.enabled);
5813 }
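// For example, on a device without VK_EXT_index_type_uint8, GL_UNSIGNED_BYTE indices are
// converted to 16-bit values, so:
//
//     getVkIndexType(gl::DrawElementsType::UnsignedByte);      // VK_INDEX_TYPE_UINT16
//     getVkIndexTypeSize(gl::DrawElementsType::UnsignedByte);  // 2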
5814 
5815 angle::Result ContextVk::flushOutsideRenderPassCommands()
5816 {
5817     if (mOutsideRenderPassCommands->empty())
5818     {
5819         return angle::Result::Continue;
5820     }
5821 
5822     addOverlayUsedBuffersCount(mOutsideRenderPassCommands);
5823 
5824     if (vk::CommandBufferHelper::kEnableCommandStreamDiagnostics)
5825     {
5826         mOutsideRenderPassCommands->addCommandDiagnostics(this);
5827     }
5828 
5829     ANGLE_TRY(mRenderer->flushOutsideRPCommands(this, hasProtectedContent(),
5830                                                 &mOutsideRenderPassCommands));
5831 
5832     // Make sure appropriate dirty bits are set, in case another thread makes a submission before
5833     // the next dispatch call.
5834     mComputeDirtyBits |= mNewComputeCommandBufferDirtyBits;
5835 
5836     mPerfCounters.flushedOutsideRenderPassCommandBuffers++;
5837     return angle::Result::Continue;
5838 }

angle::Result ContextVk::beginRenderPassQuery(QueryVk *queryVk)
{
    // Emit debug-util markers before calling the query command.
    ANGLE_TRY(handleGraphicsEventLog(rx::GraphicsEventCmdBuf::InRenderPassCmdBufQueryCmd));

    // To avoid complexity, we always start and end these queries inside the render pass.  If the
    // render pass has not yet started, the query is deferred until it does.
    if (mRenderPassCommandBuffer)
    {
        ANGLE_TRY(queryVk->getQueryHelper()->beginRenderPassQuery(this));

        // Update rasterizer discard emulation with primitives generated query if necessary.
        if (queryVk->getType() == gl::QueryType::PrimitivesGenerated)
        {
            updateRasterizerDiscardEnabled(true);
        }
    }

    gl::QueryType type = queryVk->getType();

    ASSERT(mActiveRenderPassQueries[type] == nullptr);
    mActiveRenderPassQueries[type] = queryVk;

    return angle::Result::Continue;
}

angle::Result ContextVk::endRenderPassQuery(QueryVk *queryVk)
{
    // Emit debug-util markers before calling the query command.
    ANGLE_TRY(handleGraphicsEventLog(rx::GraphicsEventCmdBuf::InRenderPassCmdBufQueryCmd));

    if (mRenderPassCommandBuffer)
    {
        queryVk->getQueryHelper()->endRenderPassQuery(this);

        // Update rasterizer discard emulation with primitives generated query if necessary.
        if (queryVk->getType() == gl::QueryType::PrimitivesGenerated)
        {
            updateRasterizerDiscardEnabled(false);
        }
    }

    gl::QueryType type = queryVk->getType();

    ASSERT(mActiveRenderPassQueries[type] == queryVk);
    mActiveRenderPassQueries[type] = nullptr;

    return angle::Result::Continue;
}

void ContextVk::pauseRenderPassQueriesIfActive()
{
    if (mRenderPassCommandBuffer == nullptr)
    {
        return;
    }

    for (QueryVk *activeQuery : mActiveRenderPassQueries)
    {
        if (activeQuery)
        {
            activeQuery->onRenderPassEnd(this);

            // No need to update rasterizer discard emulation with primitives generated query.
            // The state will be updated when the next render pass starts.
        }
    }
}

angle::Result ContextVk::resumeRenderPassQueriesIfActive()
{
    ASSERT(mRenderPassCommandBuffer);

    // Note: these queries should be processed in order.  See comment in
    // QueryVk::onRenderPassStart.
    for (QueryVk *activeQuery : mActiveRenderPassQueries)
    {
        if (activeQuery)
        {
            ANGLE_TRY(activeQuery->onRenderPassStart(this));

            // Update rasterizer discard emulation with primitives generated query if necessary.
            if (activeQuery->getType() == gl::QueryType::PrimitivesGenerated)
            {
                updateRasterizerDiscardEnabled(true);
            }
        }
    }

    return angle::Result::Continue;
}

bool ContextVk::doesPrimitivesGeneratedQuerySupportRasterizerDiscard() const
{
    // TODO: If primitives generated is implemented with VK_EXT_primitives_generated_query, check
    // the corresponding feature bit.  http://anglebug.com/5430.

    // If primitives generated is emulated with a pipeline statistics query, it's unknown which
    // hardware supports rasterizer discard during the query.  Conservatively assume none does.
    if (getFeatures().supportsPipelineStatisticsQuery.enabled)
    {
        return false;
    }

    return true;
}

bool ContextVk::isEmulatingRasterizerDiscardDuringPrimitivesGeneratedQuery(
    bool isPrimitivesGeneratedQueryActive) const
{
    return isPrimitivesGeneratedQueryActive && mState.isRasterizerDiscardEnabled() &&
           !doesPrimitivesGeneratedQuerySupportRasterizerDiscard();
}

QueryVk *ContextVk::getActiveRenderPassQuery(gl::QueryType queryType) const
{
    return mActiveRenderPassQueries[queryType];
}

bool ContextVk::isRobustResourceInitEnabled() const
{
    return mState.isRobustResourceInitEnabled();
}

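// Note: the pInfo template parameter below is a pointer-to-member selecting which
// VkWriteDescriptorSet field (pBufferInfo or pImageInfo) points into descriptorVector, so a
// single implementation can service both descriptor info arrays.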
template <typename T, const T *VkWriteDescriptorSet::*pInfo>
void ContextVk::growDesciptorCapacity(std::vector<T> *descriptorVector, size_t newSize)
{
    const T *const oldInfoStart = descriptorVector->empty() ? nullptr : &(*descriptorVector)[0];
    size_t newCapacity          = std::max(descriptorVector->capacity() << 1, newSize);
    descriptorVector->reserve(newCapacity);

    if (oldInfoStart)
    {
        // Growing the vector may have reallocated its storage, invalidating the
        // BufferInfo/ImageInfo pointers held by pending entries in mWriteDescriptorSets.
        // Patch them to point into the new storage.
        for (VkWriteDescriptorSet &set : mWriteDescriptorSets)
        {
            if (set.*pInfo)
            {
                size_t index = set.*pInfo - oldInfoStart;
                set.*pInfo   = &(*descriptorVector)[index];
            }
        }
    }
}

template <typename T, const T *VkWriteDescriptorSet::*pInfo>
T *ContextVk::allocDescriptorInfos(std::vector<T> *descriptorVector, size_t count)
{
    size_t oldSize = descriptorVector->size();
    size_t newSize = oldSize + count;
    if (newSize > descriptorVector->capacity())
    {
        // We have reached capacity; grow the storage and patch the pending descriptor set writes
        // with pointers into the new storage.
        growDesciptorCapacity<T, pInfo>(descriptorVector, newSize);
    }
    descriptorVector->resize(newSize);
    return &(*descriptorVector)[oldSize];
}

VkDescriptorBufferInfo *ContextVk::allocDescriptorBufferInfos(size_t count)
{
    return allocDescriptorInfos<VkDescriptorBufferInfo, &VkWriteDescriptorSet::pBufferInfo>(
        &mDescriptorBufferInfos, count);
}

VkDescriptorImageInfo *ContextVk::allocDescriptorImageInfos(size_t count)
{
    return allocDescriptorInfos<VkDescriptorImageInfo, &VkWriteDescriptorSet::pImageInfo>(
        &mDescriptorImageInfos, count);
}

VkWriteDescriptorSet *ContextVk::allocWriteDescriptorSets(size_t count)
{
    mPerfCounters.writeDescriptorSets += count;

    size_t oldSize = mWriteDescriptorSets.size();
    size_t newSize = oldSize + count;
    mWriteDescriptorSets.resize(newSize);
    return &mWriteDescriptorSets[oldSize];
}
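
// Illustrative usage sketch (assumed, not from this file): a caller typically allocates the info
// struct and the write struct together, e.g.:
//
//   VkDescriptorBufferInfo *bufferInfo = contextVk->allocDescriptorBufferInfos(1);
//   VkWriteDescriptorSet *writeInfo    = contextVk->allocWriteDescriptorSets(1);
//   writeInfo->sType                   = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
//   writeInfo->pBufferInfo             = bufferInfo;
//
// growDesciptorCapacity() keeps pBufferInfo valid if mDescriptorBufferInfos later reallocates.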

void ContextVk::setDefaultUniformBlocksMinSizeForTesting(size_t minSize)
{
    mDefaultUniformStorage.setMinimumSizeForTesting(minSize);
}

angle::Result ContextVk::initializeMultisampleTextureToBlack(const gl::Context *context,
                                                             gl::Texture *glTexture)
{
    ASSERT(glTexture->getType() == gl::TextureType::_2DMultisample);
    TextureVk *textureVk = vk::GetImpl(glTexture);

    return textureVk->initializeContents(context, gl::ImageIndex::Make2DMultisample());
}

void ContextVk::onProgramExecutableReset(ProgramExecutableVk *executableVk)
{
    const gl::ProgramExecutable *executable = getState().getProgramExecutable();
    if (!executable)
    {
        return;
    }

    // Only do this for the currently bound ProgramExecutableVk, since Program A can be linked
    // while Program B is currently in use and we don't want to reset/invalidate Program B's
    // pipeline.
    if (executableVk != mExecutable)
    {
        return;
    }

    // Reset ContextVk::mCurrentGraphicsPipeline, since programInfo.release() freed the
    // PipelineHelper it currently points to.
    // TODO(http://anglebug.com/5624): rework updateActiveTextures(), createPipelineLayout(),
    // handleDirtyGraphicsPipeline(), and ProgramPipelineVk::link().
    resetCurrentGraphicsPipeline();

    if (executable->isCompute())
    {
        invalidateCurrentComputePipeline();
    }
    else
    {
        invalidateCurrentGraphicsPipeline();
    }
}

angle::Result ContextVk::updateRenderPassDepthStencilAccess()
{
    if (hasStartedRenderPass() && mDrawFramebuffer->getDepthStencilRenderTarget())
    {
        const gl::DepthStencilState &dsState = mState.getDepthStencilState();
        vk::ResourceAccess depthAccess       = GetDepthAccess(dsState);
        vk::ResourceAccess stencilAccess     = GetStencilAccess(dsState);

        if ((depthAccess == vk::ResourceAccess::Write ||
             stencilAccess == vk::ResourceAccess::Write) &&
            mDrawFramebuffer->isReadOnlyDepthFeedbackLoopMode())
        {
            // If we are switching out of read-only mode while in a feedback loop, we must end the
            // render pass here.  Otherwise, moving the attachment to a writable layout would
            // create a write feedback loop, which is illegal in Vulkan and triggers validation
            // errors complaining that the depth texture is using the writable layout.
            ANGLE_TRY(flushCommandsAndEndRenderPass());
            // Clear read-only depth feedback mode.
            mDrawFramebuffer->setReadOnlyDepthFeedbackLoopMode(false);
        }
        else
        {
            mRenderPassCommands->onDepthAccess(depthAccess);
            mRenderPassCommands->onStencilAccess(stencilAccess);

            mDrawFramebuffer->updateRenderPassReadOnlyDepthMode(this, mRenderPassCommands);
        }
    }

    return angle::Result::Continue;
}

bool ContextVk::shouldSwitchToReadOnlyDepthFeedbackLoopMode(const gl::Context *context,
                                                            gl::Texture *texture) const
{
    ASSERT(texture->isDepthOrStencil());

    const gl::ProgramExecutable *programExecutable = mState.getProgramExecutable();

    // When running compute we don't have a draw FBO.
    if (programExecutable->isCompute())
    {
        return false;
    }

    // The "readOnlyDepthMode" feature enables read-only depth-stencil feedback loops.  We only
    // switch to read-only mode when there is a loop.  The depth-stencil access mode is tracked
    // in the RenderPass; that tracking tells us when we can retroactively change the RenderPass
    // to read-only.  If there are any writes, we need to break and finish the current render
    // pass before starting the read-only one.
    return texture->isBoundToFramebuffer(mDrawFramebuffer->getState().getFramebufferSerial()) &&
           !mState.isDepthWriteEnabled() && !mDrawFramebuffer->isReadOnlyDepthFeedbackLoopMode();
}

angle::Result ContextVk::onResourceAccess(const vk::CommandBufferAccess &access)
{
    ANGLE_TRY(flushCommandBuffersIfNecessary(access));

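    // The flush above guarantees none of these resources are still used by an open render pass,
    // so the barriers below can safely be recorded into the outside-render-pass command buffer.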
    for (const vk::CommandBufferImageAccess &imageAccess : access.getReadImages())
    {
        ASSERT(!IsRenderPassStartedAndUsesImage(*mRenderPassCommands, *imageAccess.image));

        imageAccess.image->recordReadBarrier(this, imageAccess.aspectFlags, imageAccess.imageLayout,
                                             &mOutsideRenderPassCommands->getCommandBuffer());
        imageAccess.image->retain(&mResourceUseList);
    }

    for (const vk::CommandBufferImageWrite &imageWrite : access.getWriteImages())
    {
        ASSERT(!IsRenderPassStartedAndUsesImage(*mRenderPassCommands, *imageWrite.access.image));

        imageWrite.access.image->recordWriteBarrier(
            this, imageWrite.access.aspectFlags, imageWrite.access.imageLayout,
            &mOutsideRenderPassCommands->getCommandBuffer());
        imageWrite.access.image->retain(&mResourceUseList);
        imageWrite.access.image->onWrite(imageWrite.levelStart, imageWrite.levelCount,
                                         imageWrite.layerStart, imageWrite.layerCount,
                                         imageWrite.access.aspectFlags);
    }

    for (const vk::CommandBufferBufferAccess &bufferAccess : access.getReadBuffers())
    {
        ASSERT(!mRenderPassCommands->usesBufferForWrite(*bufferAccess.buffer));
        ASSERT(!mOutsideRenderPassCommands->usesBufferForWrite(*bufferAccess.buffer));

        mOutsideRenderPassCommands->bufferRead(this, bufferAccess.accessType, bufferAccess.stage,
                                               bufferAccess.buffer);
    }

    for (const vk::CommandBufferBufferAccess &bufferAccess : access.getWriteBuffers())
    {
        ASSERT(!mRenderPassCommands->usesBuffer(*bufferAccess.buffer));
        ASSERT(!mOutsideRenderPassCommands->usesBuffer(*bufferAccess.buffer));

        mOutsideRenderPassCommands->bufferWrite(this, bufferAccess.accessType, bufferAccess.stage,
                                                vk::AliasingMode::Disallowed, bufferAccess.buffer);
    }

    return angle::Result::Continue;
}

angle::Result ContextVk::flushCommandBuffersIfNecessary(const vk::CommandBufferAccess &access)
{
    // Go over resources and decide whether the render pass needs to close, whether the outside
    // render pass commands need to be flushed, or neither.  Note that closing the render pass
    // implies flushing the outside render pass as well, so if that needs to be done, we can close
    // the render pass and immediately return from this function.  Otherwise, this function keeps
    // track of whether the outside render pass commands need to be closed, and if so, it will do
    // that once at the end.
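    //
    // Decision summary:
    //   - image read or write, used by the started render pass  -> end the render pass
    //   - buffer read, written by the started render pass       -> end the render pass
    //   - buffer read, written outside the render pass          -> flush outside-RP commands
    //   - buffer write, previously used in the render pass      -> end the render pass
    //   - buffer write, previously used outside the render pass -> flush outside-RP commands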

    // Read images only need to close the render pass if they need a layout transition.
    for (const vk::CommandBufferImageAccess &imageAccess : access.getReadImages())
    {
        // Note that different read methods are not compatible. A shader read uses a different
        // layout than a transfer read. So we cannot support simultaneous read usage as easily as
        // for Buffers.  TODO: Don't close the render pass if the image was only used read-only in
        // the render pass.  http://anglebug.com/4984
        if (IsRenderPassStartedAndUsesImage(*mRenderPassCommands, *imageAccess.image))
        {
            return flushCommandsAndEndRenderPass();
        }
    }

    // Write images only need to close the render pass if they need a layout transition.
    for (const vk::CommandBufferImageWrite &imageWrite : access.getWriteImages())
    {
        if (IsRenderPassStartedAndUsesImage(*mRenderPassCommands, *imageWrite.access.image))
        {
            return flushCommandsAndEndRenderPass();
        }
    }

    bool shouldCloseOutsideRenderPassCommands = false;

    // Read buffers only need a new command buffer if previously used for write.
    for (const vk::CommandBufferBufferAccess &bufferAccess : access.getReadBuffers())
    {
        if (mRenderPassCommands->usesBufferForWrite(*bufferAccess.buffer))
        {
            return flushCommandsAndEndRenderPass();
        }
        else if (mOutsideRenderPassCommands->usesBufferForWrite(*bufferAccess.buffer))
        {
            shouldCloseOutsideRenderPassCommands = true;
        }
    }

    // Write buffers always need a new command buffer if previously used.
    for (const vk::CommandBufferBufferAccess &bufferAccess : access.getWriteBuffers())
    {
        if (mRenderPassCommands->usesBuffer(*bufferAccess.buffer))
        {
            return flushCommandsAndEndRenderPass();
        }
        else if (mOutsideRenderPassCommands->usesBuffer(*bufferAccess.buffer))
        {
            shouldCloseOutsideRenderPassCommands = true;
        }
    }

    if (shouldCloseOutsideRenderPassCommands)
    {
        return flushOutsideRenderPassCommands();
    }

    return angle::Result::Continue;
}

angle::Result ContextVk::endRenderPassIfComputeReadAfterTransformFeedbackWrite()
{
    // Similar to flushCommandBuffersIfNecessary(), but using uniform buffers currently bound and
    // used by the current (compute) program.  This is to handle read-after-write hazards where the
    // write originates from transform feedback.
    if (mCurrentTransformFeedbackBuffers.empty())
    {
        return angle::Result::Continue;
    }

    const gl::ProgramExecutable *executable = mState.getProgramExecutable();
    ASSERT(executable && executable->isCompute());

    gl::ShaderMap<const gl::ProgramState *> programStates;
    mExecutable->fillProgramStateMap(this, &programStates);

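    // Walk every uniform buffer binding of each linked stage; if any binding aliases a buffer
    // that transform feedback wrote in the open render pass, that render pass must end first.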
    for (const gl::ShaderType shaderType : executable->getLinkedShaderStages())
    {
        const gl::ProgramState *programState = programStates[shaderType];
        ASSERT(programState);

        // Uniform buffers:
        const std::vector<gl::InterfaceBlock> &blocks = programState->getUniformBlocks();

        for (uint32_t bufferIndex = 0; bufferIndex < blocks.size(); ++bufferIndex)
        {
            const gl::InterfaceBlock &block = blocks[bufferIndex];
            const gl::OffsetBindingPointer<gl::Buffer> &bufferBinding =
                mState.getIndexedUniformBuffer(block.binding);

            if (!block.isActive(shaderType) || bufferBinding.get() == nullptr)
            {
                continue;
            }

            VkDeviceSize bufferOffset = 0;
            vk::BufferHelper &buffer =
                vk::GetImpl(bufferBinding.get())->getBufferAndOffset(&bufferOffset);
            if (mCurrentTransformFeedbackBuffers.contains(&buffer))
            {
                return flushCommandsAndEndRenderPass();
            }
        }
    }

    return angle::Result::Continue;
}

angle::Result ContextVk::endRenderPassIfComputeReadAfterAttachmentWrite()
{
    // Similar to flushCommandBuffersIfNecessary(), but using textures currently bound and used by
    // the current (compute) program.  This is to handle read-after-write hazards where the write
    // originates from a framebuffer attachment.
    const gl::ProgramExecutable *executable = mState.getProgramExecutable();
    ASSERT(executable && executable->isCompute() && executable->hasTextures());

    const gl::ActiveTexturesCache &textures        = mState.getActiveTexturesCache();
    const gl::ActiveTextureTypeArray &textureTypes = executable->getActiveSamplerTypes();

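    // For each texture the program samples, end the render pass if its backing image is
    // currently used by the open render pass (e.g. as a framebuffer attachment).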
    for (size_t textureUnit : executable->getActiveSamplersMask())
    {
        gl::Texture *texture        = textures[textureUnit];
        gl::TextureType textureType = textureTypes[textureUnit];

        if (texture == nullptr || textureType == gl::TextureType::Buffer)
        {
            continue;
        }

        TextureVk *textureVk = vk::GetImpl(texture);
        ASSERT(textureVk != nullptr);
        vk::ImageHelper &image = textureVk->getImage();

        if (IsRenderPassStartedAndUsesImage(*mRenderPassCommands, image))
        {
            return flushCommandsAndEndRenderPass();
        }
    }

    return angle::Result::Continue;
}

// Requires that tracing be enabled to see the output, which is supported with is_debug=true.
void ContextVk::outputCumulativePerfCounters()
{
    if (!vk::kOutputCumulativePerfCounters)
    {
        return;
    }

    INFO() << "Context Descriptor Set Allocations: ";

    for (PipelineType pipelineType : angle::AllEnums<PipelineType>())
    {
        uint32_t count = mCumulativeContextPerfCounters.descriptorSetsAllocated[pipelineType];
        if (count > 0)
        {
            INFO() << "    PipelineType " << ToUnderlying(pipelineType) << ": " << count;
        }
    }
}

ContextVkPerfCounters ContextVk::getAndResetObjectPerfCounters()
{
    mCumulativeContextPerfCounters.descriptorSetsAllocated +=
        mContextPerfCounters.descriptorSetsAllocated;

    ContextVkPerfCounters counters               = mContextPerfCounters;
    mContextPerfCounters.descriptorSetsAllocated = {};
    return counters;
}
}  // namespace rx