1 //
2 // Copyright 2016 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // ContextVk.h:
7 // Defines the class interface for ContextVk, implementing ContextImpl.
8 //
9
10 #ifndef LIBANGLE_RENDERER_VULKAN_CONTEXTVK_H_
11 #define LIBANGLE_RENDERER_VULKAN_CONTEXTVK_H_
12
13 #include <condition_variable>
14
15 #include "common/PackedEnums.h"
16 #include "common/vulkan/vk_headers.h"
17 #include "image_util/loadimage.h"
18 #include "libANGLE/renderer/ContextImpl.h"
19 #include "libANGLE/renderer/renderer_utils.h"
20 #include "libANGLE/renderer/vulkan/DisplayVk.h"
21 #include "libANGLE/renderer/vulkan/OverlayVk.h"
22 #include "libANGLE/renderer/vulkan/PersistentCommandPool.h"
23 #include "libANGLE/renderer/vulkan/ShareGroupVk.h"
24 #include "libANGLE/renderer/vulkan/vk_helpers.h"
25 #include "libANGLE/renderer/vulkan/vk_renderer.h"
26
27 namespace angle
28 {
29 struct FeaturesVk;
30 } // namespace angle
31
32 namespace rx
33 {
34 namespace vk
35 {
36 class SyncHelper;
37 } // namespace vk
38
39 class ProgramExecutableVk;
40 class WindowSurfaceVk;
41 class OffscreenSurfaceVk;
42 class ShareGroupVk;
43
44 static constexpr uint32_t kMaxGpuEventNameLen = 32;
45 using EventName = std::array<char, kMaxGpuEventNameLen>;
46
47 using ContextVkDescriptorSetList = angle::PackedEnumMap<PipelineType, uint32_t>;
48 using CounterPipelineTypeMap = angle::PackedEnumMap<PipelineType, uint32_t>;
49
50 enum class GraphicsEventCmdBuf
51 {
52 NotInQueryCmd = 0,
53 InOutsideCmdBufQueryCmd = 1,
54 InRenderPassCmdBufQueryCmd = 2,
55
56 InvalidEnum = 3,
57 EnumCount = 3,
58 };
59
60 // Why the depth/stencil feedback loop is being updated. Depending on whether it's due to a
61 // draw or a clear, different GL state affects the depth/stencil write.
62 enum class UpdateDepthFeedbackLoopReason
63 {
64 None,
65 Draw,
66 Clear,
67 };
68
69 class ContextVk : public ContextImpl, public vk::Context, public MultisampleTextureInitializer
70 {
71 public:
72 ContextVk(const gl::State &state, gl::ErrorSet *errorSet, vk::Renderer *renderer);
73 ~ContextVk() override;
74
75 angle::Result initialize(const angle::ImageLoadContext &imageLoadContext) override;
76
77 void onDestroy(const gl::Context *context) override;
78
79 // Flush and finish.
80 angle::Result flush(const gl::Context *context) override;
81 angle::Result finish(const gl::Context *context) override;
82
83 // Drawing methods.
84 angle::Result drawArrays(const gl::Context *context,
85 gl::PrimitiveMode mode,
86 GLint first,
87 GLsizei count) override;
88 angle::Result drawArraysInstanced(const gl::Context *context,
89 gl::PrimitiveMode mode,
90 GLint first,
91 GLsizei count,
92 GLsizei instanceCount) override;
93 angle::Result drawArraysInstancedBaseInstance(const gl::Context *context,
94 gl::PrimitiveMode mode,
95 GLint first,
96 GLsizei count,
97 GLsizei instanceCount,
98 GLuint baseInstance) override;
99
100 angle::Result drawElements(const gl::Context *context,
101 gl::PrimitiveMode mode,
102 GLsizei count,
103 gl::DrawElementsType type,
104 const void *indices) override;
105 angle::Result drawElementsBaseVertex(const gl::Context *context,
106 gl::PrimitiveMode mode,
107 GLsizei count,
108 gl::DrawElementsType type,
109 const void *indices,
110 GLint baseVertex) override;
111 angle::Result drawElementsInstanced(const gl::Context *context,
112 gl::PrimitiveMode mode,
113 GLsizei count,
114 gl::DrawElementsType type,
115 const void *indices,
116 GLsizei instanceCount) override;
117 angle::Result drawElementsInstancedBaseVertex(const gl::Context *context,
118 gl::PrimitiveMode mode,
119 GLsizei count,
120 gl::DrawElementsType type,
121 const void *indices,
122 GLsizei instanceCount,
123 GLint baseVertex) override;
124 angle::Result drawElementsInstancedBaseVertexBaseInstance(const gl::Context *context,
125 gl::PrimitiveMode mode,
126 GLsizei count,
127 gl::DrawElementsType type,
128 const void *indices,
129 GLsizei instances,
130 GLint baseVertex,
131 GLuint baseInstance) override;
132 angle::Result drawRangeElements(const gl::Context *context,
133 gl::PrimitiveMode mode,
134 GLuint start,
135 GLuint end,
136 GLsizei count,
137 gl::DrawElementsType type,
138 const void *indices) override;
139 angle::Result drawRangeElementsBaseVertex(const gl::Context *context,
140 gl::PrimitiveMode mode,
141 GLuint start,
142 GLuint end,
143 GLsizei count,
144 gl::DrawElementsType type,
145 const void *indices,
146 GLint baseVertex) override;
147 angle::Result drawArraysIndirect(const gl::Context *context,
148 gl::PrimitiveMode mode,
149 const void *indirect) override;
150 angle::Result drawElementsIndirect(const gl::Context *context,
151 gl::PrimitiveMode mode,
152 gl::DrawElementsType type,
153 const void *indirect) override;
154
155 angle::Result multiDrawArrays(const gl::Context *context,
156 gl::PrimitiveMode mode,
157 const GLint *firsts,
158 const GLsizei *counts,
159 GLsizei drawcount) override;
160 angle::Result multiDrawArraysInstanced(const gl::Context *context,
161 gl::PrimitiveMode mode,
162 const GLint *firsts,
163 const GLsizei *counts,
164 const GLsizei *instanceCounts,
165 GLsizei drawcount) override;
166 angle::Result multiDrawArraysIndirect(const gl::Context *context,
167 gl::PrimitiveMode mode,
168 const void *indirect,
169 GLsizei drawcount,
170 GLsizei stride) override;
171 angle::Result multiDrawElements(const gl::Context *context,
172 gl::PrimitiveMode mode,
173 const GLsizei *counts,
174 gl::DrawElementsType type,
175 const GLvoid *const *indices,
176 GLsizei drawcount) override;
177 angle::Result multiDrawElementsInstanced(const gl::Context *context,
178 gl::PrimitiveMode mode,
179 const GLsizei *counts,
180 gl::DrawElementsType type,
181 const GLvoid *const *indices,
182 const GLsizei *instanceCounts,
183 GLsizei drawcount) override;
184 angle::Result multiDrawElementsIndirect(const gl::Context *context,
185 gl::PrimitiveMode mode,
186 gl::DrawElementsType type,
187 const void *indirect,
188 GLsizei drawcount,
189 GLsizei stride) override;
190 angle::Result multiDrawArraysInstancedBaseInstance(const gl::Context *context,
191 gl::PrimitiveMode mode,
192 const GLint *firsts,
193 const GLsizei *counts,
194 const GLsizei *instanceCounts,
195 const GLuint *baseInstances,
196 GLsizei drawcount) override;
197 angle::Result multiDrawElementsInstancedBaseVertexBaseInstance(const gl::Context *context,
198 gl::PrimitiveMode mode,
199 const GLsizei *counts,
200 gl::DrawElementsType type,
201 const GLvoid *const *indices,
202 const GLsizei *instanceCounts,
203 const GLint *baseVertices,
204 const GLuint *baseInstances,
205 GLsizei drawcount) override;
206
207 // MultiDrawIndirect helper functions
208 angle::Result multiDrawElementsIndirectHelper(const gl::Context *context,
209 gl::PrimitiveMode mode,
210 gl::DrawElementsType type,
211 const void *indirect,
212 GLsizei drawcount,
213 GLsizei stride);
214 angle::Result multiDrawArraysIndirectHelper(const gl::Context *context,
215 gl::PrimitiveMode mode,
216 const void *indirect,
217 GLsizei drawcount,
218 GLsizei stride);
219
220 // ShareGroup
221 ShareGroupVk *getShareGroup() { return mShareGroupVk; }
222 PipelineLayoutCache &getPipelineLayoutCache()
223 {
224 return mShareGroupVk->getPipelineLayoutCache();
225 }
226 DescriptorSetLayoutCache &getDescriptorSetLayoutCache()
227 {
228 return mShareGroupVk->getDescriptorSetLayoutCache();
229 }
230 vk::DescriptorSetArray<vk::MetaDescriptorPool> &getMetaDescriptorPools()
231 {
232 return mShareGroupVk->getMetaDescriptorPools();
233 }
234
235 // Device loss
236 gl::GraphicsResetStatus getResetStatus() override;
237
238 bool isDebugEnabled()
239 {
240 return mRenderer->enableDebugUtils() || mRenderer->angleDebuggerMode();
241 }
242
243 // EXT_debug_marker
244 angle::Result insertEventMarker(GLsizei length, const char *marker) override;
245 angle::Result pushGroupMarker(GLsizei length, const char *marker) override;
246 angle::Result popGroupMarker() override;
247
248 void insertEventMarkerImpl(GLenum source, const char *marker);
249
250 // KHR_debug
251 angle::Result pushDebugGroup(const gl::Context *context,
252 GLenum source,
253 GLuint id,
254 const std::string &message) override;
255 angle::Result popDebugGroup(const gl::Context *context) override;
256
257 // Record GL API calls for debuggers
258 void logEvent(const char *eventString);
259 void endEventLog(angle::EntryPoint entryPoint, PipelineType pipelineType);
260 void endEventLogForClearOrQuery();
261
262 bool isViewportFlipEnabledForDrawFBO() const;
263 bool isViewportFlipEnabledForReadFBO() const;
264 // When the device/surface is rotated such that the surface's aspect ratio is different than
265 // the native device (e.g. 90 degrees), the width and height of the viewport, scissor, and
266 // render area must be swapped.
267 bool isRotatedAspectRatioForDrawFBO() const;
268 bool isRotatedAspectRatioForReadFBO() const;
269 SurfaceRotation getRotationDrawFramebuffer() const;
270 SurfaceRotation getRotationReadFramebuffer() const;
271 SurfaceRotation getSurfaceRotationImpl(const gl::Framebuffer *framebuffer,
272 const egl::Surface *surface);
273
274 // The viewport (x, y, w, h) is determined by a combination of:
275 // 1. clip space origin
276 // 2. isViewportFlipEnabledForDrawFBO
277 // For user-defined FBOs it is based on the value of isViewportFlipEnabledForDrawFBO.
278 // For default FBOs it is the XOR of ClipOrigin and isViewportFlipEnabledForDrawFBO.
279 // isYFlipEnabledForDrawFBO indicates the rendered image is upside-down.
280 ANGLE_INLINE bool isYFlipEnabledForDrawFBO() const
281 {
282 return mState.getClipOrigin() == gl::ClipOrigin::UpperLeft
283 ? !isViewportFlipEnabledForDrawFBO()
284 : isViewportFlipEnabledForDrawFBO();
285 }
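// For example (illustrative): with the default framebuffer, ClipOrigin::LowerLeft and viewport
// flip enabled yields a Y-flipped image, while ClipOrigin::UpperLeft with viewport flip enabled
// does not, since the two effects cancel out.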
286
287 // State sync with dirty bits.
288 angle::Result syncState(const gl::Context *context,
289 const gl::state::DirtyBits dirtyBits,
290 const gl::state::DirtyBits bitMask,
291 const gl::state::ExtendedDirtyBits extendedDirtyBits,
292 const gl::state::ExtendedDirtyBits extendedBitMask,
293 gl::Command command) override;
294
295 // Disjoint timer queries
296 GLint getGPUDisjoint() override;
297 GLint64 getTimestamp() override;
298
299 // Context switching
300 angle::Result onMakeCurrent(const gl::Context *context) override;
301 angle::Result onUnMakeCurrent(const gl::Context *context) override;
302 angle::Result onSurfaceUnMakeCurrent(WindowSurfaceVk *surface);
303 angle::Result onSurfaceUnMakeCurrent(OffscreenSurfaceVk *surface);
304
305 // Native capabilities, unmodified by gl::Context.
306 gl::Caps getNativeCaps() const override;
307 const gl::TextureCapsMap &getNativeTextureCaps() const override;
308 const gl::Extensions &getNativeExtensions() const override;
309 const gl::Limitations &getNativeLimitations() const override;
310 const ShPixelLocalStorageOptions &getNativePixelLocalStorageOptions() const override;
311
312 // Shader creation
313 CompilerImpl *createCompiler() override;
314 ShaderImpl *createShader(const gl::ShaderState &state) override;
315 ProgramImpl *createProgram(const gl::ProgramState &state) override;
316 ProgramExecutableImpl *createProgramExecutable(
317 const gl::ProgramExecutable *executable) override;
318
319 // Framebuffer creation
320 FramebufferImpl *createFramebuffer(const gl::FramebufferState &state) override;
321
322 // Texture creation
323 TextureImpl *createTexture(const gl::TextureState &state) override;
324
325 // Renderbuffer creation
326 RenderbufferImpl *createRenderbuffer(const gl::RenderbufferState &state) override;
327
328 // Buffer creation
329 BufferImpl *createBuffer(const gl::BufferState &state) override;
330
331 // Vertex Array creation
332 VertexArrayImpl *createVertexArray(const gl::VertexArrayState &state) override;
333
334 // Query and Fence creation
335 QueryImpl *createQuery(gl::QueryType type) override;
336 FenceNVImpl *createFenceNV() override;
337 SyncImpl *createSync() override;
338
339 // Transform Feedback creation
340 TransformFeedbackImpl *createTransformFeedback(
341 const gl::TransformFeedbackState &state) override;
342
343 // Sampler object creation
344 SamplerImpl *createSampler(const gl::SamplerState &state) override;
345
346 // Program Pipeline object creation
347 ProgramPipelineImpl *createProgramPipeline(const gl::ProgramPipelineState &data) override;
348
349 // Memory object creation.
350 MemoryObjectImpl *createMemoryObject() override;
351
352 // Semaphore creation.
353 SemaphoreImpl *createSemaphore() override;
354
355 // Overlay creation.
356 OverlayImpl *createOverlay(const gl::OverlayState &state) override;
357
358 angle::Result dispatchCompute(const gl::Context *context,
359 GLuint numGroupsX,
360 GLuint numGroupsY,
361 GLuint numGroupsZ) override;
362 angle::Result dispatchComputeIndirect(const gl::Context *context, GLintptr indirect) override;
363
364 angle::Result memoryBarrier(const gl::Context *context, GLbitfield barriers) override;
365 angle::Result memoryBarrierByRegion(const gl::Context *context, GLbitfield barriers) override;
366
367 ANGLE_INLINE void invalidateTexture(gl::TextureType target) override {}
368
369 bool hasDisplayTextureShareGroup() const { return mState.hasDisplayTextureShareGroup(); }
370
371 // EXT_shader_framebuffer_fetch_non_coherent
372 void framebufferFetchBarrier() override;
373
374 // KHR_blend_equation_advanced
375 void blendBarrier() override;
376
377 // GL_ANGLE_vulkan_image
378 angle::Result acquireTextures(const gl::Context *context,
379 const gl::TextureBarrierVector &textureBarriers) override;
380 angle::Result releaseTextures(const gl::Context *context,
381 gl::TextureBarrierVector *textureBarriers) override;
382
383 // Sets effective Context Priority. Changed by ShareGroupVk.
384 void setPriority(egl::ContextPriority newPriority)
385 {
386 mContextPriority = newPriority;
387 mDeviceQueueIndex = mRenderer->getDeviceQueueIndex(mContextPriority);
388 }
389
390 VkDevice getDevice() const;
391 // Effective Context Priority
392 egl::ContextPriority getPriority() const { return mContextPriority; }
393 vk::ProtectionType getProtectionType() const { return mProtectionType; }
394
395 ANGLE_INLINE const angle::FeaturesVk &getFeatures() const { return mRenderer->getFeatures(); }
396
397 ANGLE_INLINE void invalidateVertexAndIndexBuffers()
398 {
399 mGraphicsDirtyBits |= kIndexAndVertexDirtyBits;
400 }
401
402 angle::Result onVertexBufferChange(const vk::BufferHelper *vertexBuffer);
403
404 angle::Result onVertexAttributeChange(size_t attribIndex,
405 GLuint stride,
406 GLuint divisor,
407 angle::FormatID format,
408 bool compressed,
409 GLuint relativeOffset,
410 const vk::BufferHelper *vertexBuffer);
411
412 void invalidateDefaultAttribute(size_t attribIndex);
413 void invalidateDefaultAttributes(const gl::AttributesMask &dirtyMask);
414 angle::Result onFramebufferChange(FramebufferVk *framebufferVk, gl::Command command);
415 void onDrawFramebufferRenderPassDescChange(FramebufferVk *framebufferVk,
416 bool *renderPassDescChangedOut);
417 void onHostVisibleBufferWrite() { mIsAnyHostVisibleBufferWritten = true; }
418
419 void invalidateCurrentTransformFeedbackBuffers();
420 void onTransformFeedbackStateChanged();
421 angle::Result onBeginTransformFeedback(
422 size_t bufferCount,
423 const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &buffers,
424 const gl::TransformFeedbackBuffersArray<vk::BufferHelper> &counterBuffers);
425 void onEndTransformFeedback();
426 angle::Result onPauseTransformFeedback();
427 void pauseTransformFeedbackIfActiveUnpaused();
428
429 void onColorAccessChange() { mGraphicsDirtyBits |= kColorAccessChangeDirtyBits; }
430 void onDepthStencilAccessChange() { mGraphicsDirtyBits |= kDepthStencilAccessChangeDirtyBits; }
431
432 // When UtilsVk issues draw or dispatch calls, it binds a new pipeline and descriptor sets that
433 // the context is not aware of. These functions are called to make sure the pipeline and
434 // affected descriptor set bindings are dirtied for the next application draw/dispatch call.
435 void invalidateGraphicsPipelineBinding();
436 void invalidateComputePipelineBinding();
437 void invalidateGraphicsDescriptorSet(DescriptorSetIndex usedDescriptorSet);
438 void invalidateComputeDescriptorSet(DescriptorSetIndex usedDescriptorSet);
439 void invalidateAllDynamicState();
440 angle::Result updateRenderPassDepthFeedbackLoopMode(
441 UpdateDepthFeedbackLoopReason depthReason,
442 UpdateDepthFeedbackLoopReason stencilReason);
443
444 angle::Result optimizeRenderPassForPresent(vk::ImageViewHelper *colorImageView,
445 vk::ImageHelper *colorImage,
446 vk::ImageHelper *colorImageMS,
447 vk::PresentMode presentMode,
448 bool *imageResolved);
449
450 vk::DynamicQueryPool *getQueryPool(gl::QueryType queryType);
451
452 const VkClearValue &getClearColorValue() const;
453 const VkClearValue &getClearDepthStencilValue() const;
454 gl::BlendStateExt::ColorMaskStorage::Type getClearColorMasks() const;
455 const VkRect2D &getScissor() const { return mScissor; }
456 angle::Result getIncompleteTexture(const gl::Context *context,
457 gl::TextureType type,
458 gl::SamplerFormat format,
459 gl::Texture **textureOut);
460 void updateColorMasks();
461 void updateBlendFuncsAndEquations();
462
463 void handleError(VkResult errorCode,
464 const char *file,
465 const char *function,
466 unsigned int line) override;
467
468 angle::Result onIndexBufferChange(const vk::BufferHelper *currentIndexBuffer);
469
470 angle::Result flushImpl(const vk::Semaphore *semaphore,
471 const vk::SharedExternalFence *externalFence,
472 RenderPassClosureReason renderPassClosureReason);
473 angle::Result finishImpl(RenderPassClosureReason renderPassClosureReason);
474
475 void addWaitSemaphore(VkSemaphore semaphore, VkPipelineStageFlags stageMask);
476
477 template <typename T>
478 void addGarbage(T *object)
479 {
480 if (object->valid())
481 {
482 mCurrentGarbage.emplace_back(vk::GetGarbage(object));
483 }
484 }
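// A minimal usage sketch (hypothetical caller, for illustration): queue a no-longer-needed
// Vulkan wrapper object for deferred destruction once the GPU has finished with it.
//
//   vk::Semaphore doneSemaphore = ...;           // any wrapper supported by vk::GetGarbage()
//   contextVk->addGarbage(&doneSemaphore);       // only queued if doneSemaphore.valid()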
485
486 angle::Result getCompatibleRenderPass(const vk::RenderPassDesc &desc,
487 const vk::RenderPass **renderPassOut);
488 angle::Result getRenderPassWithOps(const vk::RenderPassDesc &desc,
489 const vk::AttachmentOpsArray &ops,
490 const vk::RenderPass **renderPassOut);
491
492 vk::ShaderLibrary &getShaderLibrary() { return mShaderLibrary; }
493 UtilsVk &getUtils() { return mUtils; }
494
495 angle::Result getTimestamp(uint64_t *timestampOut);
496
497 // Create Begin/End/Instant GPU trace events, which take their timestamps from GPU queries.
498 // The events are queued until the query results are available. Possible values for `phase`
499 // are TRACE_EVENT_PHASE_*
500 ANGLE_INLINE angle::Result traceGpuEvent(vk::OutsideRenderPassCommandBuffer *commandBuffer,
501 char phase,
502 const EventName &name)
503 {
504 if (mGpuEventsEnabled)
505 return traceGpuEventImpl(commandBuffer, phase, name);
506 return angle::Result::Continue;
507 }
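// A minimal usage sketch (illustrative; assumes an EventName built for the call site and an
// outside-render-pass command buffer already in hand):
//
//   ANGLE_TRY(traceGpuEvent(commandBuffer, TRACE_EVENT_PHASE_BEGIN, eventName));
//   // ... record the GPU work being traced ...
//   ANGLE_TRY(traceGpuEvent(commandBuffer, TRACE_EVENT_PHASE_END, eventName));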
508
509 RenderPassCache &getRenderPassCache() { return mRenderPassCache; }
510
511 bool emulateSeamfulCubeMapSampling() const { return mEmulateSeamfulCubeMapSampling; }
512
513 const gl::Debug &getDebug() const { return mState.getDebug(); }
514 const gl::OverlayType *getOverlay() const { return mState.getOverlay(); }
515
516 angle::Result onBufferReleaseToExternal(const vk::BufferHelper &buffer);
517 angle::Result onImageReleaseToExternal(const vk::ImageHelper &image);
518
519 void onImageRenderPassRead(VkImageAspectFlags aspectFlags,
520 vk::ImageLayout imageLayout,
521 vk::ImageHelper *image)
522 {
523 ASSERT(mRenderPassCommands->started());
524 mRenderPassCommands->imageRead(this, aspectFlags, imageLayout, image);
525 }
526
527 void onImageRenderPassWrite(gl::LevelIndex level,
528 uint32_t layerStart,
529 uint32_t layerCount,
530 VkImageAspectFlags aspectFlags,
531 vk::ImageLayout imageLayout,
532 vk::ImageHelper *image)
533 {
534 ASSERT(mRenderPassCommands->started());
535 mRenderPassCommands->imageWrite(this, level, layerStart, layerCount, aspectFlags,
536 imageLayout, image);
537 }
538
539 void onColorDraw(gl::LevelIndex level,
540 uint32_t layerStart,
541 uint32_t layerCount,
542 vk::ImageHelper *image,
543 vk::ImageHelper *resolveImage,
544 UniqueSerial imageSiblingSerial,
545 vk::PackedAttachmentIndex packedAttachmentIndex)
546 {
547 ASSERT(mRenderPassCommands->started());
548 mRenderPassCommands->colorImagesDraw(level, layerStart, layerCount, image, resolveImage,
549 imageSiblingSerial, packedAttachmentIndex);
550 }
551 void onDepthStencilDraw(gl::LevelIndex level,
552 uint32_t layerStart,
553 uint32_t layerCount,
554 vk::ImageHelper *image,
555 vk::ImageHelper *resolveImage,
556 UniqueSerial imageSiblingSerial)
557 {
558 ASSERT(mRenderPassCommands->started());
559 mRenderPassCommands->depthStencilImagesDraw(level, layerStart, layerCount, image,
560 resolveImage, imageSiblingSerial);
561 }
562
563 void onFragmentShadingRateRead(vk::ImageHelper *image)
564 {
565 ASSERT(mRenderPassCommands->started());
566 mRenderPassCommands->fragmentShadingRateImageRead(image);
567 }
568
569 void finalizeImageLayout(const vk::ImageHelper *image, UniqueSerial imageSiblingSerial)
570 {
571 if (mRenderPassCommands->started())
572 {
573 mRenderPassCommands->finalizeImageLayout(this, image, imageSiblingSerial);
574 }
575 }
576
577 angle::Result getOutsideRenderPassCommandBuffer(
578 const vk::CommandBufferAccess &access,
579 vk::OutsideRenderPassCommandBuffer **commandBufferOut)
580 {
581 ANGLE_TRY(onResourceAccess(access));
582 *commandBufferOut = &mOutsideRenderPassCommands->getCommandBuffer();
583 return angle::Result::Continue;
584 }
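// A typical caller pattern (illustrative sketch; the access setup shown here is an assumption
// about the call site, not part of this header):
//
//   vk::CommandBufferAccess access;
//   access.onBufferTransferWrite(&dstBuffer);
//   vk::OutsideRenderPassCommandBuffer *commandBuffer;
//   ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
//   // record transfer commands into *commandBuffer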
585
586 angle::Result getOutsideRenderPassCommandBufferHelper(
587 const vk::CommandBufferAccess &access,
588 vk::OutsideRenderPassCommandBufferHelper **commandBufferHelperOut)
589 {
590 ANGLE_TRY(onResourceAccess(access));
591 *commandBufferHelperOut = mOutsideRenderPassCommands;
592 return angle::Result::Continue;
593 }
594
595 void trackImageWithOutsideRenderPassEvent(vk::ImageHelper *image)
596 {
597 if (mRenderer->getFeatures().useVkEventForImageBarrier.enabled)
598 {
599 mOutsideRenderPassCommands->trackImageWithEvent(this, image);
600 }
601 }
602 void trackImagesWithOutsideRenderPassEvent(vk::ImageHelper *srcImage, vk::ImageHelper *dstImage)
603 {
604 if (mRenderer->getFeatures().useVkEventForImageBarrier.enabled)
605 {
606 mOutsideRenderPassCommands->trackImagesWithEvent(this, srcImage, dstImage);
607 }
608 }
609 void trackImagesWithOutsideRenderPassEvent(const vk::ImageHelperPtr *images, size_t count)
610 {
611 if (mRenderer->getFeatures().useVkEventForImageBarrier.enabled)
612 {
613 mOutsideRenderPassCommands->trackImagesWithEvent(this, images, count);
614 }
615 }
616
617 angle::Result submitStagedTextureUpdates()
618 {
619 // Staged updates are recorded in the outside render pass command buffer; submit them.
620 return flushOutsideRenderPassCommands();
621 }
622
623 angle::Result onEGLImageQueueChange()
624 {
625 // Flush the barrier inserted to change the queue and layout of an EGL image. Another
626 // thread may start using this image without issuing a sync object.
627 return flushOutsideRenderPassCommands();
628 }
629
630 angle::Result beginNewRenderPass(vk::RenderPassFramebuffer &&framebuffer,
631 const gl::Rectangle &renderArea,
632 const vk::RenderPassDesc &renderPassDesc,
633 const vk::AttachmentOpsArray &renderPassAttachmentOps,
634 const vk::PackedAttachmentCount colorAttachmentCount,
635 const vk::PackedAttachmentIndex depthStencilAttachmentIndex,
636 const vk::PackedClearValuesArray &clearValues,
637 vk::RenderPassCommandBuffer **commandBufferOut);
638
639 void disableRenderPassReactivation() { mAllowRenderPassToReactivate = false; }
640
641 // Only returns true if we have a started render pass and we've run setupDraw.
642 bool hasActiveRenderPass() const
643 {
644 // If mRenderPassCommandBuffer is not null, mRenderPassCommands must have already started;
645 // we call this an active render pass. A started render pass will have a null
646 // mRenderPassCommandBuffer after the onRenderPassFinished call; we call that state started
647 // but inactive.
648 ASSERT(mRenderPassCommandBuffer == nullptr || mRenderPassCommands->started());
649 // Checking mRenderPassCommandBuffer ensures we've called setupDraw.
650 return mRenderPassCommandBuffer != nullptr;
651 }
652
653 bool hasStartedRenderPassWithQueueSerial(const QueueSerial &queueSerial) const
654 {
655 return mRenderPassCommands->started() &&
656 mRenderPassCommands->getQueueSerial() == queueSerial;
657 }
658 bool hasStartedRenderPassWithSwapchainFramebuffer(const vk::Framebuffer &framebuffer) const
659 {
660 // WindowSurfaceVk caches its own framebuffers and guarantees that render passes are not
661 // kept open between frames (including when a swapchain is recreated and framebuffer handles
662 // change). It is therefore safe to identify an open render pass by its framebuffer handle.
663 return mRenderPassCommands->started() &&
664 mRenderPassCommands->getFramebuffer().getFramebuffer().getHandle() ==
665 framebuffer.getHandle();
666 }
667
668 bool isRenderPassStartedAndUsesBuffer(const vk::BufferHelper &buffer) const
669 {
670 return mRenderPassCommands->started() && mRenderPassCommands->usesBuffer(buffer);
671 }
672
673 bool isRenderPassStartedAndUsesBufferForWrite(const vk::BufferHelper &buffer) const
674 {
675 return mRenderPassCommands->started() && mRenderPassCommands->usesBufferForWrite(buffer);
676 }
677
678 bool isRenderPassStartedAndUsesImage(const vk::ImageHelper &image) const
679 {
680 return mRenderPassCommands->started() && mRenderPassCommands->usesImage(image);
681 }
682
683 vk::RenderPassCommandBufferHelper &getStartedRenderPassCommands()
684 {
685 ASSERT(mRenderPassCommands->started());
686 return *mRenderPassCommands;
687 }
688
689 uint32_t getCurrentSubpassIndex() const;
690 uint32_t getCurrentViewCount() const;
691
692 // Initial Context Priority. Used for EGL_CONTEXT_PRIORITY_LEVEL_IMG attribute.
693 egl::ContextPriority getContextPriority() const override { return mInitialContextPriority; }
694 angle::Result startRenderPass(gl::Rectangle renderArea,
695 vk::RenderPassCommandBuffer **commandBufferOut,
696 bool *renderPassDescChangedOut);
697 angle::Result startNextSubpass();
698 angle::Result flushCommandsAndEndRenderPass(RenderPassClosureReason reason);
699 angle::Result flushCommandsAndEndRenderPassWithoutSubmit(RenderPassClosureReason reason);
700 angle::Result flushAndSubmitOutsideRenderPassCommands();
701
702 angle::Result syncExternalMemory();
703
704 // Either issue a submission or defer it when a sync object is initialized. If deferred, a
705 // submission will have to be incurred during client wait.
706 angle::Result onSyncObjectInit(vk::SyncHelper *syncHelper, SyncFenceScope scope);
707 // Called when a sync object is waited on while its submission was deferred in onSyncObjectInit.
708 // It's a no-op if this context doesn't have a pending submission. Note that due to
709 // mHasDeferredFlush being set, flushing the render pass leads to a submission automatically.
710 angle::Result flushCommandsAndEndRenderPassIfDeferredSyncInit(RenderPassClosureReason reason);
711
712 void addCommandBufferDiagnostics(const std::string &commandBufferDiagnostics);
713
714 VkIndexType getVkIndexType(gl::DrawElementsType glIndexType) const;
715 size_t getVkIndexTypeSize(gl::DrawElementsType glIndexType) const;
716 bool shouldConvertUint8VkIndexType(gl::DrawElementsType glIndexType) const;
717
718 bool isRobustResourceInitEnabled() const;
719 bool hasRobustAccess() const { return mState.hasRobustAccess(); }
720
721 // Queries that begin and end automatically with render pass start and end
722 angle::Result beginRenderPassQuery(QueryVk *queryVk);
723 angle::Result endRenderPassQuery(QueryVk *queryVk);
724 void pauseRenderPassQueriesIfActive();
725 angle::Result resumeRenderPassQueriesIfActive();
726 angle::Result resumeXfbRenderPassQueriesIfActive();
727 bool doesPrimitivesGeneratedQuerySupportRasterizerDiscard() const;
728 bool isEmulatingRasterizerDiscardDuringPrimitivesGeneratedQuery(
729 bool isPrimitivesGeneratedQueryActive) const;
730
731 // Used by QueryVk to share query helpers between transform feedback queries.
732 QueryVk *getActiveRenderPassQuery(gl::QueryType queryType) const;
733
734 void syncObjectPerfCounters(const angle::VulkanPerfCounters &commandQueuePerfCounters);
735 void updateOverlayOnPresent();
736 void addOverlayUsedBuffersCount(vk::CommandBufferHelperCommon *commandBuffer);
737
738 // For testing only.
739 void setDefaultUniformBlocksMinSizeForTesting(size_t minSize);
740
741 vk::BufferHelper &getEmptyBuffer() { return mEmptyBuffer; }
742
743 // Keeping track of the buffer copy size. Used to determine when to submit the outside command
744 // buffer.
745 angle::Result onCopyUpdate(VkDeviceSize size, bool *commandBufferWasFlushedOut);
746
747 // Implementation of MultisampleTextureInitializer
748 angle::Result initializeMultisampleTextureToBlack(const gl::Context *context,
749 gl::Texture *glTexture) override;
750
751 // TODO(http://anglebug.com/5624): rework updateActiveTextures(), createPipelineLayout(),
752 // handleDirtyGraphicsPipeline(), and ProgramPipelineVk::link().
753 void resetCurrentGraphicsPipeline()
754 {
755 mCurrentGraphicsPipeline = nullptr;
756 mCurrentGraphicsPipelineShaders = nullptr;
757 }
758
759 void onProgramExecutableReset(ProgramExecutableVk *executableVk);
760
761 angle::Result handleGraphicsEventLog(GraphicsEventCmdBuf queryEventType);
762
763 void flushDescriptorSetUpdates();
764
765 vk::BufferPool *getDefaultBufferPool(VkDeviceSize size,
766 uint32_t memoryTypeIndex,
767 BufferUsageType usageType)
768 {
769 return mShareGroupVk->getDefaultBufferPool(mRenderer, size, memoryTypeIndex, usageType);
770 }
771
772 angle::Result allocateStreamedVertexBuffer(size_t attribIndex,
773 size_t bytesToAllocate,
774 vk::BufferHelper **vertexBufferOut)
775 {
776 bool newBufferOut;
777 ANGLE_TRY(mStreamedVertexBuffers[attribIndex].allocate(this, bytesToAllocate,
778 vertexBufferOut, &newBufferOut));
779 if (newBufferOut)
780 {
781 mHasInFlightStreamedVertexBuffers.set(attribIndex);
782 }
783 return angle::Result::Continue;
784 }
785
786 // Put the context in framebuffer fetch mode. If the permanentlySwitchToFramebufferFetchMode
787 // feature is enabled, this is done on first encounter of framebuffer fetch, and makes the
788 // context use framebuffer-fetch-enabled render passes from here on.
789 angle::Result switchToFramebufferFetchMode(bool hasFramebufferFetch);
790 bool isInFramebufferFetchMode() const { return mIsInFramebufferFetchMode; }
791
792 const angle::PerfMonitorCounterGroups &getPerfMonitorCounters() override;
793
794 void resetPerFramePerfCounters();
795
796 // Accumulate cache stats for a specific cache
797 void accumulateCacheStats(VulkanCacheType cache, const CacheStats &stats)
798 {
799 mVulkanCacheStats[cache].accumulate(stats);
800 }
801
802 // Whether VK_EXT_pipeline_robustness should be used to enable robust buffer access in the
803 // pipeline.
804 vk::PipelineRobustness pipelineRobustness() const
805 {
806 return getFeatures().supportsPipelineRobustness.enabled && mState.hasRobustAccess()
807 ? vk::PipelineRobustness::Robust
808 : vk::PipelineRobustness::NonRobust;
809 }
810 // Whether VK_EXT_pipeline_protected_access should be used to restrict the pipeline to protected
811 // command buffers. Note that when false, if the extension is supported, the pipeline can be
812 // restricted to unprotected command buffers.
813 vk::PipelineProtectedAccess pipelineProtectedAccess() const
814 {
815 return getFeatures().supportsPipelineProtectedAccess.enabled && mState.hasProtectedContent()
816 ? vk::PipelineProtectedAccess::Protected
817 : vk::PipelineProtectedAccess::Unprotected;
818 }
819
820 vk::ComputePipelineFlags getComputePipelineFlags() const;
821
822 const angle::ImageLoadContext &getImageLoadContext() const { return mImageLoadContext; }
823
824 bool hasUnsubmittedUse(const vk::ResourceUse &use) const;
825 bool hasUnsubmittedUse(const vk::Resource &resource) const
826 {
827 return hasUnsubmittedUse(resource.getResourceUse());
828 }
829 bool hasUnsubmittedUse(const vk::ReadWriteResource &resource) const
830 {
831 return hasUnsubmittedUse(resource.getResourceUse());
832 }
833
834 const QueueSerial &getLastSubmittedQueueSerial() const { return mLastSubmittedQueueSerial; }
835 const vk::ResourceUse &getSubmittedResourceUse() const { return mSubmittedResourceUse; }
836
837 // Uploading mutable mipmap textures is currently restricted to single-context applications.
838 bool isEligibleForMutableTextureFlush() const
839 {
840 return getFeatures().mutableMipmapTextureUpload.enabled && !hasDisplayTextureShareGroup() &&
841 mShareGroupVk->getContexts().size() == 1;
842 }
843
844 vk::RenderPassUsageFlags getDepthStencilAttachmentFlags() const
845 {
846 return mDepthStencilAttachmentFlags;
847 }
848
849 bool isDitherEnabled() { return mState.isDitherEnabled(); }
850
851 // The following functions try to allocate memory for buffers and images. If they fail due to
852 // OOM errors, they will try other options for memory allocation.
853 angle::Result initBufferAllocation(vk::BufferHelper *bufferHelper,
854 uint32_t memoryTypeIndex,
855 size_t allocationSize,
856 size_t alignment,
857 BufferUsageType bufferUsageType);
858 angle::Result initImageAllocation(vk::ImageHelper *imageHelper,
859 bool hasProtectedContent,
860 const vk::MemoryProperties &memoryProperties,
861 VkMemoryPropertyFlags flags,
862 vk::MemoryAllocationType allocationType);
863
864 angle::Result releaseBufferAllocation(vk::BufferHelper *bufferHelper);
865
866 // Helper functions to initialize a buffer for a specific usage
867 // Suballocate a host visible buffer with alignment good for copyBuffer.
868 angle::Result initBufferForBufferCopy(vk::BufferHelper *bufferHelper,
869 size_t size,
870 vk::MemoryCoherency coherency);
871 // Suballocate a host visible buffer with alignment good for copyImage.
872 angle::Result initBufferForImageCopy(vk::BufferHelper *bufferHelper,
873 size_t size,
874 vk::MemoryCoherency coherency,
875 angle::FormatID formatId,
876 VkDeviceSize *offset,
877 uint8_t **dataPtr);
878 // Suballocate a buffer with alignment good for shader storage or copyBuffer.
879 angle::Result initBufferForVertexConversion(vk::BufferHelper *bufferHelper,
880 size_t size,
881 vk::MemoryHostVisibility hostVisibility);
882
883 // In the event of collecting too much garbage, we should flush the garbage so it can be freed.
884 void addToPendingImageGarbage(vk::ResourceUse use, VkDeviceSize size);
885
886 bool hasExcessPendingGarbage() const;
887
888 private:
889 // Dirty bits.
890 enum DirtyBitType : size_t
891 {
892 // Dirty bits that must be processed before the render pass is started. The handlers for
893 // these dirty bits don't record any commands.
894
895 // The AnySamplePassed render pass query has been ended.
896 DIRTY_BIT_ANY_SAMPLE_PASSED_QUERY_END,
897 // A glMemoryBarrier has been called and command buffers may need flushing.
898 DIRTY_BIT_MEMORY_BARRIER,
899 // Update default attribute buffers.
900 DIRTY_BIT_DEFAULT_ATTRIBS,
901 // The pipeline has changed and needs to be recreated. This dirty bit may close the render
902 // pass.
903 DIRTY_BIT_PIPELINE_DESC,
904 // Support for depth/stencil read-only feedback loop. When depth/stencil access changes,
905 // the render pass may need closing.
906 DIRTY_BIT_READ_ONLY_DEPTH_FEEDBACK_LOOP_MODE,
907
908 // Start the render pass.
909 DIRTY_BIT_RENDER_PASS,
910
911 // Dirty bits that must be processed after the render pass is started. Their handlers
912 // record commands.
913 DIRTY_BIT_EVENT_LOG,
914 // Update color and depth/stencil accesses in the render pass.
915 DIRTY_BIT_COLOR_ACCESS,
916 DIRTY_BIT_DEPTH_STENCIL_ACCESS,
917 // The pipeline needs to be rebound because a new command buffer has been allocated, or
918 // UtilsVk has changed the binding. The pipeline itself doesn't need to be recreated.
919 DIRTY_BIT_PIPELINE_BINDING,
920 DIRTY_BIT_TEXTURES,
921 DIRTY_BIT_VERTEX_BUFFERS,
922 DIRTY_BIT_INDEX_BUFFER,
923 DIRTY_BIT_UNIFORMS,
924 DIRTY_BIT_DRIVER_UNIFORMS,
925 // Shader resources excluding textures, which are handled separately.
926 DIRTY_BIT_SHADER_RESOURCES,
927 DIRTY_BIT_UNIFORM_BUFFERS,
928 DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS,
929 DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME,
930 DIRTY_BIT_DESCRIPTOR_SETS,
931 DIRTY_BIT_FRAMEBUFFER_FETCH_BARRIER,
932 DIRTY_BIT_BLEND_BARRIER,
933
934 // Dynamic state
935 // - In core Vulkan 1.0
936 DIRTY_BIT_DYNAMIC_VIEWPORT,
937 DIRTY_BIT_DYNAMIC_SCISSOR,
938 DIRTY_BIT_DYNAMIC_LINE_WIDTH,
939 DIRTY_BIT_DYNAMIC_DEPTH_BIAS,
940 DIRTY_BIT_DYNAMIC_BLEND_CONSTANTS,
941 DIRTY_BIT_DYNAMIC_STENCIL_COMPARE_MASK,
942 DIRTY_BIT_DYNAMIC_STENCIL_WRITE_MASK,
943 DIRTY_BIT_DYNAMIC_STENCIL_REFERENCE,
944 // - In VK_EXT_extended_dynamic_state
945 DIRTY_BIT_DYNAMIC_CULL_MODE,
946 DIRTY_BIT_DYNAMIC_FRONT_FACE,
947 DIRTY_BIT_DYNAMIC_DEPTH_TEST_ENABLE,
948 DIRTY_BIT_DYNAMIC_DEPTH_WRITE_ENABLE,
949 DIRTY_BIT_DYNAMIC_DEPTH_COMPARE_OP,
950 DIRTY_BIT_DYNAMIC_STENCIL_TEST_ENABLE,
951 DIRTY_BIT_DYNAMIC_STENCIL_OP,
952 // - In VK_EXT_extended_dynamic_state2
953 DIRTY_BIT_DYNAMIC_RASTERIZER_DISCARD_ENABLE,
954 DIRTY_BIT_DYNAMIC_DEPTH_BIAS_ENABLE,
955 DIRTY_BIT_DYNAMIC_LOGIC_OP,
956 DIRTY_BIT_DYNAMIC_PRIMITIVE_RESTART_ENABLE,
957 // - In VK_KHR_fragment_shading_rate
958 DIRTY_BIT_DYNAMIC_FRAGMENT_SHADING_RATE,
959
960 DIRTY_BIT_MAX,
961 };
962
963 // Dirty bit handlers that can break the render pass must always be specified before
964 // DIRTY_BIT_RENDER_PASS.
965 static_assert(
966 DIRTY_BIT_ANY_SAMPLE_PASSED_QUERY_END < DIRTY_BIT_RENDER_PASS,
967 "Render pass breaking dirty bit must be handled before the render pass dirty bit");
968 static_assert(
969 DIRTY_BIT_MEMORY_BARRIER < DIRTY_BIT_RENDER_PASS,
970 "Render pass breaking dirty bit must be handled before the render pass dirty bit");
971 static_assert(
972 DIRTY_BIT_DEFAULT_ATTRIBS < DIRTY_BIT_RENDER_PASS,
973 "Render pass breaking dirty bit must be handled before the render pass dirty bit");
974 static_assert(
975 DIRTY_BIT_PIPELINE_DESC < DIRTY_BIT_RENDER_PASS,
976 "Render pass breaking dirty bit must be handled before the render pass dirty bit");
977 static_assert(
978 DIRTY_BIT_READ_ONLY_DEPTH_FEEDBACK_LOOP_MODE < DIRTY_BIT_RENDER_PASS,
979 "Render pass breaking dirty bit must be handled before the render pass dirty bit");
980
981 // Dirty bit handlers that record commands or otherwise expect to manipulate the render pass
982 // that will be used for the draw call must be specified after DIRTY_BIT_RENDER_PASS.
983 static_assert(DIRTY_BIT_EVENT_LOG > DIRTY_BIT_RENDER_PASS,
984 "Render pass using dirty bit must be handled after the render pass dirty bit");
985 static_assert(DIRTY_BIT_COLOR_ACCESS > DIRTY_BIT_RENDER_PASS,
986 "Render pass using dirty bit must be handled after the render pass dirty bit");
987 static_assert(DIRTY_BIT_DEPTH_STENCIL_ACCESS > DIRTY_BIT_RENDER_PASS,
988 "Render pass using dirty bit must be handled after the render pass dirty bit");
989 static_assert(DIRTY_BIT_PIPELINE_BINDING > DIRTY_BIT_RENDER_PASS,
990 "Render pass using dirty bit must be handled after the render pass dirty bit");
991 static_assert(DIRTY_BIT_TEXTURES > DIRTY_BIT_RENDER_PASS,
992 "Render pass using dirty bit must be handled after the render pass dirty bit");
993 static_assert(DIRTY_BIT_VERTEX_BUFFERS > DIRTY_BIT_RENDER_PASS,
994 "Render pass using dirty bit must be handled after the render pass dirty bit");
995 static_assert(DIRTY_BIT_INDEX_BUFFER > DIRTY_BIT_RENDER_PASS,
996 "Render pass using dirty bit must be handled after the render pass dirty bit");
997 static_assert(DIRTY_BIT_DRIVER_UNIFORMS > DIRTY_BIT_RENDER_PASS,
998 "Render pass using dirty bit must be handled after the render pass dirty bit");
999 static_assert(DIRTY_BIT_SHADER_RESOURCES > DIRTY_BIT_RENDER_PASS,
1000 "Render pass using dirty bit must be handled after the render pass dirty bit");
1001 static_assert(
1002 DIRTY_BIT_UNIFORM_BUFFERS > DIRTY_BIT_SHADER_RESOURCES,
1003 "Uniform buffer using dirty bit must be handled after the shader resource dirty bit");
1004 static_assert(DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS > DIRTY_BIT_RENDER_PASS,
1005 "Render pass using dirty bit must be handled after the render pass dirty bit");
1006 static_assert(DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME > DIRTY_BIT_RENDER_PASS,
1007 "Render pass using dirty bit must be handled after the render pass dirty bit");
1008 static_assert(DIRTY_BIT_DESCRIPTOR_SETS > DIRTY_BIT_RENDER_PASS,
1009 "Render pass using dirty bit must be handled after the render pass dirty bit");
1010 static_assert(DIRTY_BIT_UNIFORMS > DIRTY_BIT_RENDER_PASS,
1011 "Render pass using dirty bit must be handled after the render pass dirty bit");
1012 static_assert(DIRTY_BIT_FRAMEBUFFER_FETCH_BARRIER > DIRTY_BIT_RENDER_PASS,
1013 "Render pass using dirty bit must be handled after the render pass dirty bit");
1014 static_assert(DIRTY_BIT_BLEND_BARRIER > DIRTY_BIT_RENDER_PASS,
1015 "Render pass using dirty bit must be handled after the render pass dirty bit");
1016 static_assert(DIRTY_BIT_DYNAMIC_VIEWPORT > DIRTY_BIT_RENDER_PASS,
1017 "Render pass using dirty bit must be handled after the render pass dirty bit");
1018 static_assert(DIRTY_BIT_DYNAMIC_SCISSOR > DIRTY_BIT_RENDER_PASS,
1019 "Render pass using dirty bit must be handled after the render pass dirty bit");
1020 static_assert(DIRTY_BIT_DYNAMIC_LINE_WIDTH > DIRTY_BIT_RENDER_PASS,
1021 "Render pass using dirty bit must be handled after the render pass dirty bit");
1022 static_assert(DIRTY_BIT_DYNAMIC_DEPTH_BIAS > DIRTY_BIT_RENDER_PASS,
1023 "Render pass using dirty bit must be handled after the render pass dirty bit");
1024 static_assert(DIRTY_BIT_DYNAMIC_BLEND_CONSTANTS > DIRTY_BIT_RENDER_PASS,
1025 "Render pass using dirty bit must be handled after the render pass dirty bit");
1026 static_assert(DIRTY_BIT_DYNAMIC_STENCIL_COMPARE_MASK > DIRTY_BIT_RENDER_PASS,
1027 "Render pass using dirty bit must be handled after the render pass dirty bit");
1028 static_assert(DIRTY_BIT_DYNAMIC_STENCIL_WRITE_MASK > DIRTY_BIT_RENDER_PASS,
1029 "Render pass using dirty bit must be handled after the render pass dirty bit");
1030 static_assert(DIRTY_BIT_DYNAMIC_STENCIL_REFERENCE > DIRTY_BIT_RENDER_PASS,
1031 "Render pass using dirty bit must be handled after the render pass dirty bit");
1032 static_assert(DIRTY_BIT_DYNAMIC_CULL_MODE > DIRTY_BIT_RENDER_PASS,
1033 "Render pass using dirty bit must be handled after the render pass dirty bit");
1034 static_assert(DIRTY_BIT_DYNAMIC_FRONT_FACE > DIRTY_BIT_RENDER_PASS,
1035 "Render pass using dirty bit must be handled after the render pass dirty bit");
1036 static_assert(DIRTY_BIT_DYNAMIC_DEPTH_TEST_ENABLE > DIRTY_BIT_RENDER_PASS,
1037 "Render pass using dirty bit must be handled after the render pass dirty bit");
1038 static_assert(DIRTY_BIT_DYNAMIC_DEPTH_WRITE_ENABLE > DIRTY_BIT_RENDER_PASS,
1039 "Render pass using dirty bit must be handled after the render pass dirty bit");
1040 static_assert(DIRTY_BIT_DYNAMIC_DEPTH_COMPARE_OP > DIRTY_BIT_RENDER_PASS,
1041 "Render pass using dirty bit must be handled after the render pass dirty bit");
1042 static_assert(DIRTY_BIT_DYNAMIC_STENCIL_TEST_ENABLE > DIRTY_BIT_RENDER_PASS,
1043 "Render pass using dirty bit must be handled after the render pass dirty bit");
1044 static_assert(DIRTY_BIT_DYNAMIC_STENCIL_OP > DIRTY_BIT_RENDER_PASS,
1045 "Render pass using dirty bit must be handled after the render pass dirty bit");
1046 static_assert(DIRTY_BIT_DYNAMIC_RASTERIZER_DISCARD_ENABLE > DIRTY_BIT_RENDER_PASS,
1047 "Render pass using dirty bit must be handled after the render pass dirty bit");
1048 static_assert(DIRTY_BIT_DYNAMIC_DEPTH_BIAS_ENABLE > DIRTY_BIT_RENDER_PASS,
1049 "Render pass using dirty bit must be handled after the render pass dirty bit");
1050 static_assert(DIRTY_BIT_DYNAMIC_LOGIC_OP > DIRTY_BIT_RENDER_PASS,
1051 "Render pass using dirty bit must be handled after the render pass dirty bit");
1052 static_assert(DIRTY_BIT_DYNAMIC_PRIMITIVE_RESTART_ENABLE > DIRTY_BIT_RENDER_PASS,
1053 "Render pass using dirty bit must be handled after the render pass dirty bit");
1054 static_assert(DIRTY_BIT_DYNAMIC_FRAGMENT_SHADING_RATE > DIRTY_BIT_RENDER_PASS,
1055 "Render pass using dirty bit must be handled after the render pass dirty bit");
1056
1057 using DirtyBits = angle::BitSet<DIRTY_BIT_MAX>;
1058
1059 using GraphicsDirtyBitHandler = angle::Result (
1060 ContextVk::*)(DirtyBits::Iterator *dirtyBitsIterator, DirtyBits dirtyBitMask);
1061 using ComputeDirtyBitHandler =
1062 angle::Result (ContextVk::*)(DirtyBits::Iterator *dirtyBitsIterator);
1063
1064 // The GpuEventQuery struct holds together a timestamp query and enough data to create a
1065 // trace event based on that. Use traceGpuEvent to insert such queries. They will be read back
1066 // when the results are available, without inserting a GPU bubble.
1067 //
1068 // - eventName will be the reported name of the event
1069 // - phase is either 'B' (duration begin), 'E' (duration end) or 'i' (instant event).
1070 // See Google's "Trace Event Format":
1071 // https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
1072 // - serial is the serial of the batch the query was submitted on. Until the batch is
1073 // submitted, the query is not checked to avoid incurring a flush.
1074 struct GpuEventQuery final
1075 {
1076 EventName name;
1077 char phase;
1078 vk::QueryHelper queryHelper;
1079 };
1080
1081 // Once a query result is available, the timestamp is read and a GpuEvent object is kept until
1082 // the next clock sync, at which point the clock drift is compensated in the results before
1083 // handing them off to the application.
1084 struct GpuEvent final
1085 {
1086 uint64_t gpuTimestampCycles;
1087 std::array<char, kMaxGpuEventNameLen> name;
1088 char phase;
1089 };
1090
1091 struct GpuClockSyncInfo
1092 {
1093 double gpuTimestampS;
1094 double cpuTimestampS;
1095 };
1096
1097 class ScopedDescriptorSetUpdates;
1098
1099 angle::Result setupDraw(const gl::Context *context,
1100 gl::PrimitiveMode mode,
1101 GLint firstVertexOrInvalid,
1102 GLsizei vertexOrIndexCount,
1103 GLsizei instanceCount,
1104 gl::DrawElementsType indexTypeOrInvalid,
1105 const void *indices,
1106 DirtyBits dirtyBitMask);
1107
1108 angle::Result setupIndexedDraw(const gl::Context *context,
1109 gl::PrimitiveMode mode,
1110 GLsizei indexCount,
1111 GLsizei instanceCount,
1112 gl::DrawElementsType indexType,
1113 const void *indices);
1114 angle::Result setupIndirectDraw(const gl::Context *context,
1115 gl::PrimitiveMode mode,
1116 DirtyBits dirtyBitMask,
1117 vk::BufferHelper *indirectBuffer);
1118 angle::Result setupIndexedIndirectDraw(const gl::Context *context,
1119 gl::PrimitiveMode mode,
1120 gl::DrawElementsType indexType,
1121 vk::BufferHelper *indirectBuffer);
1122
1123 angle::Result setupLineLoopIndexedIndirectDraw(const gl::Context *context,
1124 gl::PrimitiveMode mode,
1125 gl::DrawElementsType indexType,
1126 vk::BufferHelper *srcIndirectBuf,
1127 VkDeviceSize indirectBufferOffset,
1128 vk::BufferHelper **indirectBufferOut);
1129 angle::Result setupLineLoopIndirectDraw(const gl::Context *context,
1130 gl::PrimitiveMode mode,
1131 vk::BufferHelper *indirectBuffer,
1132 VkDeviceSize indirectBufferOffset,
1133 vk::BufferHelper **indirectBufferOut);
1134
1135 angle::Result setupLineLoopDraw(const gl::Context *context,
1136 gl::PrimitiveMode mode,
1137 GLint firstVertex,
1138 GLsizei vertexOrIndexCount,
1139 gl::DrawElementsType indexTypeOrInvalid,
1140 const void *indices,
1141 uint32_t *numIndicesOut);
1142
1143 angle::Result setupDispatch(const gl::Context *context);
1144
1145 gl::Rectangle getCorrectedViewport(const gl::Rectangle &viewport) const;
1146 void updateViewport(FramebufferVk *framebufferVk,
1147 const gl::Rectangle &viewport,
1148 float nearPlane,
1149 float farPlane);
1150 void updateFrontFace();
1151 void updateDepthRange(float nearPlane, float farPlane);
1152 void updateMissingOutputsMask();
1153 void updateSampleMaskWithRasterizationSamples(const uint32_t rasterizationSamples);
1154 void updateAlphaToCoverageWithRasterizationSamples(const uint32_t rasterizationSamples);
1155 void updateFrameBufferFetchSamples(const uint32_t prevSamples, const uint32_t curSamples);
1156 void updateFlipViewportDrawFramebuffer(const gl::State &glState);
1157 void updateFlipViewportReadFramebuffer(const gl::State &glState);
1158 void updateSurfaceRotationDrawFramebuffer(const gl::State &glState,
1159 const egl::Surface *currentDrawSurface);
1160 void updateSurfaceRotationReadFramebuffer(const gl::State &glState,
1161 const egl::Surface *currentReadSurface);
1162
1163 angle::Result updateActiveTextures(const gl::Context *context, gl::Command command);
1164 template <typename CommandBufferHelperT>
1165 angle::Result updateActiveImages(CommandBufferHelperT *commandBufferHelper);
1166
1167 ANGLE_INLINE void invalidateCurrentGraphicsPipeline()
1168 {
1169 // Note: DIRTY_BIT_PIPELINE_BINDING will be automatically set if pipeline bind is necessary.
1170 mGraphicsDirtyBits.set(DIRTY_BIT_PIPELINE_DESC);
1171 }
1172
1173 ANGLE_INLINE void invalidateCurrentComputePipeline()
1174 {
1175 mComputeDirtyBits |= kPipelineDescAndBindingDirtyBits;
1176 mCurrentComputePipeline = nullptr;
1177 }
1178
1179 angle::Result invalidateProgramExecutableHelper(const gl::Context *context);
1180
1181 void invalidateCurrentDefaultUniforms();
1182 angle::Result invalidateCurrentTextures(const gl::Context *context, gl::Command command);
1183 angle::Result invalidateCurrentShaderResources(gl::Command command);
1184 angle::Result invalidateCurrentShaderUniformBuffers(gl::Command command);
1185 void invalidateGraphicsDriverUniforms();
1186 void invalidateDriverUniforms();
1187
1188 angle::Result handleNoopDrawEvent() override;
1189
1190 // Handlers for graphics pipeline dirty bits.
1191 angle::Result handleDirtyGraphicsMemoryBarrier(DirtyBits::Iterator *dirtyBitsIterator,
1192 DirtyBits dirtyBitMask);
1193 angle::Result handleDirtyGraphicsDefaultAttribs(DirtyBits::Iterator *dirtyBitsIterator,
1194 DirtyBits dirtyBitMask);
1195 angle::Result handleDirtyGraphicsPipelineDesc(DirtyBits::Iterator *dirtyBitsIterator,
1196 DirtyBits dirtyBitMask);
1197 angle::Result handleDirtyGraphicsReadOnlyDepthFeedbackLoopMode(
1198 DirtyBits::Iterator *dirtyBitsIterator,
1199 DirtyBits dirtyBitMask);
1200 angle::Result handleDirtyAnySamplePassedQueryEnd(DirtyBits::Iterator *dirtyBitsIterator,
1201 DirtyBits dirtyBitMask);
1202 angle::Result handleDirtyGraphicsRenderPass(DirtyBits::Iterator *dirtyBitsIterator,
1203 DirtyBits dirtyBitMask);
1204 angle::Result handleDirtyGraphicsEventLog(DirtyBits::Iterator *dirtyBitsIterator,
1205 DirtyBits dirtyBitMask);
1206 angle::Result handleDirtyGraphicsColorAccess(DirtyBits::Iterator *dirtyBitsIterator,
1207 DirtyBits dirtyBitMask);
1208 angle::Result handleDirtyGraphicsDepthStencilAccess(DirtyBits::Iterator *dirtyBitsIterator,
1209 DirtyBits dirtyBitMask);
1210 angle::Result handleDirtyGraphicsPipelineBinding(DirtyBits::Iterator *dirtyBitsIterator,
1211 DirtyBits dirtyBitMask);
1212 angle::Result handleDirtyGraphicsTextures(DirtyBits::Iterator *dirtyBitsIterator,
1213 DirtyBits dirtyBitMask);
1214 angle::Result handleDirtyGraphicsVertexBuffers(DirtyBits::Iterator *dirtyBitsIterator,
1215 DirtyBits dirtyBitMask);
1216 angle::Result handleDirtyGraphicsIndexBuffer(DirtyBits::Iterator *dirtyBitsIterator,
1217 DirtyBits dirtyBitMask);
1218 angle::Result handleDirtyGraphicsDriverUniforms(DirtyBits::Iterator *dirtyBitsIterator,
1219 DirtyBits dirtyBitMask);
1220 angle::Result handleDirtyGraphicsShaderResources(DirtyBits::Iterator *dirtyBitsIterator,
1221 DirtyBits dirtyBitMask);
1222 angle::Result handleDirtyGraphicsUniformBuffers(DirtyBits::Iterator *dirtyBitsIterator,
1223 DirtyBits dirtyBitMask);
1224 angle::Result handleDirtyGraphicsFramebufferFetchBarrier(DirtyBits::Iterator *dirtyBitsIterator,
1225 DirtyBits dirtyBitMask);
1226 angle::Result handleDirtyGraphicsBlendBarrier(DirtyBits::Iterator *dirtyBitsIterator,
1227 DirtyBits dirtyBitMask);
1228 angle::Result handleDirtyGraphicsTransformFeedbackBuffersEmulation(
1229 DirtyBits::Iterator *dirtyBitsIterator,
1230 DirtyBits dirtyBitMask);
1231 angle::Result handleDirtyGraphicsTransformFeedbackBuffersExtension(
1232 DirtyBits::Iterator *dirtyBitsIterator,
1233 DirtyBits dirtyBitMask);
1234 angle::Result handleDirtyGraphicsTransformFeedbackResume(DirtyBits::Iterator *dirtyBitsIterator,
1235 DirtyBits dirtyBitMask);
1236 angle::Result handleDirtyGraphicsDescriptorSets(DirtyBits::Iterator *dirtyBitsIterator,
1237 DirtyBits dirtyBitMask);
1238 angle::Result handleDirtyGraphicsUniforms(DirtyBits::Iterator *dirtyBitsIterator,
1239 DirtyBits dirtyBitMask);
1240 angle::Result handleDirtyGraphicsDynamicViewport(DirtyBits::Iterator *dirtyBitsIterator,
1241 DirtyBits dirtyBitMask);
1242 angle::Result handleDirtyGraphicsDynamicScissor(DirtyBits::Iterator *dirtyBitsIterator,
1243 DirtyBits dirtyBitMask);
1244 angle::Result handleDirtyGraphicsDynamicLineWidth(DirtyBits::Iterator *dirtyBitsIterator,
1245 DirtyBits dirtyBitMask);
1246 angle::Result handleDirtyGraphicsDynamicDepthBias(DirtyBits::Iterator *dirtyBitsIterator,
1247 DirtyBits dirtyBitMask);
1248 angle::Result handleDirtyGraphicsDynamicBlendConstants(DirtyBits::Iterator *dirtyBitsIterator,
1249 DirtyBits dirtyBitMask);
1250 angle::Result handleDirtyGraphicsDynamicStencilCompareMask(
1251 DirtyBits::Iterator *dirtyBitsIterator,
1252 DirtyBits dirtyBitMask);
1253 angle::Result handleDirtyGraphicsDynamicStencilWriteMask(DirtyBits::Iterator *dirtyBitsIterator,
1254 DirtyBits dirtyBitMask);
1255 angle::Result handleDirtyGraphicsDynamicStencilReference(DirtyBits::Iterator *dirtyBitsIterator,
1256 DirtyBits dirtyBitMask);
1257 angle::Result handleDirtyGraphicsDynamicCullMode(DirtyBits::Iterator *dirtyBitsIterator,
1258 DirtyBits dirtyBitMask);
1259 angle::Result handleDirtyGraphicsDynamicFrontFace(DirtyBits::Iterator *dirtyBitsIterator,
1260 DirtyBits dirtyBitMask);
1261 angle::Result handleDirtyGraphicsDynamicDepthTestEnable(DirtyBits::Iterator *dirtyBitsIterator,
1262 DirtyBits dirtyBitMask);
1263 angle::Result handleDirtyGraphicsDynamicDepthWriteEnable(DirtyBits::Iterator *dirtyBitsIterator,
1264 DirtyBits dirtyBitMask);
1265 angle::Result handleDirtyGraphicsDynamicDepthCompareOp(DirtyBits::Iterator *dirtyBitsIterator,
1266 DirtyBits dirtyBitMask);
1267 angle::Result handleDirtyGraphicsDynamicStencilTestEnable(
1268 DirtyBits::Iterator *dirtyBitsIterator,
1269 DirtyBits dirtyBitMask);
1270 angle::Result handleDirtyGraphicsDynamicStencilOp(DirtyBits::Iterator *dirtyBitsIterator,
1271 DirtyBits dirtyBitMask);
1272 angle::Result handleDirtyGraphicsDynamicRasterizerDiscardEnable(
1273 DirtyBits::Iterator *dirtyBitsIterator,
1274 DirtyBits dirtyBitMask);
1275 angle::Result handleDirtyGraphicsDynamicDepthBiasEnable(DirtyBits::Iterator *dirtyBitsIterator,
1276 DirtyBits dirtyBitMask);
1277 angle::Result handleDirtyGraphicsDynamicLogicOp(DirtyBits::Iterator *dirtyBitsIterator,
1278 DirtyBits dirtyBitMask);
1279 angle::Result handleDirtyGraphicsDynamicPrimitiveRestartEnable(
1280 DirtyBits::Iterator *dirtyBitsIterator,
1281 DirtyBits dirtyBitMask);
1282 angle::Result handleDirtyGraphicsDynamicFragmentShadingRate(
1283 DirtyBits::Iterator *dirtyBitsIterator,
1284 DirtyBits dirtyBitMask);
1285
1286 // Handlers for compute pipeline dirty bits.
1287 angle::Result handleDirtyComputeMemoryBarrier(DirtyBits::Iterator *dirtyBitsIterator);
1288 angle::Result handleDirtyComputeEventLog(DirtyBits::Iterator *dirtyBitsIterator);
1289 angle::Result handleDirtyComputePipelineDesc(DirtyBits::Iterator *dirtyBitsIterator);
1290 angle::Result handleDirtyComputePipelineBinding(DirtyBits::Iterator *dirtyBitsIterator);
1291 angle::Result handleDirtyComputeTextures(DirtyBits::Iterator *dirtyBitsIterator);
1292 angle::Result handleDirtyComputeDriverUniforms(DirtyBits::Iterator *dirtyBitsIterator);
1293 angle::Result handleDirtyComputeShaderResources(DirtyBits::Iterator *dirtyBitsIterator);
1294 angle::Result handleDirtyComputeUniformBuffers(DirtyBits::Iterator *dirtyBitsIterator);
1295 angle::Result handleDirtyComputeDescriptorSets(DirtyBits::Iterator *dirtyBitsIterator);
1296 angle::Result handleDirtyComputeUniforms(DirtyBits::Iterator *dirtyBitsIterator);
1297
1298 // Common parts of the graphics and compute dirty bit handlers.
1299 angle::Result handleDirtyUniformsImpl(DirtyBits::Iterator *dirtyBitsIterator,
1300 vk::CommandBufferHelperCommon *commandBufferHelper);
1301 angle::Result handleDirtyMemoryBarrierImpl(DirtyBits::Iterator *dirtyBitsIterator,
1302 DirtyBits dirtyBitMask);
1303 template <typename CommandBufferT>
1304 angle::Result handleDirtyEventLogImpl(CommandBufferT *commandBuffer);
1305 template <typename CommandBufferHelperT>
1306 angle::Result handleDirtyTexturesImpl(CommandBufferHelperT *commandBufferHelper,
1307 PipelineType pipelineType);
1308 template <typename CommandBufferHelperT>
1309 angle::Result handleDirtyShaderResourcesImpl(CommandBufferHelperT *commandBufferHelper,
1310 PipelineType pipelineType,
1311 DirtyBits::Iterator *dirtyBitsIterator);
1312 template <typename CommandBufferHelperT>
1313 angle::Result handleDirtyUniformBuffersImpl(CommandBufferHelperT *commandBufferHelper);
1314 template <typename CommandBufferHelperT>
1315 angle::Result handleDirtyDescriptorSetsImpl(CommandBufferHelperT *commandBufferHelper,
1316 PipelineType pipelineType);
1317 void handleDirtyGraphicsDynamicScissorImpl(bool isPrimitivesGeneratedQueryActive);
1318
1319 void writeAtomicCounterBufferDriverUniformOffsets(uint32_t *offsetsOut, size_t offsetsSize);
1320
1321 enum class Submit
1322 {
1323 OutsideRenderPassCommandsOnly,
1324 AllCommands,
1325 };
1326
1327 angle::Result submitCommands(const vk::Semaphore *signalSemaphore,
1328 const vk::SharedExternalFence *externalFence,
1329 Submit submission);
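// A minimal usage sketch of the above (assumption; the real call sites live in ContextVk.cpp).
// With no semaphore to signal and no external fence to attach, all recorded commands are
// submitted:
//
//   ANGLE_TRY(submitCommands(nullptr, nullptr, Submit::AllCommands));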
1330
1331 angle::Result synchronizeCpuGpuTime();
1332 angle::Result traceGpuEventImpl(vk::OutsideRenderPassCommandBuffer *commandBuffer,
1333 char phase,
1334 const EventName &name);
1335 angle::Result checkCompletedGpuEvents();
1336 void flushGpuEvents(double nextSyncGpuTimestampS, double nextSyncCpuTimestampS);
1337 void handleDeviceLost();
1338 bool shouldEmulateSeamfulCubeMapSampling() const;
1339 void clearAllGarbage();
1340 void dumpCommandStreamDiagnostics();
1341 angle::Result flushOutsideRenderPassCommands();
1342 // Flush commands and end render pass without setting any dirty bits.
1343 // flushCommandsAndEndRenderPass() and flushDirtyGraphicsRenderPass() will set the dirty bits
1344 // directly or through the iterator respectively. Outside those two functions, this shouldn't
1345 // be called directly.
1346 angle::Result flushDirtyGraphicsRenderPass(DirtyBits::Iterator *dirtyBitsIterator,
1347 DirtyBits dirtyBitMask,
1348 RenderPassClosureReason reason);
1349
1350 // Mark the render pass to be closed on the next draw call. The render pass is not actually
1351 // closed and can be restored with restoreFinishedRenderPass if necessary, for example to append
1352 // a resolve attachment.
1353 void onRenderPassFinished(RenderPassClosureReason reason);
1354
1355 void initIndexTypeMap();
1356
1357 VertexArrayVk *getVertexArray() const;
1358 FramebufferVk *getDrawFramebuffer() const;
1359
1360 // Read-after-write hazards are generally handled with |glMemoryBarrier| when the source of the
1361 // write is a storage output. When the write happens outside the render pass, the natural
1362 // placement of the render pass after the current outside-render-pass commands ensures that the
1363 // memory barriers and image layout transitions automatically take care of such synchronization.
1364 //
1365 // However, a number of read-after-write cases require breaking the render pass to preserve the
1366 // order of operations (see the note after the declarations below):
1367 //
1368 // - Transform feedback write (in render pass), then vertex/index read (in render pass)
1369 // - Transform feedback write (in render pass), then ubo read (outside render pass)
1370 // - Framebuffer attachment write (in render pass), then texture sample (outside render pass)
1371 // * Note that texture sampling inside render pass would cause a feedback loop
1372 //
1373 angle::Result endRenderPassIfTransformFeedbackBuffer(const vk::BufferHelper *buffer);
1374 angle::Result endRenderPassIfComputeReadAfterTransformFeedbackWrite();
1375 angle::Result endRenderPassIfComputeAccessAfterGraphicsImageAccess();
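// Note: onIndexBufferChange() and onVertexBufferChange(), defined inline near the end of this
// file, use endRenderPassIfTransformFeedbackBuffer() for exactly the first case above: a buffer
// written by transform feedback inside the current render pass must not be consumed as an
// index/vertex buffer within that same render pass.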
1376
1377 // Update read-only depth feedback loop mode. Typically called from
1378 // handleDirtyGraphicsReadOnlyDepthFeedbackLoopMode, but can be called from UtilsVk in functions
1379 // that don't necessarily break the render pass.
1380 angle::Result switchOutReadOnlyDepthStencilMode(DirtyBits::Iterator *dirtyBitsIterator,
1381 DirtyBits dirtyBitMask,
1382 UpdateDepthFeedbackLoopReason depthReason,
1383 UpdateDepthFeedbackLoopReason stencilReason);
1384 angle::Result switchToReadOnlyDepthStencilMode(gl::Texture *texture,
1385 gl::Command command,
1386 FramebufferVk *drawFramebuffer,
1387 bool isStencilTexture);
1388
1389 angle::Result onResourceAccess(const vk::CommandBufferAccess &access);
1390 angle::Result flushCommandBuffersIfNecessary(const vk::CommandBufferAccess &access);
1391 bool renderPassUsesStorageResources() const;
1392
1393 angle::Result pushDebugGroupImpl(GLenum source, GLuint id, const char *message);
1394 angle::Result popDebugGroupImpl();
1395
1396 void updateScissor(const gl::State &glState);
1397
1398 void updateDepthStencil(const gl::State &glState);
1399 void updateDepthTestEnabled(const gl::State &glState);
1400 void updateDepthWriteEnabled(const gl::State &glState);
1401 void updateDepthFunc(const gl::State &glState);
1402 void updateStencilTestEnabled(const gl::State &glState);
1403
1404 void updateSampleShadingWithRasterizationSamples(const uint32_t rasterizationSamples);
1405 void updateRasterizationSamples(const uint32_t rasterizationSamples);
1406 void updateRasterizerDiscardEnabled(bool isPrimitivesGeneratedQueryActive);
1407
1408 void updateAdvancedBlendEquations(const gl::ProgramExecutable *executable);
1409
1410 void updateDither();
1411
1412 // When the useNonZeroStencilWriteMaskStaticState workaround is enabled, the static state for
1413 // stencil should be non-zero despite the state being dynamic. This is done when:
1414 //
1415 // - The shader includes discard, or
1416 // - Alpha-to-coverage is enabled.
1417 //
1418 // An alternative could have been to set the static state unconditionally to non-zero. This is
1419 // avoided however, as on the affected driver that would disable certain optimizations.
1420 void updateStencilWriteWorkaround();
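// A hedged sketch of the decision described above (the exact plumbing lives in ContextVk.cpp;
// |hasDiscard| and |alphaToCoverageEnabled| are hypothetical local names):
//
//   const bool forceNonZeroStencilWriteMask = hasDiscard || alphaToCoverageEnabled;
//   // The pipeline's static stencil write mask is then made non-zero iff this is true.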
1421
1422 void updateShaderResourcesWithSharedCacheKey(
1423 const vk::SharedDescriptorSetCacheKey &sharedCacheKey);
1424
1425 angle::Result createGraphicsPipeline();
1426
1427 angle::Result allocateQueueSerialIndex();
1428 void releaseQueueSerialIndex();
1429
1430 void generateOutsideRenderPassCommandsQueueSerial();
1431 void generateRenderPassCommandsQueueSerial(QueueSerial *queueSerialOut);
1432
1433 angle::Result ensureInterfacePipelineCache();
1434
1435 angle::ImageLoadContext mImageLoadContext;
1436
1437 std::array<GraphicsDirtyBitHandler, DIRTY_BIT_MAX> mGraphicsDirtyBitHandlers;
1438 std::array<ComputeDirtyBitHandler, DIRTY_BIT_MAX> mComputeDirtyBitHandlers;
1439
1440 vk::RenderPassCommandBuffer *mRenderPassCommandBuffer;
1441
1442 vk::PipelineHelper *mCurrentGraphicsPipeline;
1443 vk::PipelineHelper *mCurrentGraphicsPipelineShaders;
1444 vk::PipelineHelper *mCurrentGraphicsPipelineVertexInput;
1445 vk::PipelineHelper *mCurrentGraphicsPipelineFragmentOutput;
1446 vk::PipelineHelper *mCurrentComputePipeline;
1447 gl::PrimitiveMode mCurrentDrawMode;
1448
1449 WindowSurfaceVk *mCurrentWindowSurface;
1450 // Records the current rotation of the surface (draw/read) framebuffer, derived from
1451 // mCurrentWindowSurface->getPreTransform().
1452 SurfaceRotation mCurrentRotationDrawFramebuffer;
1453 SurfaceRotation mCurrentRotationReadFramebuffer;
1454
1455 // Tracks whether we are in depth/stencil *read-only* or feedback-loop mode. Read-only mode is
1456 // specially allowed because both usages, attachment and texture, are read-only. When switching
1457 // away from read-only mode, the render pass is broken to accommodate the new writable layout.
1458 vk::RenderPassUsageFlags mDepthStencilAttachmentFlags;
1459
1460 // Keep a cached pipeline description structure that can be used to query the pipeline cache.
1461 // Kept in a pointer so allocations can be aligned, and structs can be portably packed.
1462 std::unique_ptr<vk::GraphicsPipelineDesc> mGraphicsPipelineDesc;
1463 // Transition bits indicating which state has changed since last pipeline recreation. It is
1464 // used to look up pipelines in the cache without iterating over the entire key as a performance
1465 // optimization.
1466 //
1467 // |mGraphicsPipelineTransition| tracks transition bits since the last complete pipeline
1468 // creation/retrieval. |mGraphicsPipelineLibraryTransition| tracks the same but for the case
1469 // where the pipeline is created through libraries. The latter accumulates
1470 // |mGraphicsPipelineTransition| while the caches are hit, so that the bits are not lost if a
1471 // partial library needs to be created in the future.
1472 vk::GraphicsPipelineTransitionBits mGraphicsPipelineTransition;
1473 vk::GraphicsPipelineTransitionBits mGraphicsPipelineLibraryTransition;
1474
1475 // A pipeline cache specifically used for vertex input and fragment output pipelines, when there
1476 // is no blob reuse between libraries and monolithic pipelines. In that case, there's no point
1477 // in storing monolithic pipelines in the same cache as these partial pipelines.
1478 //
1479 // Note additionally that applications only create a handful of vertex input and fragment output
1480 // pipelines, which is also a fast operation, so this cache is both small and ephemeral (i.e.
1481 // not cached to disk).
1482 vk::PipelineCache mInterfacePipelinesCache;
1483
1484 // These pools are externally synchronized, so they cannot be accessed from different
1485 // threads simultaneously. Hence, we keep them in the ContextVk instead of the vk::Renderer.
1486 // Note that this implementation would need to change in shared resource scenarios. Likely
1487 // we'd instead share a single set of pools between the share groups.
1488 gl::QueryTypeMap<vk::DynamicQueryPool> mQueryPools;
1489
1490 // Queries that need to be closed and reopened with the render pass:
1491 //
1492 // - Occlusion queries
1493 // - Transform feedback queries, if not emulated
1494 gl::QueryTypeMap<QueryVk *> mActiveRenderPassQueries;
1495
1496 // Dirty bits.
1497 DirtyBits mGraphicsDirtyBits;
1498 DirtyBits mComputeDirtyBits;
1499 DirtyBits mNonIndexedDirtyBitsMask;
1500 DirtyBits mIndexedDirtyBitsMask;
1501 DirtyBits mNewGraphicsCommandBufferDirtyBits;
1502 DirtyBits mNewComputeCommandBufferDirtyBits;
1503 DirtyBits mDynamicStateDirtyBits;
1504 static constexpr DirtyBits kColorAccessChangeDirtyBits{DIRTY_BIT_COLOR_ACCESS};
1505 static constexpr DirtyBits kDepthStencilAccessChangeDirtyBits{
1506 DIRTY_BIT_READ_ONLY_DEPTH_FEEDBACK_LOOP_MODE, DIRTY_BIT_DEPTH_STENCIL_ACCESS};
1507 static constexpr DirtyBits kIndexAndVertexDirtyBits{DIRTY_BIT_VERTEX_BUFFERS,
1508 DIRTY_BIT_INDEX_BUFFER};
1509 static constexpr DirtyBits kPipelineDescAndBindingDirtyBits{DIRTY_BIT_PIPELINE_DESC,
1510 DIRTY_BIT_PIPELINE_BINDING};
1511 static constexpr DirtyBits kTexturesAndDescSetDirtyBits{DIRTY_BIT_TEXTURES,
1512 DIRTY_BIT_DESCRIPTOR_SETS};
1513 static constexpr DirtyBits kResourcesAndDescSetDirtyBits{DIRTY_BIT_SHADER_RESOURCES,
1514 DIRTY_BIT_DESCRIPTOR_SETS};
1515 static constexpr DirtyBits kUniformBuffersAndDescSetDirtyBits{DIRTY_BIT_UNIFORM_BUFFERS,
1516 DIRTY_BIT_DESCRIPTOR_SETS};
1517 static constexpr DirtyBits kXfbBuffersAndDescSetDirtyBits{DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS,
1518 DIRTY_BIT_DESCRIPTOR_SETS};
1519
1520 // The offset we had the last time we bound the index buffer.
1521 const GLvoid *mLastIndexBufferOffset;
1522 VkDeviceSize mCurrentIndexBufferOffset;
1523 gl::DrawElementsType mCurrentDrawElementsType;
1524 angle::PackedEnumMap<gl::DrawElementsType, VkIndexType> mIndexTypeMap;
1525
1526 // Cache the current draw call's firstVertex to be passed to
1527 // TransformFeedbackVk::getBufferOffsets. Unfortunately, gl_BaseVertex support in Vulkan is
1528 // not yet ubiquitous, which would have otherwise removed the need for this value to be passed
1529 // as a uniform.
1530 GLint mXfbBaseVertex;
1531 // Cache the current draw call's vertex count as well to support instanced draw calls
1532 GLuint mXfbVertexCountPerInstance;
1533
1534 // Cached clear value/mask for color and depth/stencil.
1535 VkClearValue mClearColorValue;
1536 VkClearValue mClearDepthStencilValue;
1537 gl::BlendStateExt::ColorMaskStorage::Type mClearColorMasks;
1538
1539 IncompleteTextureSet mIncompleteTextures;
1540
1541 // If the current surface bound to this context wants to have all rendering flipped vertically.
1542 // Updated on calls to onMakeCurrent.
1543 bool mFlipYForCurrentSurface;
1544 bool mFlipViewportForDrawFramebuffer;
1545 bool mFlipViewportForReadFramebuffer;
1546
1547 // If any host-visible buffer is written by the GPU since last submission, a barrier is inserted
1548 // at the end of the command buffer to make that write available to the host.
1549 bool mIsAnyHostVisibleBufferWritten;
1550
1551 // Whether this context should do seamful cube map sampling emulation.
1552 bool mEmulateSeamfulCubeMapSampling;
1553
1554 // This info is used in the descriptor update step.
1555 gl::ActiveTextureArray<TextureVk *> mActiveTextures;
1556
1557 // We use textureSerial to optimize texture binding updates. Each permutation of a
1558 // {VkImage/VkSampler} generates a unique serial. These object ids are combined to form a unique
1559 // signature for each descriptor set. This allows us to keep a cache of descriptor sets and
1560 // avoid calling vkAllocateDescriptorSets on each texture update.
1561 vk::DescriptorSetDesc mActiveTexturesDesc;
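// Informal reading of the above: the texture descriptor set cache is keyed by mActiveTexturesDesc
// rather than by raw Vulkan handles, so rebinding the same {VkImage, VkSampler} combination can
// reuse a previously allocated descriptor set.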
1562
1563 vk::DescriptorSetDescBuilder mShaderBuffersDescriptorDesc;
1564 // The WriteDescriptorDescs from ProgramExecutableVk with InputAttachment update.
1565 vk::WriteDescriptorDescs mShaderBufferWriteDescriptorDescs;
1566
1567 gl::ActiveTextureArray<TextureVk *> mActiveImages;
1568
1569 // "Current Value" aka default vertex attribute state.
1570 gl::AttributesMask mDirtyDefaultAttribsMask;
1571
1572 // DynamicBuffers for streaming vertex data from client memory pointers as well as for default
1573 // attributes. mHasInFlightStreamedVertexBuffers indicates whether the dynamic buffers have any
1574 // in-flight buffers that need to be released at submission time.
1575 gl::AttribArray<vk::DynamicBuffer> mStreamedVertexBuffers;
1576 gl::AttributesMask mHasInFlightStreamedVertexBuffers;
1577
1578 // We use a single pool for recording commands. We also keep a free list for pool recycling.
1579 vk::SecondaryCommandPools mCommandPools;
1580
1581 // Per context queue serial
1582 SerialIndex mCurrentQueueSerialIndex;
1583 QueueSerial mLastFlushedQueueSerial;
1584 QueueSerial mLastSubmittedQueueSerial;
1585 // All submitted queue serials over the lifetime of this context.
1586 vk::ResourceUse mSubmittedResourceUse;
1587 // Current active transform feedback buffer queue serial. Invalid if TF not active.
1588 QueueSerial mCurrentTransformFeedbackQueueSerial;
1589
1590 // The garbage list for single-context-use objects. The list will be GPU-tracked by the next
1591 // submission's queueSerial. Note: resource-based shared objects should always be added to the
1592 // renderer's mSharedGarbageList.
1593 vk::GarbageObjects mCurrentGarbage;
1594
1595 RenderPassCache mRenderPassCache;
1596
1597 vk::OutsideRenderPassCommandBufferHelper *mOutsideRenderPassCommands;
1598 vk::RenderPassCommandBufferHelper *mRenderPassCommands;
1599
1600 // Allocators for the render pass command buffers. They are utilized only when shared ring
1601 // buffer allocators are being used.
1602 vk::SecondaryCommandMemoryAllocator mOutsideRenderPassCommandsAllocator;
1603 vk::SecondaryCommandMemoryAllocator mRenderPassCommandsAllocator;
1604
1605 // The following is used when creating debug-util markers for graphics debuggers (e.g. AGI). A
1606 // given gl{Begin|End}Query command may result in commands being submitted to the outside or
1607 // render-pass command buffer. The ContextVk::handleGraphicsEventLog() method records the
1608 // appropriate command buffer for use by ContextVk::endEventLogForQuery(). The knowledge of
1609 // which command buffer to use depends on the particular type of query (e.g. samples
1610 // vs. timestamp), and is only known by the query code, which is what calls
1611 // ContextVk::handleGraphicsEventLog(). After all back-end processing of the gl*Query command
1612 // is complete, the front-end calls ContextVk::endEventLogForQuery(), which needs to know which
1613 // command buffer to call endDebugUtilsLabelEXT() for.
1614 GraphicsEventCmdBuf mQueryEventType;
1615
1616 // Internal shader library.
1617 vk::ShaderLibrary mShaderLibrary;
1618 UtilsVk mUtils;
1619
1620 bool mGpuEventsEnabled;
1621 vk::DynamicQueryPool mGpuEventQueryPool;
1622 // A list of queries that have yet to be turned into an event (their result is not yet
1623 // available).
1624 std::vector<GpuEventQuery> mInFlightGpuEventQueries;
1625 // A list of gpu events since the last clock sync.
1626 std::vector<GpuEvent> mGpuEvents;
1627 // The current frame index, used to generate a submission-encompassing event tagged with it.
1628 uint32_t mPrimaryBufferEventCounter;
1629
1630 // Cached value of the color attachment mask of the current draw framebuffer. This is used to
1631 // know which attachment indices have their blend state set in |mGraphicsPipelineDesc|, and
1632 // subsequently is used to clear the blend state for attachments that no longer exist when a new
1633 // framebuffer is bound.
1634 gl::DrawBufferMask mCachedDrawFramebufferColorAttachmentMask;
1635
1636 // Whether a flush was requested, but is deferred as an optimization to avoid breaking the
1637 // render pass.
1638 bool mHasDeferredFlush;
1639
1640 // Whether this context has produced any commands so far. While the renderer already skips
1641 // vkQueueSubmit when there is no command recorded, this variable allows glFlush itself to be
1642 // entirely skipped. This is particularly needed for an optimization where the Surface is in
1643 // shared-present mode, and the app is unnecessarily calling eglSwapBuffers (which is equivalent
1644 // to glFlush in that mode).
1645 bool mHasAnyCommandsPendingSubmission;
1646
1647 // Whether framebuffer fetch is active. When the permanentlySwitchToFramebufferFetchMode
1648 // feature is enabled, if any program uses framebuffer fetch, rendering switches to assuming
1649 // framebuffer fetch could happen in any render pass. This incurs a potential cost due to usage
1650 // of the GENERAL layout instead of COLOR_ATTACHMENT_OPTIMAL, but has the definite benefit of
1651 // avoiding render pass breaks when a framebuffer fetch program is used mid render pass.
1652 bool mIsInFramebufferFetchMode;
1653
1654 // True if the currently started render pass is allowed to be reactivated.
1655 bool mAllowRenderPassToReactivate;
1656
1657 // The size of copy commands issued between buffers and images. Used to submit the command
1658 // buffer for the outside render pass.
1659 VkDeviceSize mTotalBufferToImageCopySize;
1660 VkDeviceSize mEstimatedPendingImageGarbageSize;
1661
1662 // Semaphores that must be flushed before the current commands. Flushed semaphores will be
1663 // waited on in the next submission.
1664 std::vector<VkSemaphore> mWaitSemaphores;
1665 std::vector<VkPipelineStageFlags> mWaitSemaphoreStageMasks;
1666 // Whether this context has wait semaphores (flushed and unflushed) that must be submitted.
1667 bool mHasWaitSemaphoresPendingSubmission;
1668
1669 // Hold information from the last gpu clock sync for future gpu-to-cpu timestamp conversions.
1670 GpuClockSyncInfo mGpuClockSync;
1671
1672 // The very first timestamp queried for a GPU event is used as the origin, so event timestamps
1673 // have values close to zero. This avoids losing the low 12 bits when converting these 64-bit
1674 // values to double (whose mantissa holds only 52 bits).
1675 uint64_t mGpuEventTimestampOrigin;
1676
1677 // A mix of per-frame and per-run counters.
1678 angle::PerfMonitorCounterGroups mPerfMonitorCounters;
1679
1680 gl::state::DirtyBits mPipelineDirtyBitsMask;
1681
1682 egl::ContextPriority mInitialContextPriority;
1683 egl::ContextPriority mContextPriority;
1684 vk::ProtectionType mProtectionType;
1685
1686 ShareGroupVk *mShareGroupVk;
1687
1688 // This is a special "empty" placeholder buffer, for use when we just need a buffer binding but
1689 // not the data. Examples are a shader that has no uniforms or doesn't use all slots in the
1690 // atomic counter buffer array, or places where there is no vertex buffer, since Vulkan does not
1691 // allow binding a null vertex buffer.
1692 vk::BufferHelper mEmptyBuffer;
1693
1694 // Storage for default uniforms of ProgramVks and ProgramPipelineVks.
1695 vk::DynamicBuffer mDefaultUniformStorage;
1696
1697 std::vector<std::string> mCommandBufferDiagnostics;
1698
1699 // Record GL API calls for debuggers
1700 std::vector<std::string> mEventLog;
1701
1702 // Viewport and scissor are handled as dynamic state.
1703 VkViewport mViewport;
1704 VkRect2D mScissor;
1705
1706 VulkanCacheStats mVulkanCacheStats;
1707
1708 RangedSerialFactory mOutsideRenderPassSerialFactory;
1709 };
1710
1711 ANGLE_INLINE angle::Result ContextVk::endRenderPassIfTransformFeedbackBuffer(
1712 const vk::BufferHelper *buffer)
1713 {
1714 if (!mCurrentTransformFeedbackQueueSerial.valid() || !buffer ||
1715 !buffer->writtenByCommandBuffer(mCurrentTransformFeedbackQueueSerial))
1716 {
1717 return angle::Result::Continue;
1718 }
1719
1720 return flushCommandsAndEndRenderPass(RenderPassClosureReason::XfbWriteThenVertexIndexBuffer);
1721 }
1722
1723 ANGLE_INLINE angle::Result ContextVk::onIndexBufferChange(
1724 const vk::BufferHelper *currentIndexBuffer)
1725 {
1726 mGraphicsDirtyBits.set(DIRTY_BIT_INDEX_BUFFER);
1727 mLastIndexBufferOffset = reinterpret_cast<const void *>(angle::DirtyPointer);
1728 return endRenderPassIfTransformFeedbackBuffer(currentIndexBuffer);
1729 }
1730
1731 ANGLE_INLINE angle::Result ContextVk::onVertexBufferChange(const vk::BufferHelper *vertexBuffer)
1732 {
1733 mGraphicsDirtyBits.set(DIRTY_BIT_VERTEX_BUFFERS);
1734 return endRenderPassIfTransformFeedbackBuffer(vertexBuffer);
1735 }
1736
1737 ANGLE_INLINE angle::Result ContextVk::onVertexAttributeChange(size_t attribIndex,
1738 GLuint stride,
1739 GLuint divisor,
1740 angle::FormatID format,
1741 bool compressed,
1742 GLuint relativeOffset,
1743 const vk::BufferHelper *vertexBuffer)
1744 {
1745 const GLuint staticStride = mRenderer->useVertexInputBindingStrideDynamicState() ? 0 : stride;
1746
1747 if (!getFeatures().supportsVertexInputDynamicState.enabled)
1748 {
1749 invalidateCurrentGraphicsPipeline();
1750
1751 // Set divisor to 1 for attribs with emulated divisor
1752 mGraphicsPipelineDesc->updateVertexInput(
1753 this, &mGraphicsPipelineTransition, static_cast<uint32_t>(attribIndex), staticStride,
1754 divisor > mRenderer->getMaxVertexAttribDivisor() ? 1 : divisor, format, compressed,
1755 relativeOffset);
1756 }
1757 return onVertexBufferChange(vertexBuffer);
1758 }
1759
1760 ANGLE_INLINE bool ContextVk::hasUnsubmittedUse(const vk::ResourceUse &use) const
1761 {
1762 return mCurrentQueueSerialIndex != kInvalidQueueSerialIndex &&
1763 use > QueueSerial(mCurrentQueueSerialIndex,
1764 mRenderer->getLastSubmittedSerial(mCurrentQueueSerialIndex));
1765 }
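// Informal reading of the check above: a resource use is "unsubmitted" when it was recorded with
// a serial newer than the last serial this context's queue index has actually submitted to the
// GPU queue.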
1766
1767 ANGLE_INLINE bool UseLineRaster(const ContextVk *contextVk, gl::PrimitiveMode mode)
1768 {
1769 return gl::IsLineMode(mode);
1770 }
1771
1772 uint32_t GetDriverUniformSize(vk::Context *context, PipelineType pipelineType);
1773 } // namespace rx
1774
1775 // Generate a perf warning, and insert an event marker in the command buffer.
1776 #define ANGLE_VK_PERF_WARNING(contextVk, severity, ...) \
1777 do \
1778 { \
1779 ANGLE_PERF_WARNING(contextVk->getDebug(), severity, __VA_ARGS__); \
1780 if (contextVk->isDebugEnabled()) \
1781 { \
1782 char ANGLE_MESSAGE[200]; \
1783 snprintf(ANGLE_MESSAGE, sizeof(ANGLE_MESSAGE), __VA_ARGS__); \
1784 contextVk->insertEventMarkerImpl(GL_DEBUG_SOURCE_OTHER, ANGLE_MESSAGE); \
1785 } \
1786 } while (0)
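// Example use of the macro above (the message and severity are hypothetical; severities come from
// the GL_DEBUG_SEVERITY_* enums):
//
//   ANGLE_VK_PERF_WARNING(contextVk, GL_DEBUG_SEVERITY_LOW,
//                         "Render pass closed due to %s", "a sampled attachment");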
1787
1788 // Generate a trace event for graphics profiler, and insert an event marker in the command buffer.
1789 #define ANGLE_VK_TRACE_EVENT_AND_MARKER(contextVk, ...) \
1790 do \
1791 { \
1792 char ANGLE_MESSAGE[200]; \
1793 snprintf(ANGLE_MESSAGE, sizeof(ANGLE_MESSAGE), __VA_ARGS__); \
1794 ANGLE_TRACE_EVENT0("gpu.angle", ANGLE_MESSAGE); \
1795 \
1796 contextVk->insertEventMarkerImpl(GL_DEBUG_SOURCE_OTHER, ANGLE_MESSAGE); \
1797 } while (0)
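// Example use of the macro above (hypothetical label; the variadic arguments form a printf-style
// format string and its arguments):
//
//   ANGLE_VK_TRACE_EVENT_AND_MARKER(contextVk, "Readback of %s", "the default framebuffer");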
1798
1799 #endif // LIBANGLE_RENDERER_VULKAN_CONTEXTVK_H_
1800