//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// ContextVk.h:
//    Defines the class interface for ContextVk, implementing ContextImpl.
//

#ifndef LIBANGLE_RENDERER_VULKAN_CONTEXTVK_H_
#define LIBANGLE_RENDERER_VULKAN_CONTEXTVK_H_

#include <condition_variable>

#include "common/PackedEnums.h"
#include "common/vulkan/vk_headers.h"
#include "libANGLE/renderer/ContextImpl.h"
#include "libANGLE/renderer/renderer_utils.h"
#include "libANGLE/renderer/vulkan/DisplayVk.h"
#include "libANGLE/renderer/vulkan/OverlayVk.h"
#include "libANGLE/renderer/vulkan/PersistentCommandPool.h"
#include "libANGLE/renderer/vulkan/RendererVk.h"
#include "libANGLE/renderer/vulkan/ShareGroupVk.h"
#include "libANGLE/renderer/vulkan/vk_helpers.h"

namespace angle
{
struct FeaturesVk;
}  // namespace angle

namespace rx
{
namespace vk
{
class SyncHelper;
}  // namespace vk

class ProgramExecutableVk;
class RendererVk;
class WindowSurfaceVk;
class OffscreenSurfaceVk;
class ShareGroupVk;

static constexpr uint32_t kMaxGpuEventNameLen = 32;
using EventName = std::array<char, kMaxGpuEventNameLen>;

using ContextVkDescriptorSetList = angle::PackedEnumMap<PipelineType, uint32_t>;
using CounterPipelineTypeMap = angle::PackedEnumMap<PipelineType, uint32_t>;

enum class GraphicsEventCmdBuf
{
    NotInQueryCmd = 0,
    InOutsideCmdBufQueryCmd = 1,
    InRenderPassCmdBufQueryCmd = 2,

    InvalidEnum = 3,
    EnumCount = 3,
};

// Why the depth/stencil feedback loop is being updated. Based on whether it's due to a draw or a
// clear, different GL state affects the depth/stencil write.
enum class UpdateDepthFeedbackLoopReason
{
    None,
    Draw,
    Clear,
};

class ContextVk : public ContextImpl, public vk::Context, public MultisampleTextureInitializer
{
  public:
    ContextVk(const gl::State &state, gl::ErrorSet *errorSet, RendererVk *renderer);
    ~ContextVk() override;

    angle::Result initialize() override;

    void onDestroy(const gl::Context *context) override;

    // Flush and finish.
    angle::Result flush(const gl::Context *context) override;
    angle::Result finish(const gl::Context *context) override;

    // Drawing methods.
    angle::Result drawArrays(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLint first,
        GLsizei count) override;
    angle::Result drawArraysInstanced(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLint first,
        GLsizei count,
        GLsizei instanceCount) override;
    angle::Result drawArraysInstancedBaseInstance(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLint first,
        GLsizei count,
        GLsizei instanceCount,
        GLuint baseInstance) override;

    angle::Result drawElements(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLsizei count,
        gl::DrawElementsType type,
        const void *indices) override;
    angle::Result drawElementsBaseVertex(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLsizei count,
        gl::DrawElementsType type,
        const void *indices,
        GLint baseVertex) override;
    angle::Result drawElementsInstanced(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLsizei count,
        gl::DrawElementsType type,
        const void *indices,
        GLsizei instanceCount) override;
    angle::Result drawElementsInstancedBaseVertex(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLsizei count,
        gl::DrawElementsType type,
        const void *indices,
        GLsizei instanceCount,
        GLint baseVertex) override;
    angle::Result drawElementsInstancedBaseVertexBaseInstance(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLsizei count,
        gl::DrawElementsType type,
        const void *indices,
        GLsizei instances,
        GLint baseVertex,
        GLuint baseInstance) override;
    angle::Result drawRangeElements(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLuint start,
        GLuint end,
        GLsizei count,
        gl::DrawElementsType type,
        const void *indices) override;
    angle::Result drawRangeElementsBaseVertex(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLuint start,
        GLuint end,
        GLsizei count,
        gl::DrawElementsType type,
        const void *indices,
        GLint baseVertex) override;
    angle::Result drawArraysIndirect(const gl::Context *context,
        gl::PrimitiveMode mode,
        const void *indirect) override;
    angle::Result drawElementsIndirect(const gl::Context *context,
        gl::PrimitiveMode mode,
        gl::DrawElementsType type,
        const void *indirect) override;

    angle::Result multiDrawArrays(const gl::Context *context,
        gl::PrimitiveMode mode,
        const GLint *firsts,
        const GLsizei *counts,
        GLsizei drawcount) override;
    angle::Result multiDrawArraysInstanced(const gl::Context *context,
        gl::PrimitiveMode mode,
        const GLint *firsts,
        const GLsizei *counts,
        const GLsizei *instanceCounts,
        GLsizei drawcount) override;
    angle::Result multiDrawArraysIndirect(const gl::Context *context,
        gl::PrimitiveMode mode,
        const void *indirect,
        GLsizei drawcount,
        GLsizei stride) override;
    angle::Result multiDrawElements(const gl::Context *context,
        gl::PrimitiveMode mode,
        const GLsizei *counts,
        gl::DrawElementsType type,
        const GLvoid *const *indices,
        GLsizei drawcount) override;
    angle::Result multiDrawElementsInstanced(const gl::Context *context,
        gl::PrimitiveMode mode,
        const GLsizei *counts,
        gl::DrawElementsType type,
        const GLvoid *const *indices,
        const GLsizei *instanceCounts,
        GLsizei drawcount) override;
    angle::Result multiDrawElementsIndirect(const gl::Context *context,
        gl::PrimitiveMode mode,
        gl::DrawElementsType type,
        const void *indirect,
        GLsizei drawcount,
        GLsizei stride) override;
    angle::Result multiDrawArraysInstancedBaseInstance(const gl::Context *context,
        gl::PrimitiveMode mode,
        const GLint *firsts,
        const GLsizei *counts,
        const GLsizei *instanceCounts,
        const GLuint *baseInstances,
        GLsizei drawcount) override;
    angle::Result multiDrawElementsInstancedBaseVertexBaseInstance(const gl::Context *context,
        gl::PrimitiveMode mode,
        const GLsizei *counts,
        gl::DrawElementsType type,
        const GLvoid *const *indices,
        const GLsizei *instanceCounts,
        const GLint *baseVertices,
        const GLuint *baseInstances,
        GLsizei drawcount) override;

    // MultiDrawIndirect helper functions
    angle::Result multiDrawElementsIndirectHelper(const gl::Context *context,
        gl::PrimitiveMode mode,
        gl::DrawElementsType type,
        const void *indirect,
        GLsizei drawcount,
        GLsizei stride);
    angle::Result multiDrawArraysIndirectHelper(const gl::Context *context,
        gl::PrimitiveMode mode,
        const void *indirect,
        GLsizei drawcount,
        GLsizei stride);

    // ShareGroup
    ShareGroupVk *getShareGroup() { return mShareGroupVk; }
    PipelineLayoutCache &getPipelineLayoutCache()
    {
        return mShareGroupVk->getPipelineLayoutCache();
    }
    DescriptorSetLayoutCache &getDescriptorSetLayoutCache()
    {
        return mShareGroupVk->getDescriptorSetLayoutCache();
    }

    // Device loss
    gl::GraphicsResetStatus getResetStatus() override;

    // EXT_debug_marker
    angle::Result insertEventMarker(GLsizei length, const char *marker) override;
    angle::Result pushGroupMarker(GLsizei length, const char *marker) override;
    angle::Result popGroupMarker() override;

    void insertEventMarkerImpl(GLenum source, const char *marker);

    // KHR_debug
    angle::Result pushDebugGroup(const gl::Context *context,
        GLenum source,
        GLuint id,
        const std::string &message) override;
    angle::Result popDebugGroup(const gl::Context *context) override;

    // Record GL API calls for debuggers
    void logEvent(const char *eventString);
    void endEventLog(angle::EntryPoint entryPoint, PipelineType pipelineType);
    void endEventLogForClearOrQuery();

    bool isViewportFlipEnabledForDrawFBO() const;
    bool isViewportFlipEnabledForReadFBO() const;
    // When the device/surface is rotated such that the surface's aspect ratio differs from that
    // of the native device (e.g. by 90 degrees), the width and height of the viewport, scissor,
    // and render area must be swapped.
    bool isRotatedAspectRatioForDrawFBO() const;
    bool isRotatedAspectRatioForReadFBO() const;
    SurfaceRotation getRotationDrawFramebuffer() const;
    SurfaceRotation getRotationReadFramebuffer() const;
    SurfaceRotation getSurfaceRotationImpl(const gl::Framebuffer *framebuffer,
        const egl::Surface *surface);

    // The viewport (x, y, w, h) is determined by a combination of:
    // 1. the clip space origin
    // 2. isViewportFlipEnabledForDrawFBO
    // For user-defined FBOs it is based solely on the value of isViewportFlipEnabledForDrawFBO.
    // For default FBOs it is the XOR of the clip origin and isViewportFlipEnabledForDrawFBO.
    // isYFlipEnabledForDrawFBO indicates that the rendered image is upside-down.
    ANGLE_INLINE bool isYFlipEnabledForDrawFBO() const
    {
        return mState.getClipOrigin() == gl::ClipOrigin::UpperLeft
                   ? !isViewportFlipEnabledForDrawFBO()
                   : isViewportFlipEnabledForDrawFBO();
    }
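
    // Illustrative truth table for the XOR described above (a reader's sketch, not additional
    // API; it follows directly from the function body):
    //
    //   ClipOrigin  | isViewportFlipEnabledForDrawFBO | isYFlipEnabledForDrawFBO
    //   ------------+---------------------------------+-------------------------
    //   LowerLeft   | false                           | false
    //   LowerLeft   | true                            | true
    //   UpperLeft   | false                           | true
    //   UpperLeft   | true                            | false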

    // State sync with dirty bits.
    angle::Result syncState(const gl::Context *context,
        const gl::state::DirtyBits dirtyBits,
        const gl::state::DirtyBits bitMask,
        const gl::state::ExtendedDirtyBits extendedDirtyBits,
        const gl::state::ExtendedDirtyBits extendedBitMask,
        gl::Command command) override;

    // Disjoint timer queries
    GLint getGPUDisjoint() override;
    GLint64 getTimestamp() override;

    // Context switching
    angle::Result onMakeCurrent(const gl::Context *context) override;
    angle::Result onUnMakeCurrent(const gl::Context *context) override;
    angle::Result onSurfaceUnMakeCurrent(WindowSurfaceVk *surface);
    angle::Result onSurfaceUnMakeCurrent(OffscreenSurfaceVk *surface);

    // Native capabilities, unmodified by gl::Context.
    gl::Caps getNativeCaps() const override;
    const gl::TextureCapsMap &getNativeTextureCaps() const override;
    const gl::Extensions &getNativeExtensions() const override;
    const gl::Limitations &getNativeLimitations() const override;
    const ShPixelLocalStorageOptions &getNativePixelLocalStorageOptions() const override;

    // Shader creation
    CompilerImpl *createCompiler() override;
    ShaderImpl *createShader(const gl::ShaderState &state) override;
    ProgramImpl *createProgram(const gl::ProgramState &state) override;

    // Framebuffer creation
    FramebufferImpl *createFramebuffer(const gl::FramebufferState &state) override;

    // Texture creation
    TextureImpl *createTexture(const gl::TextureState &state) override;

    // Renderbuffer creation
    RenderbufferImpl *createRenderbuffer(const gl::RenderbufferState &state) override;

    // Buffer creation
    BufferImpl *createBuffer(const gl::BufferState &state) override;

    // Vertex Array creation
    VertexArrayImpl *createVertexArray(const gl::VertexArrayState &state) override;

    // Query and Fence creation
    QueryImpl *createQuery(gl::QueryType type) override;
    FenceNVImpl *createFenceNV() override;
    SyncImpl *createSync() override;

    // Transform Feedback creation
    TransformFeedbackImpl *createTransformFeedback(
        const gl::TransformFeedbackState &state) override;

    // Sampler object creation
    SamplerImpl *createSampler(const gl::SamplerState &state) override;

    // Program Pipeline object creation
    ProgramPipelineImpl *createProgramPipeline(const gl::ProgramPipelineState &data) override;

    // Memory object creation.
    MemoryObjectImpl *createMemoryObject() override;

    // Semaphore creation.
    SemaphoreImpl *createSemaphore() override;

    // Overlay creation.
    OverlayImpl *createOverlay(const gl::OverlayState &state) override;

    angle::Result dispatchCompute(const gl::Context *context,
        GLuint numGroupsX,
        GLuint numGroupsY,
        GLuint numGroupsZ) override;
    angle::Result dispatchComputeIndirect(const gl::Context *context, GLintptr indirect) override;

    angle::Result memoryBarrier(const gl::Context *context, GLbitfield barriers) override;
    angle::Result memoryBarrierByRegion(const gl::Context *context, GLbitfield barriers) override;

    ANGLE_INLINE void invalidateTexture(gl::TextureType target) override {}

    bool hasDisplayTextureShareGroup() const { return mState.hasDisplayTextureShareGroup(); }

    // EXT_shader_framebuffer_fetch_non_coherent
    void framebufferFetchBarrier() override;

    // KHR_blend_equation_advanced
    void blendBarrier() override;

    // GL_ANGLE_vulkan_image
    angle::Result acquireTextures(const gl::Context *context,
        const gl::TextureBarrierVector &textureBarriers) override;
    angle::Result releaseTextures(const gl::Context *context,
        gl::TextureBarrierVector *textureBarriers) override;

    // Sets effective Context Priority. Changed by ShareGroupVk.
    void setPriority(egl::ContextPriority newPriority) { mContextPriority = newPriority; }

    VkDevice getDevice() const;
    // Effective Context Priority
    egl::ContextPriority getPriority() const { return mContextPriority; }
    vk::ProtectionType getProtectionType() const { return mProtectionType; }

    ANGLE_INLINE const angle::FeaturesVk &getFeatures() const { return mRenderer->getFeatures(); }

    ANGLE_INLINE void invalidateVertexAndIndexBuffers()
    {
        mGraphicsDirtyBits |= kIndexAndVertexDirtyBits;
    }

    angle::Result onVertexBufferChange(const vk::BufferHelper *vertexBuffer);

    angle::Result onVertexAttributeChange(size_t attribIndex,
        GLuint stride,
        GLuint divisor,
        angle::FormatID format,
        bool compressed,
        GLuint relativeOffset,
        const vk::BufferHelper *vertexBuffer);

    void invalidateDefaultAttribute(size_t attribIndex);
    void invalidateDefaultAttributes(const gl::AttributesMask &dirtyMask);
    angle::Result onFramebufferChange(FramebufferVk *framebufferVk, gl::Command command);
    void onDrawFramebufferRenderPassDescChange(FramebufferVk *framebufferVk,
        bool *renderPassDescChangedOut);
    void onHostVisibleBufferWrite() { mIsAnyHostVisibleBufferWritten = true; }

    void invalidateCurrentTransformFeedbackBuffers();
    void onTransformFeedbackStateChanged();
    angle::Result onBeginTransformFeedback(
        size_t bufferCount,
        const gl::TransformFeedbackBuffersArray<vk::BufferHelper *> &buffers,
        const gl::TransformFeedbackBuffersArray<vk::BufferHelper> &counterBuffers);
    void onEndTransformFeedback();
    angle::Result onPauseTransformFeedback();
    void pauseTransformFeedbackIfActiveUnpaused();

    void onColorAccessChange() { mGraphicsDirtyBits |= kColorAccessChangeDirtyBits; }
    void onDepthStencilAccessChange() { mGraphicsDirtyBits |= kDepthStencilAccessChangeDirtyBits; }

    // When UtilsVk issues draw or dispatch calls, it binds a new pipeline and descriptor sets that
    // the context is not aware of. These functions are called to make sure the pipeline and
    // affected descriptor set bindings are dirtied for the next application draw/dispatch call.
    void invalidateGraphicsPipelineBinding();
    void invalidateComputePipelineBinding();
    void invalidateGraphicsDescriptorSet(DescriptorSetIndex usedDescriptorSet);
    void invalidateComputeDescriptorSet(DescriptorSetIndex usedDescriptorSet);
    void invalidateAllDynamicState();
    angle::Result updateRenderPassDepthFeedbackLoopMode(
        UpdateDepthFeedbackLoopReason depthReason,
        UpdateDepthFeedbackLoopReason stencilReason);

    angle::Result optimizeRenderPassForPresent(VkFramebuffer framebufferHandle,
        vk::ImageViewHelper *colorImageView,
        vk::ImageHelper *colorImage,
        vk::ImageHelper *colorImageMS,
        vk::PresentMode presentMode,
        bool *imageResolved);

    vk::DynamicQueryPool *getQueryPool(gl::QueryType queryType);

    const VkClearValue &getClearColorValue() const;
    const VkClearValue &getClearDepthStencilValue() const;
    gl::BlendStateExt::ColorMaskStorage::Type getClearColorMasks() const;
    const VkRect2D &getScissor() const { return mScissor; }
    angle::Result getIncompleteTexture(const gl::Context *context,
        gl::TextureType type,
        gl::SamplerFormat format,
        gl::Texture **textureOut);
    void updateColorMasks();
    void updateMissingOutputsMask();
    void updateBlendFuncsAndEquations();
    void updateSampleMaskWithRasterizationSamples(const uint32_t rasterizationSamples);
    void updateAlphaToCoverageWithRasterizationSamples(const uint32_t rasterizationSamples);
    void updateFrameBufferFetchSamples(const uint32_t prevSamples, const uint32_t curSamples);

    void handleError(VkResult errorCode,
        const char *file,
        const char *function,
        unsigned int line) override;

    angle::Result onIndexBufferChange(const vk::BufferHelper *currentIndexBuffer);

    angle::Result flushImpl(const vk::Semaphore *semaphore,
        const vk::SharedExternalFence *externalFence,
        RenderPassClosureReason renderPassClosureReason);
    angle::Result finishImpl(RenderPassClosureReason renderPassClosureReason);

    void addWaitSemaphore(VkSemaphore semaphore, VkPipelineStageFlags stageMask);

    template <typename T>
    void addGarbage(T *object)
    {
        if (object->valid())
        {
            mCurrentGarbage.emplace_back(vk::GetGarbage(object));
        }
    }

    angle::Result getCompatibleRenderPass(const vk::RenderPassDesc &desc,
        const vk::RenderPass **renderPassOut);
    angle::Result getRenderPassWithOps(const vk::RenderPassDesc &desc,
        const vk::AttachmentOpsArray &ops,
        const vk::RenderPass **renderPassOut);

    vk::ShaderLibrary &getShaderLibrary() { return mShaderLibrary; }
    UtilsVk &getUtils() { return mUtils; }

    angle::Result getTimestamp(uint64_t *timestampOut);

    // Create Begin/End/Instant GPU trace events, which take their timestamps from GPU queries.
    // The events are queued until the query results are available. Possible values for `phase`
    // are TRACE_EVENT_PHASE_*
    ANGLE_INLINE angle::Result traceGpuEvent(vk::OutsideRenderPassCommandBuffer *commandBuffer,
        char phase,
        const EventName &name)
    {
        if (mGpuEventsEnabled)
            return traceGpuEventImpl(commandBuffer, phase, name);
        return angle::Result::Continue;
    }
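
    // Illustrative usage sketch (not actual ANGLE code; the exact phase macro names and event
    // name setup are assumptions based on the comment above):
    //
    //   EventName name = {};
    //   snprintf(name.data(), name.size(), "MyPass");
    //   ANGLE_TRY(traceGpuEvent(commandBuffer, TRACE_EVENT_PHASE_BEGIN, name));
    //   // ... record GPU work ...
    //   ANGLE_TRY(traceGpuEvent(commandBuffer, TRACE_EVENT_PHASE_END, name));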

    RenderPassCache &getRenderPassCache() { return mRenderPassCache; }

    bool emulateSeamfulCubeMapSampling() const { return mEmulateSeamfulCubeMapSampling; }

    const gl::Debug &getDebug() const { return mState.getDebug(); }
    const gl::OverlayType *getOverlay() const { return mState.getOverlay(); }

    angle::Result onBufferReleaseToExternal(const vk::BufferHelper &buffer);
    angle::Result onImageReleaseToExternal(const vk::ImageHelper &image);

    void onImageRenderPassRead(VkImageAspectFlags aspectFlags,
        vk::ImageLayout imageLayout,
        vk::ImageHelper *image)
    {
        ASSERT(mRenderPassCommands->started());
        mRenderPassCommands->imageRead(this, aspectFlags, imageLayout, image);
    }

    void onImageRenderPassWrite(gl::LevelIndex level,
        uint32_t layerStart,
        uint32_t layerCount,
        VkImageAspectFlags aspectFlags,
        vk::ImageLayout imageLayout,
        vk::ImageHelper *image)
    {
        ASSERT(mRenderPassCommands->started());
        mRenderPassCommands->imageWrite(this, level, layerStart, layerCount, aspectFlags,
                                        imageLayout, image);
    }

    void onColorDraw(gl::LevelIndex level,
        uint32_t layerStart,
        uint32_t layerCount,
        vk::ImageHelper *image,
        vk::ImageHelper *resolveImage,
        UniqueSerial imageSiblingSerial,
        vk::PackedAttachmentIndex packedAttachmentIndex)
    {
        ASSERT(mRenderPassCommands->started());
        mRenderPassCommands->colorImagesDraw(level, layerStart, layerCount, image, resolveImage,
                                             imageSiblingSerial, packedAttachmentIndex);
    }
    void onDepthStencilDraw(gl::LevelIndex level,
        uint32_t layerStart,
        uint32_t layerCount,
        vk::ImageHelper *image,
        vk::ImageHelper *resolveImage,
        UniqueSerial imageSiblingSerial)
    {
        ASSERT(mRenderPassCommands->started());
        mRenderPassCommands->depthStencilImagesDraw(level, layerStart, layerCount, image,
                                                    resolveImage, imageSiblingSerial);
    }

    void finalizeImageLayout(const vk::ImageHelper *image, UniqueSerial imageSiblingSerial)
    {
        if (mRenderPassCommands->started())
        {
            mRenderPassCommands->finalizeImageLayout(this, image, imageSiblingSerial);
        }
    }

    angle::Result getOutsideRenderPassCommandBuffer(
        const vk::CommandBufferAccess &access,
        vk::OutsideRenderPassCommandBuffer **commandBufferOut)
    {
        ANGLE_TRY(onResourceAccess(access));
        *commandBufferOut = &mOutsideRenderPassCommands->getCommandBuffer();
        return angle::Result::Continue;
    }

    angle::Result getOutsideRenderPassCommandBufferHelper(
        const vk::CommandBufferAccess &access,
        vk::OutsideRenderPassCommandBufferHelper **commandBufferHelperOut)
    {
        ANGLE_TRY(onResourceAccess(access));
        *commandBufferHelperOut = mOutsideRenderPassCommands;
        return angle::Result::Continue;
    }

    angle::Result submitStagedTextureUpdates()
    {
        // Staged updates are recorded in the outside RP command buffer; submit them.
        return flushOutsideRenderPassCommands();
    }

    angle::Result onEGLImageQueueChange()
    {
        // Flush the barrier inserted to change the queue and layout of an EGL image. Another
        // thread may start using this image without issuing a sync object.
        return flushOutsideRenderPassCommands();
    }

    angle::Result beginNewRenderPass(vk::MaybeImagelessFramebuffer &framebuffer,
        const gl::Rectangle &renderArea,
        const vk::RenderPassDesc &renderPassDesc,
        const vk::AttachmentOpsArray &renderPassAttachmentOps,
        const vk::PackedAttachmentCount colorAttachmentCount,
        const vk::PackedAttachmentIndex depthStencilAttachmentIndex,
        const vk::PackedClearValuesArray &clearValues,
        vk::RenderPassCommandBuffer **commandBufferOut);

    void disableRenderPassReactivation() { mAllowRenderPassToReactivate = false; }

    // Only returns true if we have a started RP and we've run setupDraw.
    bool hasActiveRenderPass() const
    {
        // If mRenderPassCommandBuffer is not null, mRenderPassCommands must have already started;
        // we call this an active render pass. A started render pass will have a null
        // mRenderPassCommandBuffer after the onRenderPassFinished call; we call that state
        // started but inactive.
        ASSERT(mRenderPassCommandBuffer == nullptr || mRenderPassCommands->started());
        // Checking mRenderPassCommandBuffer ensures we've called setupDraw.
        return mRenderPassCommandBuffer != nullptr;
    }
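
    // Illustrative summary of the render pass states referenced above (a reader's sketch, not
    // additional API):
    //   - Not started:          !mRenderPassCommands->started()
    //   - Started but inactive: started() && mRenderPassCommandBuffer == nullptr
    //   - Active:               started() && mRenderPassCommandBuffer != nullptr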

    bool hasStartedRenderPassWithQueueSerial(const QueueSerial &queueSerial) const
    {
        return mRenderPassCommands->started() &&
               mRenderPassCommands->getQueueSerial() == queueSerial;
    }

    bool isRenderPassStartedAndUsesBuffer(const vk::BufferHelper &buffer) const
    {
        return mRenderPassCommands->started() && mRenderPassCommands->usesBuffer(buffer);
    }

    bool isRenderPassStartedAndUsesBufferForWrite(const vk::BufferHelper &buffer) const
    {
        return mRenderPassCommands->started() && mRenderPassCommands->usesBufferForWrite(buffer);
    }

    bool isRenderPassStartedAndUsesImage(const vk::ImageHelper &image) const
    {
        return mRenderPassCommands->started() && mRenderPassCommands->usesImage(image);
    }

    vk::RenderPassCommandBufferHelper &getStartedRenderPassCommands()
    {
        ASSERT(mRenderPassCommands->started());
        return *mRenderPassCommands;
    }

    uint32_t getCurrentSubpassIndex() const;
    uint32_t getCurrentViewCount() const;

    // Initial Context Priority. Used for EGL_CONTEXT_PRIORITY_LEVEL_IMG attribute.
    egl::ContextPriority getContextPriority() const override { return mInitialContextPriority; }
    angle::Result startRenderPass(gl::Rectangle renderArea,
        vk::RenderPassCommandBuffer **commandBufferOut,
        bool *renderPassDescChangedOut);
    angle::Result startNextSubpass();
    angle::Result flushCommandsAndEndRenderPass(RenderPassClosureReason reason);
    angle::Result flushCommandsAndEndRenderPassWithoutSubmit(RenderPassClosureReason reason);
    angle::Result flushAndSubmitOutsideRenderPassCommands();

    angle::Result syncExternalMemory();

    // Either issue a submission or defer it when a sync object is initialized. If deferred, a
    // submission will have to be issued during the client wait.
    angle::Result onSyncObjectInit(vk::SyncHelper *syncHelper, bool isEGLSyncObject);
    // Called when a sync object is waited on while its submission was deferred in
    // onSyncObjectInit. It's a no-op if this context doesn't have a pending submission. Note that
    // due to mHasDeferredFlush being set, flushing the render pass leads to a submission
    // automatically.
    angle::Result flushCommandsAndEndRenderPassIfDeferredSyncInit(RenderPassClosureReason reason);

    void addCommandBufferDiagnostics(const std::string &commandBufferDiagnostics);

    VkIndexType getVkIndexType(gl::DrawElementsType glIndexType) const;
    size_t getVkIndexTypeSize(gl::DrawElementsType glIndexType) const;
    bool shouldConvertUint8VkIndexType(gl::DrawElementsType glIndexType) const;

    ProgramExecutableVk *getExecutable() const;

    bool isRobustResourceInitEnabled() const;

    uint32_t getDriverUniformSize(PipelineType pipelineType) const;

    // Queries that begin and end automatically with render pass start and end
    angle::Result beginRenderPassQuery(QueryVk *queryVk);
    angle::Result endRenderPassQuery(QueryVk *queryVk);
    void pauseRenderPassQueriesIfActive();
    angle::Result resumeRenderPassQueriesIfActive();
    angle::Result resumeXfbRenderPassQueriesIfActive();
    bool doesPrimitivesGeneratedQuerySupportRasterizerDiscard() const;
    bool isEmulatingRasterizerDiscardDuringPrimitivesGeneratedQuery(
        bool isPrimitivesGeneratedQueryActive) const;

    // Used by QueryVk to share query helpers between transform feedback queries.
    QueryVk *getActiveRenderPassQuery(gl::QueryType queryType) const;

    void syncObjectPerfCounters(const angle::VulkanPerfCounters &commandQueuePerfCounters);
    void updateOverlayOnPresent();
    void addOverlayUsedBuffersCount(vk::CommandBufferHelperCommon *commandBuffer);

    // For testing only.
    void setDefaultUniformBlocksMinSizeForTesting(size_t minSize);

    vk::BufferHelper &getEmptyBuffer() { return mEmptyBuffer; }

    // Keeping track of the buffer copy size. Used to determine when to submit the outside command
    // buffer.
    angle::Result onCopyUpdate(VkDeviceSize size, bool *commandBufferWasFlushedOut);

    // Implementation of MultisampleTextureInitializer
    angle::Result initializeMultisampleTextureToBlack(const gl::Context *context,
        gl::Texture *glTexture) override;

    // TODO(http://anglebug.com/5624): rework updateActiveTextures(), createPipelineLayout(),
    // handleDirtyGraphicsPipeline(), and ProgramPipelineVk::link().
    void resetCurrentGraphicsPipeline()
    {
        mCurrentGraphicsPipeline        = nullptr;
        mCurrentGraphicsPipelineShaders = nullptr;
    }

    void onProgramExecutableReset(ProgramExecutableVk *executableVk);

    angle::Result handleGraphicsEventLog(GraphicsEventCmdBuf queryEventType);

    void flushDescriptorSetUpdates();

    vk::BufferPool *getDefaultBufferPool(VkDeviceSize size,
        uint32_t memoryTypeIndex,
        BufferUsageType usageType)
    {
        return mShareGroupVk->getDefaultBufferPool(mRenderer, size, memoryTypeIndex, usageType);
    }

    angle::Result allocateStreamedVertexBuffer(size_t attribIndex,
        size_t bytesToAllocate,
        vk::BufferHelper **vertexBufferOut)
    {
        bool newBufferOut;
        ANGLE_TRY(mStreamedVertexBuffers[attribIndex].allocate(this, bytesToAllocate,
                                                               vertexBufferOut, &newBufferOut));
        if (newBufferOut)
        {
            mHasInFlightStreamedVertexBuffers.set(attribIndex);
        }
        return angle::Result::Continue;
    }

    angle::Result bindCachedDescriptorPool(
        DescriptorSetIndex descriptorSetIndex,
        const vk::DescriptorSetLayoutDesc &descriptorSetLayoutDesc,
        uint32_t descriptorCountMultiplier,
        vk::DescriptorPoolPointer *poolPointerOut);

    // Put the context in framebuffer fetch mode. If the permanentlySwitchToFramebufferFetchMode
    // feature is enabled, this is done on first encounter of framebuffer fetch, and makes the
    // context use framebuffer-fetch-enabled render passes from here on.
    angle::Result switchToFramebufferFetchMode(bool hasFramebufferFetch);
    bool isInFramebufferFetchMode() const { return mIsInFramebufferFetchMode; }

    const angle::PerfMonitorCounterGroups &getPerfMonitorCounters() override;

    void resetPerFramePerfCounters();

    // Accumulate cache stats for a specific cache
    void accumulateCacheStats(VulkanCacheType cache, const CacheStats &stats)
    {
        mVulkanCacheStats[cache].accumulate(stats);
    }

    std::ostringstream &getPipelineCacheGraphStream() { return mPipelineCacheGraph; }

    // Whether VK_EXT_pipeline_robustness should be used to enable robust buffer access in the
    // pipeline.
    bool shouldUsePipelineRobustness() const
    {
        return getFeatures().supportsPipelineRobustness.enabled && mState.hasRobustAccess();
    }
    // Whether VK_EXT_pipeline_protected_access should be used to restrict the pipeline to
    // protected command buffers. Note that when false, if the extension is supported, the
    // pipeline can be restricted to unprotected command buffers.
    bool shouldRestrictPipelineToProtectedAccess() const
    {
        return getFeatures().supportsPipelineProtectedAccess.enabled &&
               mState.hasProtectedContent();
    }

    vk::ComputePipelineFlags getComputePipelineFlags() const;

    angle::ImageLoadContext getImageLoadContext() const;

    bool hasUnsubmittedUse(const vk::ResourceUse &use) const;
    bool hasUnsubmittedUse(const vk::Resource &resource) const
    {
        return hasUnsubmittedUse(resource.getResourceUse());
    }
    bool hasUnsubmittedUse(const vk::ReadWriteResource &resource) const
    {
        return hasUnsubmittedUse(resource.getResourceUse());
    }

    const QueueSerial &getLastSubmittedQueueSerial() const { return mLastSubmittedQueueSerial; }
    const vk::ResourceUse &getSubmittedResourceUse() const { return mSubmittedResourceUse; }

    // Uploading mutable mipmap textures is currently restricted to single-context applications.
    bool isEligibleForMutableTextureFlush() const
    {
        return getFeatures().mutableMipmapTextureUpload.enabled && !hasDisplayTextureShareGroup() &&
               mShareGroupVk->getContexts().size() == 1;
    }

    vk::RenderPassUsageFlags getDepthStencilAttachmentFlags() const
    {
        return mDepthStencilAttachmentFlags;
    }

    bool isDitherEnabled() { return mState.isDitherEnabled(); }

  private:
    // Dirty bits.
    enum DirtyBitType : size_t
    {
        // Dirty bits that must be processed before the render pass is started. The handlers for
        // these dirty bits don't record any commands.

        // The AnySamplePassed render pass query has been ended.
        DIRTY_BIT_ANY_SAMPLE_PASSED_QUERY_END,
        // A glMemoryBarrier has been called and command buffers may need flushing.
        DIRTY_BIT_MEMORY_BARRIER,
        // Update default attribute buffers.
        DIRTY_BIT_DEFAULT_ATTRIBS,
        // The pipeline has changed and needs to be recreated. This dirty bit may close the render
        // pass.
        DIRTY_BIT_PIPELINE_DESC,
        // Support for depth/stencil read-only feedback loop. When depth/stencil access changes,
        // the render pass may need closing.
        DIRTY_BIT_READ_ONLY_DEPTH_FEEDBACK_LOOP_MODE,

        // Start the render pass.
        DIRTY_BIT_RENDER_PASS,

        // Dirty bits that must be processed after the render pass is started. Their handlers
        // record commands.
        DIRTY_BIT_EVENT_LOG,
        // Update color and depth/stencil accesses in the render pass.
        DIRTY_BIT_COLOR_ACCESS,
        DIRTY_BIT_DEPTH_STENCIL_ACCESS,
        // The pipeline needs to be re-bound because a new command buffer has been allocated, or
        // UtilsVk has changed the binding. The pipeline itself doesn't need to be recreated.
        DIRTY_BIT_PIPELINE_BINDING,
        DIRTY_BIT_TEXTURES,
        DIRTY_BIT_VERTEX_BUFFERS,
        DIRTY_BIT_INDEX_BUFFER,
        DIRTY_BIT_UNIFORMS,
        DIRTY_BIT_DRIVER_UNIFORMS,
        // Shader resources excluding textures, which are handled separately.
        DIRTY_BIT_SHADER_RESOURCES,
        DIRTY_BIT_UNIFORM_BUFFERS,
        DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS,
        DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME,
        DIRTY_BIT_DESCRIPTOR_SETS,
        DIRTY_BIT_FRAMEBUFFER_FETCH_BARRIER,
        DIRTY_BIT_BLEND_BARRIER,

        // Dynamic state
        // - In core Vulkan 1.0
        DIRTY_BIT_DYNAMIC_VIEWPORT,
        DIRTY_BIT_DYNAMIC_SCISSOR,
        DIRTY_BIT_DYNAMIC_LINE_WIDTH,
        DIRTY_BIT_DYNAMIC_DEPTH_BIAS,
        DIRTY_BIT_DYNAMIC_BLEND_CONSTANTS,
        DIRTY_BIT_DYNAMIC_STENCIL_COMPARE_MASK,
        DIRTY_BIT_DYNAMIC_STENCIL_WRITE_MASK,
        DIRTY_BIT_DYNAMIC_STENCIL_REFERENCE,
        // - In VK_EXT_extended_dynamic_state
        DIRTY_BIT_DYNAMIC_CULL_MODE,
        DIRTY_BIT_DYNAMIC_FRONT_FACE,
        DIRTY_BIT_DYNAMIC_DEPTH_TEST_ENABLE,
        DIRTY_BIT_DYNAMIC_DEPTH_WRITE_ENABLE,
        DIRTY_BIT_DYNAMIC_DEPTH_COMPARE_OP,
        DIRTY_BIT_DYNAMIC_STENCIL_TEST_ENABLE,
        DIRTY_BIT_DYNAMIC_STENCIL_OP,
        // - In VK_EXT_extended_dynamic_state2
        DIRTY_BIT_DYNAMIC_RASTERIZER_DISCARD_ENABLE,
        DIRTY_BIT_DYNAMIC_DEPTH_BIAS_ENABLE,
        DIRTY_BIT_DYNAMIC_LOGIC_OP,
        DIRTY_BIT_DYNAMIC_PRIMITIVE_RESTART_ENABLE,
        // - In VK_KHR_fragment_shading_rate
        DIRTY_BIT_DYNAMIC_FRAGMENT_SHADING_RATE,

        DIRTY_BIT_MAX,
    };

    // Dirty bit handlers that can break the render pass must always be specified before
    // DIRTY_BIT_RENDER_PASS.
    static_assert(
        DIRTY_BIT_ANY_SAMPLE_PASSED_QUERY_END < DIRTY_BIT_RENDER_PASS,
        "Render pass breaking dirty bit must be handled before the render pass dirty bit");
    static_assert(
        DIRTY_BIT_MEMORY_BARRIER < DIRTY_BIT_RENDER_PASS,
        "Render pass breaking dirty bit must be handled before the render pass dirty bit");
    static_assert(
        DIRTY_BIT_DEFAULT_ATTRIBS < DIRTY_BIT_RENDER_PASS,
        "Render pass breaking dirty bit must be handled before the render pass dirty bit");
    static_assert(
        DIRTY_BIT_PIPELINE_DESC < DIRTY_BIT_RENDER_PASS,
        "Render pass breaking dirty bit must be handled before the render pass dirty bit");
    static_assert(
        DIRTY_BIT_READ_ONLY_DEPTH_FEEDBACK_LOOP_MODE < DIRTY_BIT_RENDER_PASS,
        "Render pass breaking dirty bit must be handled before the render pass dirty bit");

    // Dirty bit handlers that record commands or otherwise expect to manipulate the render pass
    // that will be used for the draw call must be specified after DIRTY_BIT_RENDER_PASS.
    static_assert(DIRTY_BIT_EVENT_LOG > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_COLOR_ACCESS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DEPTH_STENCIL_ACCESS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_PIPELINE_BINDING > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_TEXTURES > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_VERTEX_BUFFERS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_INDEX_BUFFER > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DRIVER_UNIFORMS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_SHADER_RESOURCES > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(
        DIRTY_BIT_UNIFORM_BUFFERS > DIRTY_BIT_SHADER_RESOURCES,
        "Uniform buffer using dirty bit must be handled after the shader resource dirty bit");
    static_assert(DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_TRANSFORM_FEEDBACK_RESUME > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DESCRIPTOR_SETS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_UNIFORMS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_FRAMEBUFFER_FETCH_BARRIER > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_BLEND_BARRIER > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_VIEWPORT > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_SCISSOR > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_LINE_WIDTH > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_DEPTH_BIAS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_BLEND_CONSTANTS > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_STENCIL_COMPARE_MASK > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_STENCIL_WRITE_MASK > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_STENCIL_REFERENCE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_CULL_MODE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_FRONT_FACE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_DEPTH_TEST_ENABLE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_DEPTH_WRITE_ENABLE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_DEPTH_COMPARE_OP > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_STENCIL_TEST_ENABLE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_STENCIL_OP > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_RASTERIZER_DISCARD_ENABLE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_DEPTH_BIAS_ENABLE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_LOGIC_OP > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_PRIMITIVE_RESTART_ENABLE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");
    static_assert(DIRTY_BIT_DYNAMIC_FRAGMENT_SHADING_RATE > DIRTY_BIT_RENDER_PASS,
                  "Render pass using dirty bit must be handled after the render pass dirty bit");

    using DirtyBits = angle::BitSet<DIRTY_BIT_MAX>;

    using GraphicsDirtyBitHandler = angle::Result (
        ContextVk::*)(DirtyBits::Iterator *dirtyBitsIterator, DirtyBits dirtyBitMask);
    using ComputeDirtyBitHandler = angle::Result (ContextVk::*)();

    // The GpuEventQuery struct holds together a timestamp query and enough data to create a
    // trace event based on that. Use traceGpuEvent to insert such queries. They will be read back
    // when the results are available, without inserting a GPU bubble.
    //
    // - eventName will be the reported name of the event
    // - phase is either 'B' (duration begin), 'E' (duration end) or 'i' (instant event).
    //   See Google's "Trace Event Format":
    //   https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
    // - serial is the serial of the batch the query was submitted on. Until the batch is
    //   submitted, the query is not checked to avoid incurring a flush.
    struct GpuEventQuery final
    {
        EventName name;
        char phase;
        vk::QueryHelper queryHelper;
    };

    // Once a query result is available, the timestamp is read and a GpuEvent object is kept until
    // the next clock sync, at which point the clock drift is compensated in the results before
    // handing them off to the application.
    struct GpuEvent final
    {
        uint64_t gpuTimestampCycles;
        std::array<char, kMaxGpuEventNameLen> name;
        char phase;
    };

    struct GpuClockSyncInfo
    {
        double gpuTimestampS;
        double cpuTimestampS;
    };
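
    // Illustrative sketch of the drift compensation mentioned above (an assumed formulation, not
    // the exact implementation): given the most recent GpuClockSyncInfo, a GPU timestamp
    // converted to seconds (gpuTimeS) maps to CPU time roughly as
    //
    //   cpuTimeS = cpuTimestampS + (gpuTimeS - gpuTimestampS)
    //
    // where gpuTimeS is derived from GpuEvent::gpuTimestampCycles using the device's timestamp
    // period.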

    class ScopedDescriptorSetUpdates;

    angle::Result setupDraw(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLint firstVertexOrInvalid,
        GLsizei vertexOrIndexCount,
        GLsizei instanceCount,
        gl::DrawElementsType indexTypeOrInvalid,
        const void *indices,
        DirtyBits dirtyBitMask);

    angle::Result setupIndexedDraw(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLsizei indexCount,
        GLsizei instanceCount,
        gl::DrawElementsType indexType,
        const void *indices);
    angle::Result setupIndirectDraw(const gl::Context *context,
        gl::PrimitiveMode mode,
        DirtyBits dirtyBitMask,
        vk::BufferHelper *indirectBuffer);
    angle::Result setupIndexedIndirectDraw(const gl::Context *context,
        gl::PrimitiveMode mode,
        gl::DrawElementsType indexType,
        vk::BufferHelper *indirectBuffer);

    angle::Result setupLineLoopIndexedIndirectDraw(const gl::Context *context,
        gl::PrimitiveMode mode,
        gl::DrawElementsType indexType,
        vk::BufferHelper *srcIndirectBuf,
        VkDeviceSize indirectBufferOffset,
        vk::BufferHelper **indirectBufferOut);
    angle::Result setupLineLoopIndirectDraw(const gl::Context *context,
        gl::PrimitiveMode mode,
        vk::BufferHelper *indirectBuffer,
        VkDeviceSize indirectBufferOffset,
        vk::BufferHelper **indirectBufferOut);

    angle::Result setupLineLoopDraw(const gl::Context *context,
        gl::PrimitiveMode mode,
        GLint firstVertex,
        GLsizei vertexOrIndexCount,
        gl::DrawElementsType indexTypeOrInvalid,
        const void *indices,
        uint32_t *numIndicesOut);

    angle::Result setupDispatch(const gl::Context *context);

    gl::Rectangle getCorrectedViewport(const gl::Rectangle &viewport) const;
    void updateViewport(FramebufferVk *framebufferVk,
        const gl::Rectangle &viewport,
        float nearPlane,
        float farPlane);
    void updateFrontFace();
    void updateDepthRange(float nearPlane, float farPlane);
    void updateFlipViewportDrawFramebuffer(const gl::State &glState);
    void updateFlipViewportReadFramebuffer(const gl::State &glState);
    void updateSurfaceRotationDrawFramebuffer(const gl::State &glState,
        const egl::Surface *currentDrawSurface);
    void updateSurfaceRotationReadFramebuffer(const gl::State &glState,
        const egl::Surface *currentReadSurface);

    angle::Result updateActiveTextures(const gl::Context *context, gl::Command command);
    template <typename CommandBufferHelperT>
    angle::Result updateActiveImages(CommandBufferHelperT *commandBufferHelper);

    ANGLE_INLINE void invalidateCurrentGraphicsPipeline()
    {
        // Note: DIRTY_BIT_PIPELINE_BINDING will be automatically set if pipeline bind is
        // necessary.
        mGraphicsDirtyBits.set(DIRTY_BIT_PIPELINE_DESC);
    }

    ANGLE_INLINE void invalidateCurrentComputePipeline()
    {
        mComputeDirtyBits |= kPipelineDescAndBindingDirtyBits;
        mCurrentComputePipeline = nullptr;
    }

    angle::Result invalidateProgramExecutableHelper(const gl::Context *context);

    void invalidateCurrentDefaultUniforms();
    angle::Result invalidateCurrentTextures(const gl::Context *context, gl::Command command);
    angle::Result invalidateCurrentShaderResources(gl::Command command);
    angle::Result invalidateCurrentShaderUniformBuffers(gl::Command command);
    void invalidateGraphicsDriverUniforms();
    void invalidateDriverUniforms();

    angle::Result handleNoopDrawEvent() override;

    // Handlers for graphics pipeline dirty bits.
    angle::Result handleDirtyGraphicsMemoryBarrier(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDefaultAttribs(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsPipelineDesc(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsReadOnlyDepthFeedbackLoopMode(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyAnySamplePassedQueryEnd(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsRenderPass(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsEventLog(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsColorAccess(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDepthStencilAccess(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsPipelineBinding(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsTextures(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsVertexBuffers(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsIndexBuffer(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDriverUniforms(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsShaderResources(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsUniformBuffers(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsFramebufferFetchBarrier(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsBlendBarrier(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsTransformFeedbackBuffersEmulation(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsTransformFeedbackBuffersExtension(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsTransformFeedbackResume(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDescriptorSets(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsUniforms(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicViewport(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicScissor(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicLineWidth(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicDepthBias(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicBlendConstants(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicStencilCompareMask(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicStencilWriteMask(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicStencilReference(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicCullMode(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicFrontFace(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicDepthTestEnable(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicDepthWriteEnable(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicDepthCompareOp(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicStencilTestEnable(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicStencilOp(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicRasterizerDiscardEnable(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicDepthBiasEnable(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicLogicOp(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicPrimitiveRestartEnable(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    angle::Result handleDirtyGraphicsDynamicFragmentShadingRate(
        DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);

    // Handlers for compute pipeline dirty bits.
    angle::Result handleDirtyComputeMemoryBarrier();
    angle::Result handleDirtyComputeEventLog();
    angle::Result handleDirtyComputePipelineDesc();
    angle::Result handleDirtyComputePipelineBinding();
    angle::Result handleDirtyComputeTextures();
    angle::Result handleDirtyComputeDriverUniforms();
    angle::Result handleDirtyComputeShaderResources();
    angle::Result handleDirtyComputeUniformBuffers();
    angle::Result handleDirtyComputeDescriptorSets();
    angle::Result handleDirtyComputeUniforms();

    // Common parts of the common dirty bit handlers.
    angle::Result handleDirtyUniformsImpl(vk::CommandBufferHelperCommon *commandBufferHelper);
    angle::Result handleDirtyMemoryBarrierImpl(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask);
    template <typename CommandBufferT>
    angle::Result handleDirtyEventLogImpl(CommandBufferT *commandBuffer);
    template <typename CommandBufferHelperT>
    angle::Result handleDirtyTexturesImpl(CommandBufferHelperT *commandBufferHelper,
        PipelineType pipelineType);
    template <typename CommandBufferHelperT>
    angle::Result handleDirtyShaderResourcesImpl(CommandBufferHelperT *commandBufferHelper,
        PipelineType pipelineType);
    template <typename CommandBufferHelperT>
    angle::Result handleDirtyUniformBuffersImpl(CommandBufferHelperT *commandBufferHelper);
    template <typename CommandBufferHelperT>
    angle::Result handleDirtyDescriptorSetsImpl(CommandBufferHelperT *commandBufferHelper,
        PipelineType pipelineType);
    void handleDirtyGraphicsDynamicScissorImpl(bool isPrimitivesGeneratedQueryActive);

    void writeAtomicCounterBufferDriverUniformOffsets(uint32_t *offsetsOut, size_t offsetsSize);

    enum class Submit
    {
        OutsideRenderPassCommandsOnly,
        AllCommands,
    };

    angle::Result submitCommands(const vk::Semaphore *signalSemaphore,
        const vk::SharedExternalFence *externalFence,
        Submit submission);

    angle::Result synchronizeCpuGpuTime();
    angle::Result traceGpuEventImpl(vk::OutsideRenderPassCommandBuffer *commandBuffer,
        char phase,
        const EventName &name);
    angle::Result checkCompletedGpuEvents();
    void flushGpuEvents(double nextSyncGpuTimestampS, double nextSyncCpuTimestampS);
    void handleDeviceLost();
    bool shouldEmulateSeamfulCubeMapSampling() const;
    void clearAllGarbage();
    void dumpCommandStreamDiagnostics();
    angle::Result flushOutsideRenderPassCommands();
    // Flush commands and end render pass without setting any dirty bits.
    // flushCommandsAndEndRenderPass() and flushDirtyGraphicsRenderPass() will set the dirty bits
    // directly or through the iterator respectively. Outside those two functions, this shouldn't
    // be called directly.
    angle::Result flushDirtyGraphicsRenderPass(DirtyBits::Iterator *dirtyBitsIterator,
        DirtyBits dirtyBitMask,
        RenderPassClosureReason reason);

    // Mark the render pass to be closed on the next draw call. The render pass is not actually
    // closed and can be restored with restoreFinishedRenderPass if necessary, for example to
    // append a resolve attachment.
    void onRenderPassFinished(RenderPassClosureReason reason);

    void initIndexTypeMap();

    VertexArrayVk *getVertexArray() const;
    FramebufferVk *getDrawFramebuffer() const;
    ProgramVk *getProgram() const;
    ProgramPipelineVk *getProgramPipeline() const;

1279 // Read-after-write hazards are generally handled with |glMemoryBarrier| when the source of the
1280 // write is storage output. When the write happens outside a render pass, the natural placement
1281 // of the render pass after the current outside-render-pass commands ensures that the memory
1282 // barriers and image layout transitions automatically take care of such synchronization.
1283 //
1284 // However, there are a number of read-after-write cases that require breaking the render pass to
1285 // preserve the order of operations (see the illustrative sketch after the declarations below):
1286 //
1287 // - Transform feedback write (in render pass), then vertex/index read (in render pass)
1288 // - Transform feedback write (in render pass), then ubo read (outside render pass)
1289 // - Framebuffer attachment write (in render pass), then texture sample (outside render pass)
1290 // * Note that texture sampling inside render pass would cause a feedback loop
1291 //
1292 angle::Result endRenderPassIfTransformFeedbackBuffer(const vk::BufferHelper *buffer);
1293 angle::Result endRenderPassIfComputeReadAfterTransformFeedbackWrite();
1294 angle::Result endRenderPassIfComputeAccessAfterGraphicsImageAccess();
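// For illustration, the common pattern behind the three handlers above (a sketch; the real
// implementations live in ContextVk.cpp and differ per case): detect whether the resource about
// to be read was written by the still-open render pass and, if so, close the render pass before
// recording the read. For the vertex/index case this reduces to (see the inline definition near
// the end of this header):
//
//   if (buffer->writtenByCommandBuffer(mCurrentTransformFeedbackQueueSerial))
//   {
//       return flushCommandsAndEndRenderPass(
//           RenderPassClosureReason::XfbWriteThenVertexIndexBuffer);
//   }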
1295
1296 // Update read-only depth feedback loop mode. Typically called from
1297 // handleDirtyGraphicsReadOnlyDepthFeedbackLoopMode, but can be called from UtilsVk in functions
1298 // that don't necessarily break the render pass.
1299 angle::Result switchOutReadOnlyDepthStencilMode(DirtyBits::Iterator *dirtyBitsIterator,
1300 DirtyBits dirtyBitMask,
1301 UpdateDepthFeedbackLoopReason depthReason,
1302 UpdateDepthFeedbackLoopReason stencilReason);
1303 angle::Result switchToReadOnlyDepthStencilMode(gl::Texture *texture,
1304 gl::Command command,
1305 FramebufferVk *drawFramebuffer,
1306 bool isStencilTexture);
1307
1308 angle::Result onResourceAccess(const vk::CommandBufferAccess &access);
1309 angle::Result flushCommandBuffersIfNecessary(const vk::CommandBufferAccess &access);
1310 bool renderPassUsesStorageResources() const;
1311
1312 angle::Result pushDebugGroupImpl(GLenum source, GLuint id, const char *message);
1313 angle::Result popDebugGroupImpl();
1314
1315 void updateScissor(const gl::State &glState);
1316
1317 void updateDepthStencil(const gl::State &glState);
1318 void updateDepthTestEnabled(const gl::State &glState);
1319 void updateDepthWriteEnabled(const gl::State &glState);
1320 void updateDepthFunc(const gl::State &glState);
1321 void updateStencilTestEnabled(const gl::State &glState);
1322
1323 void updateSampleShadingWithRasterizationSamples(const uint32_t rasterizationSamples);
1324 void updateRasterizationSamples(const uint32_t rasterizationSamples);
1325 void updateRasterizerDiscardEnabled(bool isPrimitivesGeneratedQueryActive);
1326
1327 void updateAdvancedBlendEquations(const gl::ProgramExecutable *executable);
1328
1329 void updateDither();
1330
1331 // When the useNonZeroStencilWriteMaskStaticState workaround is enabled, the static state for
1332 // stencil should be non-zero despite the state being dynamic. This is done when:
1333 //
1334 // - The shader includes discard, or
1335 // - Alpha-to-coverage is enabled.
1336 //
1337 // An alternative could have been to set the static state unconditionally to non-zero. However,
1338 // this is avoided because it would disable certain optimizations on the affected driver. A
1338 // sketch of the resulting logic follows the declaration below.
1339 void updateStencilWriteWorkaround();
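// A minimal sketch of the logic described above (illustrative only; the pipeline-desc setter
// named below is an assumption, and the real implementation in ContextVk.cpp may differ):
//
//   void ContextVk::updateStencilWriteWorkaround()
//   {
//       if (!getFeatures().useNonZeroStencilWriteMaskStaticState.enabled)
//           return;
//       const bool usesDiscard         = /* executable uses discard */ false;
//       const bool alphaToCoverage     = /* GL_SAMPLE_ALPHA_TO_COVERAGE enabled */ false;
//       const bool useNonZeroWriteMask = usesDiscard || alphaToCoverage;
//       // Record the decision in the cached pipeline description (hypothetical helper name).
//       mGraphicsPipelineDesc->updateNonZeroStencilWriteMaskWorkaround(
//           &mGraphicsPipelineTransition, useNonZeroWriteMask);
//   }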
1340
1341 void updateShaderResourcesWithSharedCacheKey(
1342 const vk::SharedDescriptorSetCacheKey &sharedCacheKey);
1343
1344 angle::Result createGraphicsPipeline();
1345
1346 angle::Result allocateQueueSerialIndex();
1347 void releaseQueueSerialIndex();
1348
1349 void generateOutsideRenderPassCommandsQueueSerial();
1350 void generateRenderPassCommandsQueueSerial(QueueSerial *queueSerialOut);
1351
1352 angle::Result ensureInterfacePipelineCache();
1353
1354 std::array<GraphicsDirtyBitHandler, DIRTY_BIT_MAX> mGraphicsDirtyBitHandlers;
1355 std::array<ComputeDirtyBitHandler, DIRTY_BIT_MAX> mComputeDirtyBitHandlers;
1356
1357 vk::RenderPassCommandBuffer *mRenderPassCommandBuffer;
1358
1359 vk::PipelineHelper *mCurrentGraphicsPipeline;
1360 vk::PipelineHelper *mCurrentGraphicsPipelineShaders;
1361 vk::PipelineHelper *mCurrentGraphicsPipelineVertexInput;
1362 vk::PipelineHelper *mCurrentGraphicsPipelineFragmentOutput;
1363 vk::PipelineHelper *mCurrentComputePipeline;
1364 gl::PrimitiveMode mCurrentDrawMode;
1365
1366 WindowSurfaceVk *mCurrentWindowSurface;
1367 // Records the current rotation of the surface (draw/read) framebuffer, derived from
1368 // mCurrentWindowSurface->getPreTransform().
1369 SurfaceRotation mCurrentRotationDrawFramebuffer;
1370 SurfaceRotation mCurrentRotationReadFramebuffer;
1371
1372 // Tracks whether we are in depth/stencil *read-only* or feedback loop mode. Read-only mode is
1373 // specially allowed because both usages, attachment and texture, are read-only. When switching
1374 // away from read-only mode, the render pass is broken to accommodate the new writable layout.
1375 vk::RenderPassUsageFlags mDepthStencilAttachmentFlags;
1376
1377 // Keep a cached pipeline description structure that can be used to query the pipeline cache.
1378 // Kept in a pointer so allocations can be aligned, and structs can be portably packed.
1379 std::unique_ptr<vk::GraphicsPipelineDesc> mGraphicsPipelineDesc;
1380 // Transition bits indicating which state has changed since the last pipeline recreation. They
1381 // are used to look up pipelines in the cache without iterating over the entire key, as a
1382 // performance optimization.
1383 //
1384 // |mGraphicsPipelineTransition| tracks transition bits since the last complete pipeline
1385 // creation/retrieval. |mGraphicsPipelineLibraryTransition| tracks the same but for the case
1386 // where the pipeline is created through libraries. The latter accumulates
1387 // |mGraphicsPipelineTransition| while the caches are hit, so that the bits are not lost if a
1388 // partial library needs to be created in the future.
1389 vk::GraphicsPipelineTransitionBits mGraphicsPipelineTransition;
1390 vk::GraphicsPipelineTransitionBits mGraphicsPipelineLibraryTransition;
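// An illustrative sketch of the accumulation described above (not a verbatim excerpt; the actual
// logic lives in the pipeline creation path in ContextVk.cpp): while the complete-pipeline cache
// keeps hitting, the per-draw transition is folded into the library transition so the bits are
// not lost if a partial library has to be created later:
//
//   mGraphicsPipelineLibraryTransition |= mGraphicsPipelineTransition;
//   mGraphicsPipelineTransition.reset();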
1391
1392 // When VK_EXT_graphics_pipeline_library is available, the vertex input and fragment output
1393 // partial pipelines are created in the following caches.
1394 VertexInputGraphicsPipelineCache mVertexInputGraphicsPipelineCache;
1395 FragmentOutputGraphicsPipelineCache mFragmentOutputGraphicsPipelineCache;
1396
1397 // A pipeline cache used specifically for vertex input and fragment output pipelines, when there
1398 // is no blob reuse between libraries and monolithic pipelines. In that case, there is no point
1399 // in storing monolithic pipelines in the same cache as these partial pipelines.
1400 //
1401 // Note additionally that applications only create a handful of vertex input and fragment output
1402 // pipelines, and creating them is also a fast operation, so this cache is both small and
1403 // ephemeral (i.e. not cached to disk).
1404 vk::PipelineCache mInterfacePipelinesCache;
1405
1406 // These pools are externally synchronized, so they cannot be accessed from different
1407 // threads simultaneously. Hence, we keep them in the ContextVk instead of the RendererVk.
1408 // Note that this implementation would need to change in shared-resource scenarios; likely
1409 // we would instead share a single set of pools between the share groups.
1410 gl::QueryTypeMap<vk::DynamicQueryPool> mQueryPools;
1411
1412 // Queries that need to be closed and reopened with the render pass:
1413 //
1414 // - Occlusion queries
1415 // - Transform feedback queries, if not emulated
1416 gl::QueryTypeMap<QueryVk *> mActiveRenderPassQueries;
1417
1418 // Dirty bits.
1419 DirtyBits mGraphicsDirtyBits;
1420 DirtyBits mComputeDirtyBits;
1421 DirtyBits mNonIndexedDirtyBitsMask;
1422 DirtyBits mIndexedDirtyBitsMask;
1423 DirtyBits mNewGraphicsCommandBufferDirtyBits;
1424 DirtyBits mNewComputeCommandBufferDirtyBits;
1425 DirtyBits mDynamicStateDirtyBits;
1426 static constexpr DirtyBits kColorAccessChangeDirtyBits{DIRTY_BIT_COLOR_ACCESS};
1427 static constexpr DirtyBits kDepthStencilAccessChangeDirtyBits{
1428 DIRTY_BIT_READ_ONLY_DEPTH_FEEDBACK_LOOP_MODE, DIRTY_BIT_DEPTH_STENCIL_ACCESS};
1429 static constexpr DirtyBits kIndexAndVertexDirtyBits{DIRTY_BIT_VERTEX_BUFFERS,
1430 DIRTY_BIT_INDEX_BUFFER};
1431 static constexpr DirtyBits kPipelineDescAndBindingDirtyBits{DIRTY_BIT_PIPELINE_DESC,
1432 DIRTY_BIT_PIPELINE_BINDING};
1433 static constexpr DirtyBits kTexturesAndDescSetDirtyBits{DIRTY_BIT_TEXTURES,
1434 DIRTY_BIT_DESCRIPTOR_SETS};
1435 static constexpr DirtyBits kResourcesAndDescSetDirtyBits{DIRTY_BIT_SHADER_RESOURCES,
1436 DIRTY_BIT_DESCRIPTOR_SETS};
1437 static constexpr DirtyBits kUniformBuffersAndDescSetDirtyBits{DIRTY_BIT_UNIFORM_BUFFERS,
1438 DIRTY_BIT_DESCRIPTOR_SETS};
1439 static constexpr DirtyBits kXfbBuffersAndDescSetDirtyBits{DIRTY_BIT_TRANSFORM_FEEDBACK_BUFFERS,
1440 DIRTY_BIT_DESCRIPTOR_SETS};
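// Illustrative use of the grouped bits above (a sketch of the typical call-site pattern, not a
// verbatim excerpt): when texture bindings change, the texture and descriptor-set dirty bits are
// raised together, e.g.:
//
//   mGraphicsDirtyBits |= kTexturesAndDescSetDirtyBits;
//   mComputeDirtyBits |= kTexturesAndDescSetDirtyBits;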
1441
1442 // The offset we had the last time we bound the index buffer.
1443 const GLvoid *mLastIndexBufferOffset;
1444 VkDeviceSize mCurrentIndexBufferOffset;
1445 gl::DrawElementsType mCurrentDrawElementsType;
1446 angle::PackedEnumMap<gl::DrawElementsType, VkIndexType> mIndexTypeMap;
1447
1448 // Cache the current draw call's firstVertex to be passed to
1449 // TransformFeedbackVk::getBufferOffsets. Unfortunately, gl_BaseVertex support in Vulkan is
1450 // not yet ubiquitous, which would have otherwise removed the need for this value to be passed
1451 // as a uniform.
1452 GLint mXfbBaseVertex;
1453 // Cache the current draw call's vertex count as well to support instanced draw calls
1454 GLuint mXfbVertexCountPerInstance;
1455
1456 // Cached clear value/mask for color and depth/stencil.
1457 VkClearValue mClearColorValue;
1458 VkClearValue mClearDepthStencilValue;
1459 gl::BlendStateExt::ColorMaskStorage::Type mClearColorMasks;
1460
1461 IncompleteTextureSet mIncompleteTextures;
1462
1463 // If the current surface bound to this context wants to have all rendering flipped vertically.
1464 // Updated on calls to onMakeCurrent.
1465 bool mFlipYForCurrentSurface;
1466 bool mFlipViewportForDrawFramebuffer;
1467 bool mFlipViewportForReadFramebuffer;
1468
1469 // If any host-visible buffer is written by the GPU since last submission, a barrier is inserted
1470 // at the end of the command buffer to make that write available to the host.
1471 bool mIsAnyHostVisibleBufferWritten;
1472
1473 // Whether this context should do seamful cube map sampling emulation.
1474 bool mEmulateSeamfulCubeMapSampling;
1475
1476 // This info is used in the descriptor update step.
1477 gl::ActiveTextureArray<TextureVk *> mActiveTextures;
1478
1479 // We use texture serials to optimize texture binding updates. Each permutation of a
1480 // {VkImage, VkSampler} pair generates a unique serial. These object ids are combined to form a
1481 // unique signature for each descriptor set. This allows us to keep a cache of descriptor sets
1482 // and avoid calling vkAllocateDescriptorSets on each texture update.
1483 vk::DescriptorSetDesc mActiveTexturesDesc;
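// Conceptually (a sketch with hypothetical accessor names; the real code builds this through the
// vk::DescriptorSetDesc helpers): the signature is formed by appending the image and sampler
// serials of each active texture, and the result keys the descriptor-set cache:
//
//   vk::DescriptorSetDesc desc;
//   for (size_t unit : activeTextureUnits)
//   {
//       desc.append(textures[unit]->getImageViewSerial());  // hypothetical accessor
//       desc.append(samplers[unit]->getSamplerSerial());    // hypothetical accessor
//   }
//   // Look up (or allocate and cache) the descriptor set keyed by |desc|.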
1484
1485 vk::DescriptorSetDescBuilder mShaderBuffersDescriptorDesc;
1486 // The WriteDescriptorDescs from ProgramExecutableVk with InputAttachment update.
1487 vk::WriteDescriptorDescs mShaderBufferWriteDescriptorDescs;
1488
1489 gl::ActiveTextureArray<TextureVk *> mActiveImages;
1490
1491 // "Current Value" aka default vertex attribute state.
1492 gl::AttributesMask mDirtyDefaultAttribsMask;
1493
1494 // DynamicBuffers for streaming vertex data from client memory pointers as well as for default
1495 // attributes. mHasInFlightStreamedVertexBuffers indicates whether the dynamic buffer has any
1496 // in-flight buffers that need to be released at submission time.
1497 gl::AttribArray<vk::DynamicBuffer> mStreamedVertexBuffers;
1498 gl::AttributesMask mHasInFlightStreamedVertexBuffers;
1499
1500 // We use a single pool for recording commands. We also keep a free list for pool recycling.
1501 vk::SecondaryCommandPools mCommandPools;
1502
1503 // Per context queue serial
1504 SerialIndex mCurrentQueueSerialIndex;
1505 QueueSerial mLastFlushedQueueSerial;
1506 QueueSerial mLastSubmittedQueueSerial;
1507 // All submitted queue serials over the lifetime of this context.
1508 vk::ResourceUse mSubmittedResourceUse;
1509 // Current active transform feedback buffer queue serial. Invalid if TF not active.
1510 QueueSerial mCurrentTransformFeedbackQueueSerial;
1511
1512 // The garbage list for single-context-use objects. The list is GPU-tracked with the next
1513 // submission's queueSerial. Note: resource-based shared objects should always be added to the
1514 // renderer's mSharedGarbage instead.
1515 vk::GarbageList mCurrentGarbage;
1516
1517 RenderPassCache mRenderPassCache;
1518
1519 vk::OutsideRenderPassCommandBufferHelper *mOutsideRenderPassCommands;
1520 vk::RenderPassCommandBufferHelper *mRenderPassCommands;
1521
1522 // Allocators for the render pass command buffers. They are utilized only when shared ring
1523 // buffer allocators are being used.
1524 vk::SecondaryCommandMemoryAllocator mOutsideRenderPassCommandsAllocator;
1525 vk::SecondaryCommandMemoryAllocator mRenderPassCommandsAllocator;
1526
1527 // The following is used when creating debug-util markers for graphics debuggers (e.g. AGI). A
1528 // given gl{Begin|End}Query command may result in commands being submitted to the outside or
1529 // render-pass command buffer. The ContextVk::handleGraphicsEventLog() method records the
1530 // appropriate command buffer for use by ContextVk::endEventLogForQuery(). The knowledge of
1531 // which command buffer to use depends on the particular type of query (e.g. samples
1532 // vs. timestamp), and is only known by the query code, which is what calls
1533 // ContextVk::handleGraphicsEventLog(). After all back-end processing of the gl*Query command
1534 // is complete, the front-end calls ContextVk::endEventLogForQuery(), which needs to know which
1535 // command buffer to call endDebugUtilsLabelEXT() for.
1536 GraphicsEventCmdBuf mQueryEventType;
1537
1538 // Internal shader library.
1539 vk::ShaderLibrary mShaderLibrary;
1540 UtilsVk mUtils;
1541
1542 bool mGpuEventsEnabled;
1543 vk::DynamicQueryPool mGpuEventQueryPool;
1544 // A list of queries that have yet to be turned into an event (their result is not yet
1545 // available).
1546 std::vector<GpuEventQuery> mInFlightGpuEventQueries;
1547 // A list of gpu events since the last clock sync.
1548 std::vector<GpuEvent> mGpuEvents;
1549 // The current frame index, used to generate a submission-encompassing event tagged with it.
1550 uint32_t mPrimaryBufferEventCounter;
1551
1552 // Cached value of the color attachment mask of the current draw framebuffer. This is used to
1553 // know which attachment indices have their blend state set in |mGraphicsPipelineDesc|, and
1554 // subsequently is used to clear the blend state for attachments that no longer exist when a new
1555 // framebuffer is bound.
1556 gl::DrawBufferMask mCachedDrawFramebufferColorAttachmentMask;
1557
1558 // Whether a flush was requested, but is deferred as an optimization to avoid breaking the
1559 // render pass.
1560 bool mHasDeferredFlush;
1561
1562 // Whether this context has produced any commands so far. While the renderer already skips
1563 // vkQueueSubmit when no commands are recorded, this variable allows glFlush itself to be
1564 // skipped entirely. This is particularly needed for an optimization where the Surface is in
1565 // shared-present mode and the app unnecessarily calls eglSwapBuffers (which is equivalent to
1566 // glFlush in that mode).
1567 bool mHasAnyCommandsPendingSubmission;
1568
1569 // Whether framebuffer fetch is active. When the permanentlySwitchToFramebufferFetchMode
1570 // feature is enabled, if any program uses framebuffer fetch, rendering switches to assuming
1571 // framebuffer fetch could happen in any render pass. This incurs a potential cost due to usage
1572 // of the GENERAL layout instead of COLOR_ATTACHMENT_OPTIMAL, but has the definite benefit of
1573 // avoiding render pass breaks when a framebuffer fetch program is used mid-render-pass.
1574 bool mIsInFramebufferFetchMode;
1575
1576 // True if the currently started render pass is allowed to be reactivated.
1577 bool mAllowRenderPassToReactivate;
1578
1579 // The size of copy commands issued between buffers and images. Used to submit the command
1580 // buffer for the outside render pass.
1581 VkDeviceSize mTotalBufferToImageCopySize;
1582
1583 // Semaphores that must be flushed before the current commands. Flushed semaphores will be
1584 // waited on in the next submission.
1585 std::vector<VkSemaphore> mWaitSemaphores;
1586 std::vector<VkPipelineStageFlags> mWaitSemaphoreStageMasks;
1587 // Whether this context has wait semaphores (flushed and unflushed) that must be submitted.
1588 bool mHasWaitSemaphoresPendingSubmission;
1589
1590 // Hold information from the last gpu clock sync for future gpu-to-cpu timestamp conversions.
1591 GpuClockSyncInfo mGpuClockSync;
1592
1593 // The very first timestamp queried for a GPU event is used as the origin, so event timestamps
1594 // have values close to zero. This avoids losing roughly 12 bits of precision when converting
1595 // these 64-bit values to double (whose mantissa holds only 53 bits).
1596 uint64_t mGpuEventTimestampOrigin;
1597
1598 // A mix of per-frame and per-run counters.
1599 angle::PerfMonitorCounterGroups mPerfMonitorCounters;
1600
1601 gl::state::DirtyBits mPipelineDirtyBitsMask;
1602
1603 egl::ContextPriority mInitialContextPriority;
1604 egl::ContextPriority mContextPriority;
1605 vk::ProtectionType mProtectionType;
1606
1607 ShareGroupVk *mShareGroupVk;
1608
1609 // This is a special "empty" placeholder buffer for use when we just need a buffer binding but
1610 // not the data. Examples are a shader that has no uniforms or does not use all slots in the
1611 // atomic counter buffer array, or places where there is no vertex buffer, since Vulkan does not
1612 // allow binding a null vertex buffer.
1613 vk::BufferHelper mEmptyBuffer;
1614
1615 // Storage for default uniforms of ProgramVks and ProgramPipelineVks.
1616 vk::DynamicBuffer mDefaultUniformStorage;
1617
1618 std::vector<std::string> mCommandBufferDiagnostics;
1619
1620 // Record GL API calls for debuggers
1621 std::vector<std::string> mEventLog;
1622
1623 // Viewport and scissor are handled as dynamic state.
1624 VkViewport mViewport;
1625 VkRect2D mScissor;
1626
1627 VulkanCacheStats mVulkanCacheStats;
1628
1629 // A graph built from pipeline descs and their transitions.
1630 std::ostringstream mPipelineCacheGraph;
1631
1632 RangedSerialFactory mOutsideRenderPassSerialFactory;
1633 };
1634
1635 ANGLE_INLINE angle::Result ContextVk::endRenderPassIfTransformFeedbackBuffer(
1636 const vk::BufferHelper *buffer)
1637 {
1638 if (!mCurrentTransformFeedbackQueueSerial.valid() || !buffer ||
1639 !buffer->writtenByCommandBuffer(mCurrentTransformFeedbackQueueSerial))
1640 {
1641 return angle::Result::Continue;
1642 }
1643
1644 return flushCommandsAndEndRenderPass(RenderPassClosureReason::XfbWriteThenVertexIndexBuffer);
1645 }
1646
1647 ANGLE_INLINE angle::Result ContextVk::onIndexBufferChange(
1648 const vk::BufferHelper *currentIndexBuffer)
1649 {
1650 mGraphicsDirtyBits.set(DIRTY_BIT_INDEX_BUFFER);
1651 mLastIndexBufferOffset = reinterpret_cast<const void *>(angle::DirtyPointer);
1652 return endRenderPassIfTransformFeedbackBuffer(currentIndexBuffer);
1653 }
1654
1655 ANGLE_INLINE angle::Result ContextVk::onVertexBufferChange(const vk::BufferHelper *vertexBuffer)
1656 {
1657 mGraphicsDirtyBits.set(DIRTY_BIT_VERTEX_BUFFERS);
1658 return endRenderPassIfTransformFeedbackBuffer(vertexBuffer);
1659 }
1660
1661 ANGLE_INLINE angle::Result ContextVk::onVertexAttributeChange(size_t attribIndex,
1662 GLuint stride,
1663 GLuint divisor,
1664 angle::FormatID format,
1665 bool compressed,
1666 GLuint relativeOffset,
1667 const vk::BufferHelper *vertexBuffer)
1668 {
1669 const GLuint staticStride = mRenderer->useVertexInputBindingStrideDynamicState() ? 0 : stride;
1670
1671 invalidateCurrentGraphicsPipeline();
1672
1673 // Set divisor to 1 for attribs with emulated divisor
1674 mGraphicsPipelineDesc->updateVertexInput(
1675 this, &mGraphicsPipelineTransition, static_cast<uint32_t>(attribIndex), staticStride,
1676 divisor > mRenderer->getMaxVertexAttribDivisor() ? 1 : divisor, format, compressed,
1677 relativeOffset);
1678 return onVertexBufferChange(vertexBuffer);
1679 }
1680
1681 ANGLE_INLINE bool ContextVk::hasUnsubmittedUse(const vk::ResourceUse &use) const
1682 {
1683 return mCurrentQueueSerialIndex != kInvalidQueueSerialIndex &&
1684 use > QueueSerial(mCurrentQueueSerialIndex,
1685 mRenderer->getLastSubmittedSerial(mCurrentQueueSerialIndex));
1686 }
1687
1688 ANGLE_INLINE bool UseLineRaster(const ContextVk *contextVk, gl::PrimitiveMode mode)
1689 {
1690 return gl::IsLineMode(mode);
1691 }
1692 } // namespace rx
1693
1694 // Generate a perf warning, and insert an event marker in the command buffer.
1695 #define ANGLE_VK_PERF_WARNING(contextVk, severity, ...) \
1696 do \
1697 { \
1698 char ANGLE_MESSAGE[200]; \
1699 snprintf(ANGLE_MESSAGE, sizeof(ANGLE_MESSAGE), __VA_ARGS__); \
1700 ANGLE_PERF_WARNING(contextVk->getDebug(), severity, ANGLE_MESSAGE); \
1701 \
1702 contextVk->insertEventMarkerImpl(GL_DEBUG_SOURCE_OTHER, ANGLE_MESSAGE); \
1703 } while (0)
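// Example usage (illustrative; the message and arguments follow printf conventions):
//
//   ANGLE_VK_PERF_WARNING(contextVk, GL_DEBUG_SEVERITY_LOW,
//                         "Render pass closed %u times due to framebuffer change", closeCount);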
1704
1705 // Generate a trace event for graphics profiler, and insert an event marker in the command buffer.
1706 #define ANGLE_VK_TRACE_EVENT_AND_MARKER(contextVk, ...) \
1707 do \
1708 { \
1709 char ANGLE_MESSAGE[200]; \
1710 snprintf(ANGLE_MESSAGE, sizeof(ANGLE_MESSAGE), __VA_ARGS__); \
1711 ANGLE_TRACE_EVENT0("gpu.angle", ANGLE_MESSAGE); \
1712 \
1713 contextVk->insertEventMarkerImpl(GL_DEBUG_SOURCE_OTHER, ANGLE_MESSAGE); \
1714 } while (0)
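// Example usage (illustrative):
//
//   ANGLE_VK_TRACE_EVENT_AND_MARKER(contextVk, "Compute dispatch (%u, %u, %u)", x, y, z);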
1715
1716 #endif // LIBANGLE_RENDERER_VULKAN_CONTEXTVK_H_
1717