1 //
2 // Copyright 2018 The ANGLE Project Authors. All rights reserved.
3 // Use of this source code is governed by a BSD-style license that can be
4 // found in the LICENSE file.
5 //
6 // vk_helpers:
7 //   Helper utility classes that manage Vulkan resources.
8 
9 #include "libANGLE/renderer/vulkan/vk_helpers.h"
10 #include "libANGLE/renderer/driver_utils.h"
11 
12 #include "common/utilities.h"
13 #include "common/vulkan/vk_headers.h"
14 #include "image_util/loadimage.h"
15 #include "libANGLE/Context.h"
16 #include "libANGLE/renderer/renderer_utils.h"
17 #include "libANGLE/renderer/vulkan/BufferVk.h"
18 #include "libANGLE/renderer/vulkan/ContextVk.h"
19 #include "libANGLE/renderer/vulkan/DisplayVk.h"
20 #include "libANGLE/renderer/vulkan/FramebufferVk.h"
21 #include "libANGLE/renderer/vulkan/RenderTargetVk.h"
22 #include "libANGLE/renderer/vulkan/RendererVk.h"
23 #include "libANGLE/renderer/vulkan/android/vk_android_utils.h"
24 #include "libANGLE/renderer/vulkan/vk_utils.h"
25 #include "libANGLE/trace.h"
26 
27 namespace rx
28 {
29 namespace vk
30 {
31 namespace
32 {
33 // ANGLE_robust_resource_initialization requires color textures to be initialized to zero.
34 constexpr VkClearColorValue kRobustInitColorValue = {{0, 0, 0, 0}};
35 // When emulating a texture, we want the emulated channels to be 0, with alpha 1.
36 constexpr VkClearColorValue kEmulatedInitColorValue = {{0, 0, 0, 1.0f}};
37 // ANGLE_robust_resource_initialization requires depth to be initialized to 1 and stencil to 0.
38 // We are fine with these values for emulated depth/stencil textures too.
39 constexpr VkClearDepthStencilValue kRobustInitDepthStencilValue = {1.0f, 0};
40 
41 constexpr VkImageAspectFlags kDepthStencilAspects =
42     VK_IMAGE_ASPECT_STENCIL_BIT | VK_IMAGE_ASPECT_DEPTH_BIT;
43 
44 constexpr VkBufferUsageFlags kLineLoopDynamicBufferUsage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
45                                                            VK_BUFFER_USAGE_TRANSFER_DST_BIT |
46                                                            VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
47 constexpr int kLineLoopDynamicBufferInitialSize = 1024 * 1024;
48 constexpr VkBufferUsageFlags kLineLoopDynamicIndirectBufferUsage =
49     VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT |
50     VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
51 constexpr int kLineLoopDynamicIndirectBufferInitialSize = sizeof(VkDrawIndirectCommand) * 16;
52 
53 constexpr angle::PackedEnumMap<PipelineStage, VkPipelineStageFlagBits> kPipelineStageFlagBitMap = {
54     {PipelineStage::TopOfPipe, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT},
55     {PipelineStage::DrawIndirect, VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT},
56     {PipelineStage::VertexInput, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT},
57     {PipelineStage::VertexShader, VK_PIPELINE_STAGE_VERTEX_SHADER_BIT},
58     {PipelineStage::GeometryShader, VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT},
59     {PipelineStage::TransformFeedback, VK_PIPELINE_STAGE_TRANSFORM_FEEDBACK_BIT_EXT},
60     {PipelineStage::EarlyFragmentTest, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT},
61     {PipelineStage::FragmentShader, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT},
62     {PipelineStage::LateFragmentTest, VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT},
63     {PipelineStage::ColorAttachmentOutput, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT},
64     {PipelineStage::ComputeShader, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT},
65     {PipelineStage::Transfer, VK_PIPELINE_STAGE_TRANSFER_BIT},
66     {PipelineStage::BottomOfPipe, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT},
67     {PipelineStage::Host, VK_PIPELINE_STAGE_HOST_BIT}};
68 
69 constexpr gl::ShaderMap<PipelineStage> kPipelineStageShaderMap = {
70     {gl::ShaderType::Vertex, PipelineStage::VertexShader},
71     {gl::ShaderType::Fragment, PipelineStage::FragmentShader},
72     {gl::ShaderType::Geometry, PipelineStage::GeometryShader},
73     {gl::ShaderType::Compute, PipelineStage::ComputeShader},
74 };
75 
76 constexpr size_t kDefaultPoolAllocatorPageSize = 16 * 1024;
77 
78 struct ImageMemoryBarrierData
79 {
80     char name[44];
81 
82     // The Vk layout corresponding to the ImageLayout key.
83     VkImageLayout layout;
84 
85     // The stage in which the image is used (or Bottom/Top if not using any specific stage).  Unless
86     // Bottom/Top (Bottom used for transition to and Top used for transition from), the two values
87     // should match.
88     VkPipelineStageFlags dstStageMask;
89     VkPipelineStageFlags srcStageMask;
90     // Access mask when transitioning into this layout.
91     VkAccessFlags dstAccessMask;
92     // Access mask when transitioning out from this layout.  Note that source access mask never
93     // needs a READ bit, as WAR hazards don't need memory barriers (just execution barriers).
94     VkAccessFlags srcAccessMask;
95     // Read or write.
96     ResourceAccess type;
97     // CommandBufferHelper tracks an array of PipelineBarriers. This indicates which array element
98     // this should be merged into. Right now we track an individual barrier for every PipelineStage.
99     // If the layout has a single stage mask bit, we use that stage as the index. If the layout has
100     // multiple stage mask bits, we pick the lowest stage as the index since it is the first stage
101     // that needs the barrier.
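    // For example, a layout accessed by the vertex, tessellation, and geometry stages has several
    // stage mask bits set, so its barrierIndex is PipelineStage::VertexShader, the earliest of them.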
102     PipelineStage barrierIndex;
103 };
104 
105 constexpr VkPipelineStageFlags kPreFragmentStageFlags =
106     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
107     VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT;
108 
109 constexpr VkPipelineStageFlags kAllShadersPipelineStageFlags =
110     kPreFragmentStageFlags | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
111     VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT;
112 
113 constexpr VkPipelineStageFlags kAllDepthStencilPipelineStageFlags =
114     VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT;
115 
116 // clang-format off
117 constexpr angle::PackedEnumMap<ImageLayout, ImageMemoryBarrierData> kImageMemoryBarrierData = {
118     {
119         ImageLayout::Undefined,
120         ImageMemoryBarrierData{
121             "Undefined",
122             VK_IMAGE_LAYOUT_UNDEFINED,
123             VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
124             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
125             // Transition to: we don't expect to transition into Undefined.
126             0,
127             // Transition from: there's no data in the image to care about.
128             0,
129             ResourceAccess::ReadOnly,
130             PipelineStage::InvalidEnum,
131         },
132     },
133     {
134         ImageLayout::ColorAttachment,
135         ImageMemoryBarrierData{
136             "ColorAttachment",
137             VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
138             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
139             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
140             // Transition to: all reads and writes must happen after barrier.
141             VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
142             // Transition from: all writes must finish before barrier.
143             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
144             ResourceAccess::Write,
145             PipelineStage::ColorAttachmentOutput,
146         },
147     },
148     {
149         ImageLayout::ColorAttachmentAndFragmentShaderRead,
150         ImageMemoryBarrierData{
151             "ColorAttachmentAndFragmentShaderRead",
152             VK_IMAGE_LAYOUT_GENERAL,
153             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
154             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
155             // Transition to: all reads and writes must happen after barrier.
156             VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
157             // Transition from: all writes must finish before barrier.
158             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
159             ResourceAccess::Write,
160             PipelineStage::FragmentShader,
161         },
162     },
163     {
164         ImageLayout::ColorAttachmentAndAllShadersRead,
165         ImageMemoryBarrierData{
166             "ColorAttachmentAndAllShadersRead",
167             VK_IMAGE_LAYOUT_GENERAL,
168             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | kAllShadersPipelineStageFlags,
169             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | kAllShadersPipelineStageFlags,
170             // Transition to: all reads and writes must happen after barrier.
171             VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
172             // Transition from: all writes must finish before barrier.
173             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
174             ResourceAccess::Write,
175             // In case of multiple destination stages, we use the earliest stage as the barrier index.
176             PipelineStage::VertexShader,
177         },
178     },
179     {
180         ImageLayout::DSAttachmentWriteAndFragmentShaderRead,
181         ImageMemoryBarrierData{
182             "DSAttachmentWriteAndFragmentShaderRead",
183             VK_IMAGE_LAYOUT_GENERAL,
184             kAllDepthStencilPipelineStageFlags | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
185             kAllDepthStencilPipelineStageFlags | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
186             // Transition to: all reads and writes must happen after barrier.
187             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
188             // Transition from: all writes must finish before barrier.
189             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
190             ResourceAccess::Write,
191             PipelineStage::FragmentShader,
192         },
193     },
194     {
195         ImageLayout::DSAttachmentWriteAndAllShadersRead,
196         ImageMemoryBarrierData{
197             "DSAttachmentWriteAndAllShadersRead",
198             VK_IMAGE_LAYOUT_GENERAL,
199             kAllDepthStencilPipelineStageFlags | kAllShadersPipelineStageFlags,
200             kAllDepthStencilPipelineStageFlags | kAllShadersPipelineStageFlags,
201             // Transition to: all reads and writes must happen after barrier.
202             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_SHADER_READ_BIT,
203             // Transition from: all writes must finish before barrier.
204             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
205             ResourceAccess::Write,
206             // In case of multiple destination stages, we use the earliest stage as the barrier index.
207             PipelineStage::VertexShader,
208         },
209     },
210     {
211         ImageLayout::DSAttachmentReadAndFragmentShaderRead,
212         ImageMemoryBarrierData{
213             "DSAttachmentReadAndFragmentShaderRead",
214             VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
215             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | kAllDepthStencilPipelineStageFlags,
216             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | kAllDepthStencilPipelineStageFlags,
217             // Transition to: all reads must happen after barrier.
218             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
219             // Transition from: RAR and WAR don't need memory barrier.
220             0,
221             ResourceAccess::ReadOnly,
222             PipelineStage::EarlyFragmentTest,
223         },
224     },
225     {
226         ImageLayout::DSAttachmentReadAndAllShadersRead,
227         ImageMemoryBarrierData{
228             "DSAttachmentReadAndAllShadersRead",
229             VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
230             kAllShadersPipelineStageFlags | kAllDepthStencilPipelineStageFlags,
231             kAllShadersPipelineStageFlags | kAllDepthStencilPipelineStageFlags,
232             // Transition to: all reads must happen after barrier.
233             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
234             // Transition from: RAR and WAR don't need memory barrier.
235             0,
236             ResourceAccess::ReadOnly,
237             PipelineStage::VertexShader,
238         },
239     },
240     {
241         ImageLayout::DepthStencilAttachmentReadOnly,
242         ImageMemoryBarrierData{
243             "DepthStencilAttachmentReadOnly",
244             VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL,
245             kAllDepthStencilPipelineStageFlags,
246             kAllDepthStencilPipelineStageFlags,
247             // Transition to: all reads must happen after barrier.
248             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT,
249             // Transition from: RAR and WAR don't need memory barrier.
250             0,
251             ResourceAccess::ReadOnly,
252             PipelineStage::EarlyFragmentTest,
253         },
254     },
255     {
256         ImageLayout::DepthStencilAttachment,
257         ImageMemoryBarrierData{
258             "DepthStencilAttachment",
259             VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
260             kAllDepthStencilPipelineStageFlags,
261             kAllDepthStencilPipelineStageFlags,
262             // Transition to: all reads and writes must happen after barrier.
263             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
264             // Transition from: all writes must finish before barrier.
265             VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
266             ResourceAccess::Write,
267             PipelineStage::EarlyFragmentTest,
268         },
269     },
270     {
271         ImageLayout::DepthStencilResolveAttachment,
272         ImageMemoryBarrierData{
273             "DepthStencilResolveAttachment",
274             VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
275             // Note: depth/stencil resolve uses color output stage and mask!
276             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
277             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
278             // Transition to: all reads and writes must happen after barrier.
279             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
280             // Transition from: all writes must finish before barrier.
281             VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
282             ResourceAccess::Write,
283             PipelineStage::ColorAttachmentOutput,
284         },
285     },
286     {
287         ImageLayout::Present,
288         ImageMemoryBarrierData{
289             "Present",
290             VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
291             VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
292             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
293             // Transition to: vkQueuePresentKHR automatically performs the appropriate memory barriers:
294             //
295             // > Any writes to memory backing the images referenced by the pImageIndices and
296             // > pSwapchains members of pPresentInfo, that are available before vkQueuePresentKHR
297             // > is executed, are automatically made visible to the read access performed by the
298             // > presentation engine.
299             0,
300             // Transition from: RAR and WAR don't need memory barrier.
301             0,
302             ResourceAccess::ReadOnly,
303             PipelineStage::BottomOfPipe,
304         },
305     },
306     {
307         ImageLayout::SharedPresent,
308         ImageMemoryBarrierData{
309             "SharedPresent",
310             VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR,
311             VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
312             VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
313             // Transition to: all reads and writes must happen after barrier.
314             VK_ACCESS_MEMORY_READ_BIT | VK_ACCESS_MEMORY_WRITE_BIT,
315             // Transition from: all writes must finish before barrier.
316             VK_ACCESS_MEMORY_WRITE_BIT,
317             ResourceAccess::Write,
318             PipelineStage::BottomOfPipe,
319         },
320     },
321     {
322         ImageLayout::ExternalPreInitialized,
323         ImageMemoryBarrierData{
324             "ExternalPreInitialized",
325             // Binding a VkImage with an initial layout of VK_IMAGE_LAYOUT_UNDEFINED to external
326             // memory whose content has already been defined does not make the content undefined
327             // (see 12.8.1.  External Resource Sharing).
328             //
329             // Note that for external memory objects, if the content is already defined, the
330             // ownership rules imply that the first operation on the texture must be a call to
331             // glWaitSemaphoreEXT that grants ownership of the image and informs us of the true
332             // layout.  If the content is not already defined, the first operation may not be a
333             // glWaitSemaphore, but in this case undefined layout is appropriate.
334             VK_IMAGE_LAYOUT_UNDEFINED,
335             VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
336             VK_PIPELINE_STAGE_HOST_BIT | VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
337             // Transition to: we don't expect to transition into PreInitialized.
338             0,
339             // Transition from: all writes must finish before barrier.
340             VK_ACCESS_MEMORY_WRITE_BIT,
341             ResourceAccess::ReadOnly,
342             PipelineStage::InvalidEnum,
343         },
344     },
345     {
346         ImageLayout::ExternalShadersReadOnly,
347         ImageMemoryBarrierData{
348             "ExternalShadersReadOnly",
349             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
350             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
351             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
352             // Transition to: all reads must happen after barrier.
353             VK_ACCESS_SHADER_READ_BIT,
354             // Transition from: RAR and WAR don't need memory barrier.
355             0,
356             ResourceAccess::ReadOnly,
357             // In case of multiple destination stages, we use the earliest stage as the barrier index.
358             PipelineStage::TopOfPipe,
359         },
360     },
361     {
362         ImageLayout::ExternalShadersWrite,
363         ImageMemoryBarrierData{
364             "ExternalShadersWrite",
365             VK_IMAGE_LAYOUT_GENERAL,
366             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
367             VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
368             // Transition to: all reads and writes must happen after barrier.
369             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
370             // Transition from: all writes must finish before barrier.
371             VK_ACCESS_SHADER_WRITE_BIT,
372             ResourceAccess::Write,
373             // In case of multiple destination stages, we use the earliest stage as the barrier index.
374             PipelineStage::TopOfPipe,
375         },
376     },
377     {
378         ImageLayout::TransferSrc,
379         ImageMemoryBarrierData{
380             "TransferSrc",
381             VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
382             VK_PIPELINE_STAGE_TRANSFER_BIT,
383             VK_PIPELINE_STAGE_TRANSFER_BIT,
384             // Transition to: all reads must happen after barrier.
385             VK_ACCESS_TRANSFER_READ_BIT,
386             // Transition from: RAR and WAR don't need memory barrier.
387             0,
388             ResourceAccess::ReadOnly,
389             PipelineStage::Transfer,
390         },
391     },
392     {
393         ImageLayout::TransferDst,
394         ImageMemoryBarrierData{
395             "TransferDst",
396             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
397             VK_PIPELINE_STAGE_TRANSFER_BIT,
398             VK_PIPELINE_STAGE_TRANSFER_BIT,
399             // Transition to: all writes must happen after barrier.
400             VK_ACCESS_TRANSFER_WRITE_BIT,
401             // Transition from: all writes must finish before barrier.
402             VK_ACCESS_TRANSFER_WRITE_BIT,
403             ResourceAccess::Write,
404             PipelineStage::Transfer,
405         },
406     },
407     {
408         ImageLayout::VertexShaderReadOnly,
409         ImageMemoryBarrierData{
410             "VertexShaderReadOnly",
411             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
412             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
413             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
414             // Transition to: all reads must happen after barrier.
415             VK_ACCESS_SHADER_READ_BIT,
416             // Transition from: RAR and WAR don't need memory barrier.
417             0,
418             ResourceAccess::ReadOnly,
419             PipelineStage::VertexShader,
420         },
421     },
422     {
423         ImageLayout::VertexShaderWrite,
424         ImageMemoryBarrierData{
425             "VertexShaderWrite",
426             VK_IMAGE_LAYOUT_GENERAL,
427             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
428             VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
429             // Transition to: all reads and writes must happen after barrier.
430             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
431             // Transition from: all writes must finish before barrier.
432             VK_ACCESS_SHADER_WRITE_BIT,
433             ResourceAccess::Write,
434             PipelineStage::VertexShader,
435         },
436     },
437     {
438         ImageLayout::PreFragmentShadersReadOnly,
439         ImageMemoryBarrierData{
440             "PreFragmentShadersReadOnly",
441             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
442             kPreFragmentStageFlags,
443             kPreFragmentStageFlags,
444             // Transition to: all reads must happen after barrier.
445             VK_ACCESS_SHADER_READ_BIT,
446             // Transition from: RAR and WAR don't need memory barrier.
447             0,
448             ResourceAccess::ReadOnly,
449             // In case of multiple destination stages, we use the earliest stage as the barrier index.
450             PipelineStage::VertexShader,
451         },
452     },
453     {
454         ImageLayout::PreFragmentShadersWrite,
455         ImageMemoryBarrierData{
456             "PreFragmentShadersWrite",
457             VK_IMAGE_LAYOUT_GENERAL,
458             kPreFragmentStageFlags,
459             kPreFragmentStageFlags,
460             // Transition to: all reads and writes must happen after barrier.
461             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
462             // Transition from: all writes must finish before barrier.
463             VK_ACCESS_SHADER_WRITE_BIT,
464             ResourceAccess::Write,
465             // In case of multiple destination stages, we use the earliest stage as the barrier index.
466             PipelineStage::VertexShader,
467         },
468     },
469     {
470         ImageLayout::FragmentShaderReadOnly,
471         ImageMemoryBarrierData{
472             "FragmentShaderReadOnly",
473             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
474             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
475             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
476             // Transition to: all reads must happen after barrier.
477             VK_ACCESS_SHADER_READ_BIT,
478             // Transition from: RAR and WAR don't need memory barrier.
479             0,
480             ResourceAccess::ReadOnly,
481             PipelineStage::FragmentShader,
482         },
483     },
484     {
485         ImageLayout::FragmentShaderWrite,
486         ImageMemoryBarrierData{
487             "FragmentShaderWrite",
488             VK_IMAGE_LAYOUT_GENERAL,
489             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
490             VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
491             // Transition to: all reads and writes must happen after barrier.
492             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
493             // Transition from: all writes must finish before barrier.
494             VK_ACCESS_SHADER_WRITE_BIT,
495             ResourceAccess::Write,
496             PipelineStage::FragmentShader,
497         },
498     },
499     {
500         ImageLayout::ComputeShaderReadOnly,
501         ImageMemoryBarrierData{
502             "ComputeShaderReadOnly",
503             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
504             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
505             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
506             // Transition to: all reads must happen after barrier.
507             VK_ACCESS_SHADER_READ_BIT,
508             // Transition from: RAR and WAR don't need memory barrier.
509             0,
510             ResourceAccess::ReadOnly,
511             PipelineStage::ComputeShader,
512         },
513     },
514     {
515         ImageLayout::ComputeShaderWrite,
516         ImageMemoryBarrierData{
517             "ComputeShaderWrite",
518             VK_IMAGE_LAYOUT_GENERAL,
519             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
520             VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
521             // Transition to: all reads and writes must happen after barrier.
522             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
523             // Transition from: all writes must finish before barrier.
524             VK_ACCESS_SHADER_WRITE_BIT,
525             ResourceAccess::Write,
526             PipelineStage::ComputeShader,
527         },
528     },
529     {
530         ImageLayout::AllGraphicsShadersReadOnly,
531         ImageMemoryBarrierData{
532             "AllGraphicsShadersReadOnly",
533             VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
534             kAllShadersPipelineStageFlags,
535             kAllShadersPipelineStageFlags,
536             // Transition to: all reads must happen after barrier.
537             VK_ACCESS_SHADER_READ_BIT,
538             // Transition from: RAR and WAR don't need memory barrier.
539             0,
540             ResourceAccess::ReadOnly,
541             // In case of multiple destination stages, we use the earliest stage as the barrier index.
542             PipelineStage::VertexShader,
543         },
544     },
545     {
546         ImageLayout::AllGraphicsShadersWrite,
547         ImageMemoryBarrierData{
548             "AllGraphicsShadersWrite",
549             VK_IMAGE_LAYOUT_GENERAL,
550             kAllShadersPipelineStageFlags,
551             kAllShadersPipelineStageFlags,
552             // Transition to: all reads and writes must happen after barrier.
553             VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT,
554             // Transition from: all writes must finish before barrier.
555             VK_ACCESS_SHADER_WRITE_BIT,
556             ResourceAccess::Write,
557             // In case of multiple destination stages, we use the earliest stage as the barrier index.
558             PipelineStage::VertexShader,
559         },
560     },
561 };
562 // clang-format on
563 
564 VkPipelineStageFlags GetImageLayoutSrcStageMask(Context *context,
565                                                 const ImageMemoryBarrierData &transition)
566 {
567     return transition.srcStageMask & context->getRenderer()->getSupportedVulkanPipelineStageMask();
568 }
569 
570 VkPipelineStageFlags GetImageLayoutDstStageMask(Context *context,
571                                                 const ImageMemoryBarrierData &transition)
572 {
573     return transition.dstStageMask & context->getRenderer()->getSupportedVulkanPipelineStageMask();
574 }
575 
576 void HandlePrimitiveRestart(ContextVk *contextVk,
577                             gl::DrawElementsType glIndexType,
578                             GLsizei indexCount,
579                             const uint8_t *srcPtr,
580                             uint8_t *outPtr)
581 {
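    // Copy the indices while rewriting primitive-restart values for line loop emulation.  When the
    // driver does not support VK_EXT_index_type_uint8, 8-bit indices are widened to 16 bits.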
582     switch (glIndexType)
583     {
584         case gl::DrawElementsType::UnsignedByte:
585             if (contextVk->getFeatures().supportsIndexTypeUint8.enabled)
586             {
587                 CopyLineLoopIndicesWithRestart<uint8_t, uint8_t>(indexCount, srcPtr, outPtr);
588             }
589             else
590             {
591                 CopyLineLoopIndicesWithRestart<uint8_t, uint16_t>(indexCount, srcPtr, outPtr);
592             }
593             break;
594         case gl::DrawElementsType::UnsignedShort:
595             CopyLineLoopIndicesWithRestart<uint16_t, uint16_t>(indexCount, srcPtr, outPtr);
596             break;
597         case gl::DrawElementsType::UnsignedInt:
598             CopyLineLoopIndicesWithRestart<uint32_t, uint32_t>(indexCount, srcPtr, outPtr);
599             break;
600         default:
601             UNREACHABLE();
602     }
603 }
604 
605 bool HasBothDepthAndStencilAspects(VkImageAspectFlags aspectFlags)
606 {
607     return IsMaskFlagSet(aspectFlags, kDepthStencilAspects);
608 }
609 
610 uint8_t GetContentDefinedLayerRangeBits(uint32_t layerStart,
611                                         uint32_t layerCount,
612                                         uint32_t maxLayerCount)
613 {
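    // Build a mask with one bit per layer in the range [layerStart, layerStart + layerCount).
    // For example, layerStart = 2 with layerCount = 3 yields 0b00011100.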
614     uint8_t layerRangeBits = layerCount >= maxLayerCount ? static_cast<uint8_t>(~0u)
615                                                          : angle::BitMask<uint8_t>(layerCount);
616     layerRangeBits <<= layerStart;
617 
618     return layerRangeBits;
619 }
620 
621 uint32_t GetImageLayerCountForView(const ImageHelper &image)
622 {
623     // Depth > 1 means this is a 3D texture and depth is our layer count
624     return image.getExtents().depth > 1 ? image.getExtents().depth : image.getLayerCount();
625 }
626 
627 void ReleaseImageViews(ImageViewVector *imageViewVector, std::vector<GarbageObject> *garbage)
628 {
629     for (ImageView &imageView : *imageViewVector)
630     {
631         if (imageView.valid())
632         {
633             garbage->emplace_back(GetGarbage(&imageView));
634         }
635     }
636     imageViewVector->clear();
637 }
638 
639 void DestroyImageViews(ImageViewVector *imageViewVector, VkDevice device)
640 {
641     for (ImageView &imageView : *imageViewVector)
642     {
643         imageView.destroy(device);
644     }
645     imageViewVector->clear();
646 }
647 
648 ImageView *GetLevelImageView(ImageViewVector *imageViews, LevelIndex levelVk, uint32_t levelCount)
649 {
650     // Lazily allocate the storage for image views. We allocate the full level count because we
651     // don't want to trigger any std::vector reallocations. Reallocations could invalidate our
652     // view pointers.
653     if (imageViews->empty())
654     {
655         imageViews->resize(levelCount);
656     }
657     ASSERT(imageViews->size() > levelVk.get());
658 
659     return &(*imageViews)[levelVk.get()];
660 }
661 
662 ImageView *GetLevelLayerImageView(LayerLevelImageViewVector *imageViews,
663                                   LevelIndex levelVk,
664                                   uint32_t layer,
665                                   uint32_t levelCount,
666                                   uint32_t layerCount)
667 {
668     // Lazily allocate the storage for image views. We allocate the full layer count because we
669     // don't want to trigger any std::vector reallocations. Reallocations could invalidate our
670     // view pointers.
671     if (imageViews->empty())
672     {
673         imageViews->resize(layerCount);
674     }
675     ASSERT(imageViews->size() > layer);
676 
677     return GetLevelImageView(&(*imageViews)[layer], levelVk, levelCount);
678 }
679 
680 // Special rules apply to VkBufferImageCopy with depth/stencil. The components are tightly packed
681 // into a depth or stencil section of the destination buffer. See the spec:
682 // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkBufferImageCopy.html
683 const angle::Format &GetDepthStencilImageToBufferFormat(const angle::Format &imageFormat,
684                                                         VkImageAspectFlagBits copyAspect)
685 {
686     if (copyAspect == VK_IMAGE_ASPECT_STENCIL_BIT)
687     {
688         ASSERT(imageFormat.id == angle::FormatID::D24_UNORM_S8_UINT ||
689                imageFormat.id == angle::FormatID::D32_FLOAT_S8X24_UINT ||
690                imageFormat.id == angle::FormatID::S8_UINT);
691         return angle::Format::Get(angle::FormatID::S8_UINT);
692     }
693 
694     ASSERT(copyAspect == VK_IMAGE_ASPECT_DEPTH_BIT);
695 
696     switch (imageFormat.id)
697     {
698         case angle::FormatID::D16_UNORM:
699             return imageFormat;
700         case angle::FormatID::D24_UNORM_X8_UINT:
701             return imageFormat;
702         case angle::FormatID::D24_UNORM_S8_UINT:
703             return angle::Format::Get(angle::FormatID::D24_UNORM_X8_UINT);
704         case angle::FormatID::D32_FLOAT:
705             return imageFormat;
706         case angle::FormatID::D32_FLOAT_S8X24_UINT:
707             return angle::Format::Get(angle::FormatID::D32_FLOAT);
708         default:
709             UNREACHABLE();
710             return imageFormat;
711     }
712 }
713 
714 VkClearValue GetRobustResourceClearValue(const angle::Format &intendedFormat,
715                                          const angle::Format &actualFormat)
716 {
717     VkClearValue clearValue = {};
718     if (intendedFormat.hasDepthOrStencilBits())
719     {
720         clearValue.depthStencil = kRobustInitDepthStencilValue;
721     }
722     else
723     {
724         clearValue.color = HasEmulatedImageChannels(intendedFormat, actualFormat)
725                                ? kEmulatedInitColorValue
726                                : kRobustInitColorValue;
727     }
728     return clearValue;
729 }
730 
731 #if !defined(ANGLE_PLATFORM_MACOS) && !defined(ANGLE_PLATFORM_ANDROID)
732 bool IsExternalQueueFamily(uint32_t queueFamilyIndex)
733 {
734     return queueFamilyIndex == VK_QUEUE_FAMILY_EXTERNAL ||
735            queueFamilyIndex == VK_QUEUE_FAMILY_FOREIGN_EXT;
736 }
737 #endif
738 
739 bool IsShaderReadOnlyLayout(const ImageMemoryBarrierData &imageLayout)
740 {
741     return imageLayout.layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
742 }
743 
744 bool IsAnySubresourceContentDefined(const gl::TexLevelArray<angle::BitSet8<8>> &contentDefined)
745 {
746     for (const angle::BitSet8<8> &levelContentDefined : contentDefined)
747     {
748         if (levelContentDefined.any())
749         {
750             return true;
751         }
752     }
753     return false;
754 }
755 
756 void ExtendRenderPassInvalidateArea(const gl::Rectangle &invalidateArea, gl::Rectangle *out)
757 {
758     if (out->empty())
759     {
760         *out = invalidateArea;
761     }
762     else
763     {
764         gl::ExtendRectangle(*out, invalidateArea, out);
765     }
766 }
767 
768 bool CanCopyWithTransferForCopyImage(RendererVk *renderer,
769                                      ImageHelper *srcImage,
770                                      VkImageTiling srcTilingMode,
771                                      ImageHelper *dstImage,
772                                      VkImageTiling dstTilingMode)
773 {
774     // Neither source nor destination formats can be emulated for copy image through transfer,
775     // unless they are emulated with the same format!
776     bool isFormatCompatible =
777         (!srcImage->hasEmulatedImageFormat() && !dstImage->hasEmulatedImageFormat()) ||
778         srcImage->getActualFormatID() == dstImage->getActualFormatID();
779 
780     // If neither format is emulated, GL validation ensures that pixelBytes is the same for both.
781     ASSERT(!isFormatCompatible ||
782            srcImage->getActualFormat().pixelBytes == dstImage->getActualFormat().pixelBytes);
783 
784     return isFormatCompatible &&
785            CanCopyWithTransfer(renderer, srcImage->getActualFormatID(), srcTilingMode,
786                                dstImage->getActualFormatID(), dstTilingMode);
787 }
788 
789 void ReleaseBufferListToRenderer(RendererVk *renderer, BufferHelperPointerVector *buffers)
790 {
791     for (std::unique_ptr<BufferHelper> &toFree : *buffers)
792     {
793         toFree->release(renderer);
794     }
795     buffers->clear();
796 }
797 
798 void DestroyBufferList(RendererVk *renderer, BufferHelperPointerVector *buffers)
799 {
800     for (std::unique_ptr<BufferHelper> &toDestroy : *buffers)
801     {
802         toDestroy->destroy(renderer);
803     }
804     buffers->clear();
805 }
806 
807 bool ShouldReleaseFreeBuffer(const vk::BufferHelper &buffer,
808                              size_t dynamicBufferSize,
809                              DynamicBufferPolicy policy,
810                              size_t freeListSize)
811 {
812     constexpr size_t kLimitedFreeListMaxSize = 1;
813 
814     // If the dynamic buffer was resized we cannot reuse the retained buffer.  Additionally,
815     // only reuse the buffer if specifically requested.
816     const bool sizeMismatch    = buffer.getSize() != dynamicBufferSize;
817     const bool releaseByPolicy = policy == DynamicBufferPolicy::OneShotUse ||
818                                  (policy == DynamicBufferPolicy::SporadicTextureUpload &&
819                                   freeListSize >= kLimitedFreeListMaxSize);
820 
821     return sizeMismatch || releaseByPolicy;
822 }
823 }  // anonymous namespace
824 
825 // This is an arbitrary max. We can change this later if necessary.
826 uint32_t DynamicDescriptorPool::mMaxSetsPerPool           = 16;
827 uint32_t DynamicDescriptorPool::mMaxSetsPerPoolMultiplier = 2;
828 
829 VkImageCreateFlags GetImageCreateFlags(gl::TextureType textureType)
830 {
831     switch (textureType)
832     {
833         case gl::TextureType::CubeMap:
834         case gl::TextureType::CubeMapArray:
835             return VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
836 
837         case gl::TextureType::_3D:
838             return VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT;
839 
840         default:
841             return 0;
842     }
843 }
844 
845 ImageLayout GetImageLayoutFromGLImageLayout(GLenum layout)
846 {
847     switch (layout)
848     {
849         case GL_NONE:
850             return ImageLayout::Undefined;
851         case GL_LAYOUT_GENERAL_EXT:
852             return ImageLayout::ExternalShadersWrite;
853         case GL_LAYOUT_COLOR_ATTACHMENT_EXT:
854             return ImageLayout::ColorAttachment;
855         case GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT:
856         case GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT:
857         case GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT:
858         case GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT:
859             // Note: once VK_KHR_separate_depth_stencil_layouts becomes core or ubiquitous, we
860             // should optimize depth/stencil image layout transitions to only be performed on the
861             // aspect that needs transition.  In that case, these four layouts can be distinguished
862             // and optimized.  Note that the exact equivalent of these layouts are specified in
863             // VK_KHR_maintenance2, which are also usable, granted we transition the pair of
864             // depth/stencil layouts accordingly elsewhere in ANGLE.
865             return ImageLayout::DepthStencilAttachment;
866         case GL_LAYOUT_SHADER_READ_ONLY_EXT:
867             return ImageLayout::ExternalShadersReadOnly;
868         case GL_LAYOUT_TRANSFER_SRC_EXT:
869             return ImageLayout::TransferSrc;
870         case GL_LAYOUT_TRANSFER_DST_EXT:
871             return ImageLayout::TransferDst;
872         default:
873             UNREACHABLE();
874             return vk::ImageLayout::Undefined;
875     }
876 }
877 
878 GLenum ConvertImageLayoutToGLImageLayout(ImageLayout layout)
879 {
880     switch (layout)
881     {
882         case ImageLayout::Undefined:
883             return GL_NONE;
884         case ImageLayout::ColorAttachment:
885             return GL_LAYOUT_COLOR_ATTACHMENT_EXT;
886         case ImageLayout::ColorAttachmentAndFragmentShaderRead:
887         case ImageLayout::ColorAttachmentAndAllShadersRead:
888         case ImageLayout::DSAttachmentWriteAndFragmentShaderRead:
889         case ImageLayout::DSAttachmentWriteAndAllShadersRead:
890         case ImageLayout::DSAttachmentReadAndFragmentShaderRead:
891         case ImageLayout::DSAttachmentReadAndAllShadersRead:
892             break;
893         case ImageLayout::DepthStencilAttachmentReadOnly:
894             return GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT;
895         case ImageLayout::DepthStencilAttachment:
896             return GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT;
897         case ImageLayout::DepthStencilResolveAttachment:
898         case ImageLayout::Present:
899         case ImageLayout::SharedPresent:
900         case ImageLayout::ExternalPreInitialized:
901             break;
902         case ImageLayout::ExternalShadersReadOnly:
903             return GL_LAYOUT_SHADER_READ_ONLY_EXT;
904         case ImageLayout::ExternalShadersWrite:
905             return GL_LAYOUT_GENERAL_EXT;
906         case ImageLayout::TransferSrc:
907             return GL_LAYOUT_TRANSFER_SRC_EXT;
908         case ImageLayout::TransferDst:
909             return GL_LAYOUT_TRANSFER_DST_EXT;
910         case ImageLayout::VertexShaderReadOnly:
911         case ImageLayout::VertexShaderWrite:
912         case ImageLayout::PreFragmentShadersReadOnly:
913         case ImageLayout::PreFragmentShadersWrite:
914         case ImageLayout::FragmentShaderReadOnly:
915         case ImageLayout::FragmentShaderWrite:
916         case ImageLayout::ComputeShaderReadOnly:
917         case ImageLayout::ComputeShaderWrite:
918         case ImageLayout::AllGraphicsShadersReadOnly:
919         case ImageLayout::AllGraphicsShadersWrite:
920         case ImageLayout::InvalidEnum:
921             break;
922     }
923     UNREACHABLE();
924     return GL_NONE;
925 }
926 
927 VkImageLayout ConvertImageLayoutToVkImageLayout(ImageLayout imageLayout)
928 {
929     return kImageMemoryBarrierData[imageLayout].layout;
930 }
931 
932 bool FormatHasNecessaryFeature(RendererVk *renderer,
933                                angle::FormatID formatID,
934                                VkImageTiling tilingMode,
935                                VkFormatFeatureFlags featureBits)
936 {
937     return (tilingMode == VK_IMAGE_TILING_OPTIMAL)
938                ? renderer->hasImageFormatFeatureBits(formatID, featureBits)
939                : renderer->hasLinearImageFormatFeatureBits(formatID, featureBits);
940 }
941 
942 bool CanCopyWithTransfer(RendererVk *renderer,
943                          angle::FormatID srcFormatID,
944                          VkImageTiling srcTilingMode,
945                          angle::FormatID dstFormatID,
946                          VkImageTiling dstTilingMode)
947 {
948     // Check that the formats involved in the transfer copy have the appropriate tiling and transfer feature bits.
949     bool isTilingCompatible           = srcTilingMode == dstTilingMode;
950     bool srcFormatHasNecessaryFeature = FormatHasNecessaryFeature(
951         renderer, srcFormatID, srcTilingMode, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT);
952     bool dstFormatHasNecessaryFeature = FormatHasNecessaryFeature(
953         renderer, dstFormatID, dstTilingMode, VK_FORMAT_FEATURE_TRANSFER_DST_BIT);
954 
955     return isTilingCompatible && srcFormatHasNecessaryFeature && dstFormatHasNecessaryFeature;
956 }
957 
958 // PackedClearValuesArray implementation
959 PackedClearValuesArray::PackedClearValuesArray() : mValues{} {}
960 PackedClearValuesArray::~PackedClearValuesArray() = default;
961 
962 PackedClearValuesArray::PackedClearValuesArray(const PackedClearValuesArray &other) = default;
963 PackedClearValuesArray &PackedClearValuesArray::operator=(const PackedClearValuesArray &rhs) =
964     default;
965 
966 void PackedClearValuesArray::store(PackedAttachmentIndex index,
967                                    VkImageAspectFlags aspectFlags,
968                                    const VkClearValue &clearValue)
969 {
970     ASSERT(aspectFlags != 0);
971     if (aspectFlags != VK_IMAGE_ASPECT_STENCIL_BIT)
972     {
973         storeNoDepthStencil(index, clearValue);
974     }
975 }
976 
977 void PackedClearValuesArray::storeNoDepthStencil(PackedAttachmentIndex index,
978                                                  const VkClearValue &clearValue)
979 {
980     mValues[index.get()] = clearValue;
981 }
982 
983 // CommandBufferHelper implementation.
984 CommandBufferHelper::CommandBufferHelper()
985     : mPipelineBarriers(),
986       mPipelineBarrierMask(),
987       mCommandPool(nullptr),
988       mCounter(0),
989       mClearValues{},
990       mRenderPassStarted(false),
991       mTransformFeedbackCounterBuffers{},
992       mValidTransformFeedbackBufferCount(0),
993       mRebindTransformFeedbackBuffers(false),
994       mIsTransformFeedbackActiveUnpaused(false),
995       mIsRenderPassCommandBuffer(false),
996       mHasShaderStorageOutput(false),
997       mHasGLMemoryBarrierIssued(false),
998       mDepthAccess(ResourceAccess::Unused),
999       mStencilAccess(ResourceAccess::Unused),
1000       mDepthCmdCountInvalidated(kInfiniteCmdCount),
1001       mDepthCmdCountDisabled(kInfiniteCmdCount),
1002       mStencilCmdCountInvalidated(kInfiniteCmdCount),
1003       mStencilCmdCountDisabled(kInfiniteCmdCount),
1004       mDepthStencilAttachmentIndex(kAttachmentIndexInvalid),
1005       mDepthStencilImage(nullptr),
1006       mDepthStencilResolveImage(nullptr),
1007       mDepthStencilLevelIndex(0),
1008       mDepthStencilLayerIndex(0),
1009       mDepthStencilLayerCount(0),
1010       mColorImagesCount(0),
1011       mImageOptimizeForPresent(nullptr)
1012 {}
1013 
1014 CommandBufferHelper::~CommandBufferHelper()
1015 {
1016     mFramebuffer.setHandle(VK_NULL_HANDLE);
1017 }
1018 
1019 angle::Result CommandBufferHelper::initialize(Context *context,
1020                                               bool isRenderPassCommandBuffer,
1021                                               CommandPool *commandPool)
1022 {
1023     ASSERT(mUsedBuffers.empty());
1024     constexpr size_t kInitialBufferCount = 128;
1025     mUsedBuffers.ensureCapacity(kInitialBufferCount);
1026 
1027     mAllocator.initialize(kDefaultPoolAllocatorPageSize, 1);
1028     // Push a scope into the pool allocator so we can easily free and re-init on reset()
1029     mAllocator.push();
1030 
1031     mIsRenderPassCommandBuffer = isRenderPassCommandBuffer;
1032     mCommandPool               = commandPool;
1033 
1034     return initializeCommandBuffer(context);
1035 }
1036 
1037 angle::Result CommandBufferHelper::initializeCommandBuffer(Context *context)
1038 {
1039     return mCommandBuffer.initialize(context, mCommandPool, mIsRenderPassCommandBuffer,
1040                                      &mAllocator);
1041 }
1042 
1043 angle::Result CommandBufferHelper::reset(Context *context)
1044 {
1045     mAllocator.pop();
1046     mAllocator.push();
1047 
1048     // Reset and re-initialize the command buffer
1049     context->getRenderer()->resetSecondaryCommandBuffer(std::move(mCommandBuffer));
1050     ANGLE_TRY(initializeCommandBuffer(context));
1051 
1052     mUsedBuffers.clear();
1053 
1054     if (mIsRenderPassCommandBuffer)
1055     {
1056         mRenderPassStarted                 = false;
1057         mValidTransformFeedbackBufferCount = 0;
1058         mRebindTransformFeedbackBuffers    = false;
1059         mHasShaderStorageOutput            = false;
1060         mHasGLMemoryBarrierIssued          = false;
1061         mDepthAccess                       = ResourceAccess::Unused;
1062         mStencilAccess                     = ResourceAccess::Unused;
1063         mDepthCmdCountInvalidated          = kInfiniteCmdCount;
1064         mDepthCmdCountDisabled             = kInfiniteCmdCount;
1065         mStencilCmdCountInvalidated        = kInfiniteCmdCount;
1066         mStencilCmdCountDisabled           = kInfiniteCmdCount;
1067         mColorImagesCount                  = PackedAttachmentCount(0);
1068         mDepthStencilAttachmentIndex       = kAttachmentIndexInvalid;
1069         mDepthInvalidateArea               = gl::Rectangle();
1070         mStencilInvalidateArea             = gl::Rectangle();
1071         mRenderPassUsedImages.clear();
1072         mDepthStencilImage        = nullptr;
1073         mDepthStencilResolveImage = nullptr;
1074         mColorImages.reset();
1075         mColorResolveImages.reset();
1076         mImageOptimizeForPresent = nullptr;
1077     }
1078     // This state should never change for a non-renderPass command buffer.
1079     ASSERT(mRenderPassStarted == false);
1080     ASSERT(mValidTransformFeedbackBufferCount == 0);
1081     ASSERT(!mRebindTransformFeedbackBuffers);
1082     ASSERT(!mIsTransformFeedbackActiveUnpaused);
1083     ASSERT(mRenderPassUsedImages.empty());
1084 
1085     return angle::Result::Continue;
1086 }
1087 
1088 bool CommandBufferHelper::usesBuffer(const BufferHelper &buffer) const
1089 {
1090     return mUsedBuffers.contains(buffer.getBufferSerial().getValue());
1091 }
1092 
1093 bool CommandBufferHelper::usesBufferForWrite(const BufferHelper &buffer) const
1094 {
1095     BufferAccess access;
1096     if (!mUsedBuffers.get(buffer.getBufferSerial().getValue(), &access))
1097     {
1098         return false;
1099     }
1100     return access == BufferAccess::Write;
1101 }
1102 
1103 void CommandBufferHelper::bufferRead(ContextVk *contextVk,
1104                                      VkAccessFlags readAccessType,
1105                                      PipelineStage readStage,
1106                                      BufferHelper *buffer)
1107 {
1108     VkPipelineStageFlagBits stageBits = kPipelineStageFlagBitMap[readStage];
1109     if (buffer->recordReadBarrier(readAccessType, stageBits, &mPipelineBarriers[readStage]))
1110     {
1111         mPipelineBarrierMask.set(readStage);
1112     }
1113 
1114     ASSERT(!usesBufferForWrite(*buffer));
1115     if (!mUsedBuffers.contains(buffer->getBufferSerial().getValue()))
1116     {
1117         mUsedBuffers.insert(buffer->getBufferSerial().getValue(), BufferAccess::Read);
1118         buffer->retainReadOnly(&contextVk->getResourceUseList());
1119     }
1120 }
1121 
1122 void CommandBufferHelper::bufferWrite(ContextVk *contextVk,
1123                                       VkAccessFlags writeAccessType,
1124                                       PipelineStage writeStage,
1125                                       AliasingMode aliasingMode,
1126                                       BufferHelper *buffer)
1127 {
1128     buffer->retainReadWrite(&contextVk->getResourceUseList());
1129     VkPipelineStageFlagBits stageBits = kPipelineStageFlagBitMap[writeStage];
1130     if (buffer->recordWriteBarrier(writeAccessType, stageBits, &mPipelineBarriers[writeStage]))
1131     {
1132         mPipelineBarrierMask.set(writeStage);
1133     }
1134 
1135     // Storage buffers are special. They can alias one another in a shader.
1136     // We support aliasing by not tracking storage buffers. This works well with the GL API
1137     // because storage buffers are required to be externally synchronized.
1138     // Compute / XFB emulation buffers are not allowed to alias.
1139     if (aliasingMode == AliasingMode::Disallowed)
1140     {
1141         ASSERT(!usesBuffer(*buffer));
1142         mUsedBuffers.insert(buffer->getBufferSerial().getValue(), BufferAccess::Write);
1143     }
1144 
1145     // Make sure host-visible buffer writes result in a barrier inserted at the end of the frame to
1146     // make the results visible to the host.  The buffer may be mapped by the application in the
1147     // future.
1148     if (buffer->isHostVisible())
1149     {
1150         contextVk->onHostVisibleBufferWrite();
1151     }
1152 }
1153 
1154 void CommandBufferHelper::imageRead(ContextVk *contextVk,
1155                                     VkImageAspectFlags aspectFlags,
1156                                     ImageLayout imageLayout,
1157                                     ImageHelper *image)
1158 {
1159     if (image->isReadBarrierNecessary(imageLayout))
1160     {
1161         updateImageLayoutAndBarrier(contextVk, image, aspectFlags, imageLayout);
1162     }
1163 
1164     if (mIsRenderPassCommandBuffer)
1165     {
1166         // As noted in the header we don't support multiple read layouts for Images.
1167         // We allow duplicate uses in the RP to accommodate normal GL sampler usage.
1168         if (!usesImageInRenderPass(*image))
1169         {
1170             mRenderPassUsedImages.insert(image->getImageSerial().getValue());
1171             image->retain(&contextVk->getResourceUseList());
1172         }
1173     }
1174     else
1175     {
1176         image->retain(&contextVk->getResourceUseList());
1177     }
1178 }
1179 
1180 void CommandBufferHelper::imageWrite(ContextVk *contextVk,
1181                                      gl::LevelIndex level,
1182                                      uint32_t layerStart,
1183                                      uint32_t layerCount,
1184                                      VkImageAspectFlags aspectFlags,
1185                                      ImageLayout imageLayout,
1186                                      AliasingMode aliasingMode,
1187                                      ImageHelper *image)
1188 {
1189     image->retain(&contextVk->getResourceUseList());
1190     image->onWrite(level, 1, layerStart, layerCount, aspectFlags);
1191     // Write always requires a barrier
1192     updateImageLayoutAndBarrier(contextVk, image, aspectFlags, imageLayout);
1193 
1194     if (mIsRenderPassCommandBuffer)
1195     {
1196         // When used as a storage image we allow for aliased writes.
1197         if (aliasingMode == AliasingMode::Disallowed)
1198         {
1199             ASSERT(!usesImageInRenderPass(*image));
1200         }
1201         if (!usesImageInRenderPass(*image))
1202         {
1203             mRenderPassUsedImages.insert(image->getImageSerial().getValue());
1204         }
1205     }
1206 }
1207 
1208 void CommandBufferHelper::colorImagesDraw(ResourceUseList *resourceUseList,
1209                                           ImageHelper *image,
1210                                           ImageHelper *resolveImage,
1211                                           PackedAttachmentIndex packedAttachmentIndex)
1212 {
1213     ASSERT(mIsRenderPassCommandBuffer);
1214     ASSERT(packedAttachmentIndex < mColorImagesCount);
1215 
1216     image->retain(resourceUseList);
1217     if (!usesImageInRenderPass(*image))
1218     {
1219         // This is possible due to different layers of the same texture being attached to different
1220         // attachments
1221         mRenderPassUsedImages.insert(image->getImageSerial().getValue());
1222     }
1223     ASSERT(mColorImages[packedAttachmentIndex] == nullptr);
1224     mColorImages[packedAttachmentIndex] = image;
1225     image->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);
1226 
1227     if (resolveImage)
1228     {
1229         resolveImage->retain(resourceUseList);
1230         if (!usesImageInRenderPass(*resolveImage))
1231         {
1232             mRenderPassUsedImages.insert(resolveImage->getImageSerial().getValue());
1233         }
1234         ASSERT(mColorResolveImages[packedAttachmentIndex] == nullptr);
1235         mColorResolveImages[packedAttachmentIndex] = resolveImage;
1236         resolveImage->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);
1237     }
1238 }
1239 
1240 void CommandBufferHelper::depthStencilImagesDraw(ResourceUseList *resourceUseList,
1241                                                  gl::LevelIndex level,
1242                                                  uint32_t layerStart,
1243                                                  uint32_t layerCount,
1244                                                  ImageHelper *image,
1245                                                  ImageHelper *resolveImage)
1246 {
1247     ASSERT(mIsRenderPassCommandBuffer);
1248     ASSERT(!usesImageInRenderPass(*image));
1249     ASSERT(!resolveImage || !usesImageInRenderPass(*resolveImage));
1250 
1251     // Because the depth/stencil buffer's read/write property can change while we build the
1252     // render pass, we defer the image layout changes until endRenderPass time or until the
1253     // images go away, so that we only insert the layout change barrier once.
1254     image->retain(resourceUseList);
1255     mRenderPassUsedImages.insert(image->getImageSerial().getValue());
1256     mDepthStencilImage      = image;
1257     mDepthStencilLevelIndex = level;
1258     mDepthStencilLayerIndex = layerStart;
1259     mDepthStencilLayerCount = layerCount;
1260     image->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);
1261 
1262     if (resolveImage)
1263     {
1264         // Note that the resolve depth/stencil image has the same level/layer index as the
1265         // depth/stencil image as currently it can only ever come from
1266         // multisampled-render-to-texture renderbuffers.
1267         resolveImage->retain(resourceUseList);
1268         mRenderPassUsedImages.insert(resolveImage->getImageSerial().getValue());
1269         mDepthStencilResolveImage = resolveImage;
1270         resolveImage->setRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment);
1271     }
1272 }
1273 
1274 void CommandBufferHelper::onDepthAccess(ResourceAccess access)
1275 {
1276     // Update the access for optimizing this render pass's loadOp
1277     UpdateAccess(&mDepthAccess, access);
1278 
1279     // Update the invalidate state for optimizing this render pass's storeOp
1280     if (onDepthStencilAccess(access, &mDepthCmdCountInvalidated, &mDepthCmdCountDisabled))
1281     {
1282         // The attachment is no longer invalid, so restore its content.
1283         restoreDepthContent();
1284     }
1285 }
1286 
1287 void CommandBufferHelper::onStencilAccess(ResourceAccess access)
1288 {
1289     // Update the access for optimizing this render pass's loadOp
1290     UpdateAccess(&mStencilAccess, access);
1291 
1292     // Update the invalidate state for optimizing this render pass's stencilStoreOp
1293     if (onDepthStencilAccess(access, &mStencilCmdCountInvalidated, &mStencilCmdCountDisabled))
1294     {
1295         // The attachment is no longer invalid, so restore its content.
1296         restoreStencilContent();
1297     }
1298 }
1299 
1300 bool CommandBufferHelper::onDepthStencilAccess(ResourceAccess access,
1301                                                uint32_t *cmdCountInvalidated,
1302                                                uint32_t *cmdCountDisabled)
1303 {
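    // cmdCountInvalidated records the render pass write command count at the time of the last
    // invalidate, and cmdCountDisabled records it when writes to the attachment were disabled.
    // kInfiniteCmdCount is the sentinel meaning "not invalidated" / "writes enabled".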
1304     if (*cmdCountInvalidated == kInfiniteCmdCount)
1305     {
1306         // If never invalidated or no longer invalidated, return early.
1307         return false;
1308     }
1309     if (access == ResourceAccess::Write)
1310     {
1311         // Drawing to this attachment is being enabled.  Assume that drawing will immediately occur
1312         // after this attachment is enabled, and that means that the attachment will no longer be
1313         // invalidated.
1314         *cmdCountInvalidated = kInfiniteCmdCount;
1315         *cmdCountDisabled    = kInfiniteCmdCount;
1316         // Return true to indicate that the store op should remain STORE and that mContentDefined
1317         // should be set to true.
1318         return true;
1319     }
1320     else
1321     {
1322         // Drawing to this attachment is being disabled.
1323         if (hasWriteAfterInvalidate(*cmdCountInvalidated, *cmdCountDisabled))
1324         {
1325             // The attachment was previously drawn while enabled, and so is no longer invalidated.
1326             *cmdCountInvalidated = kInfiniteCmdCount;
1327             *cmdCountDisabled    = kInfiniteCmdCount;
1328             // Return true to indicate that the store op should remain STORE and that
1329             // mContentDefined should be set to true.
1330             return true;
1331         }
1332         else
1333         {
1334             // Get the latest CmdCount at the start of being disabled.  At the end of the render
1335             // pass, cmdCountDisabled is <= the actual command buffer size, and so it's compared
1336             // with cmdCountInvalidated.  If the same, the attachment is still invalidated.
1337             *cmdCountDisabled = mCommandBuffer.getRenderPassWriteCommandCount();
1338             return false;
1339         }
1340     }
1341 }
1342 
1343 void CommandBufferHelper::updateStartedRenderPassWithDepthMode(bool readOnlyDepthStencilMode)
1344 {
1345     ASSERT(mIsRenderPassCommandBuffer);
1346     ASSERT(mRenderPassStarted);
1347 
1348     if (mDepthStencilImage)
1349     {
1350         if (readOnlyDepthStencilMode)
1351         {
1352             mDepthStencilImage->setRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment);
1353         }
1354         else
1355         {
1356             mDepthStencilImage->clearRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment);
1357         }
1358     }
1359 
1360     if (mDepthStencilResolveImage)
1361     {
1362         if (readOnlyDepthStencilMode)
1363         {
1364             mDepthStencilResolveImage->setRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment);
1365         }
1366         else
1367         {
1368             mDepthStencilResolveImage->clearRenderPassUsageFlag(
1369                 RenderPassUsage::ReadOnlyAttachment);
1370         }
1371     }
1372 }
1373 
1374 void CommandBufferHelper::restoreDepthContent()
1375 {
1376     // Note that the image may have been deleted since the render pass has started.
1377     if (mDepthStencilImage)
1378     {
1379         ASSERT(mDepthStencilImage->valid());
1380         mDepthStencilImage->restoreSubresourceContent(
1381             mDepthStencilLevelIndex, mDepthStencilLayerIndex, mDepthStencilLayerCount);
1382         mDepthInvalidateArea = gl::Rectangle();
1383     }
1384 }
1385 
1386 void CommandBufferHelper::restoreStencilContent()
1387 {
1388     // Note that the image may have been deleted since the render pass has started.
1389     if (mDepthStencilImage)
1390     {
1391         ASSERT(mDepthStencilImage->valid());
1392         mDepthStencilImage->restoreSubresourceStencilContent(
1393             mDepthStencilLevelIndex, mDepthStencilLayerIndex, mDepthStencilLayerCount);
1394         mStencilInvalidateArea = gl::Rectangle();
1395     }
1396 }
1397 
1398 void CommandBufferHelper::executeBarriers(const angle::FeaturesVk &features,
1399                                           PrimaryCommandBuffer *primary)
1400 {
1401     // make a local copy for faster access
1402     PipelineStagesMask mask = mPipelineBarrierMask;
1403     if (mask.none())
1404     {
1405         return;
1406     }
1407 
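    // When preferAggregateBarrierCalls is enabled, fold every pending per-stage barrier into a
    // single pipeline barrier call; otherwise issue one barrier call per dirty pipeline stage.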
1408     if (features.preferAggregateBarrierCalls.enabled)
1409     {
1410         PipelineStagesMask::Iterator iter = mask.begin();
1411         PipelineBarrier &barrier          = mPipelineBarriers[*iter];
1412         for (++iter; iter != mask.end(); ++iter)
1413         {
1414             barrier.merge(&mPipelineBarriers[*iter]);
1415         }
1416         barrier.execute(primary);
1417     }
1418     else
1419     {
1420         for (PipelineStage pipelineStage : mask)
1421         {
1422             PipelineBarrier &barrier = mPipelineBarriers[pipelineStage];
1423             barrier.execute(primary);
1424         }
1425     }
1426     mPipelineBarrierMask.reset();
1427 }
1428 
1429 void CommandBufferHelper::updateImageLayoutAndBarrier(Context *context,
1430                                                       ImageHelper *image,
1431                                                       VkImageAspectFlags aspectFlags,
1432                                                       ImageLayout imageLayout)
1433 {
1434     PipelineStage barrierIndex = kImageMemoryBarrierData[imageLayout].barrierIndex;
1435     ASSERT(barrierIndex != PipelineStage::InvalidEnum);
1436     PipelineBarrier *barrier = &mPipelineBarriers[barrierIndex];
1437     if (image->updateLayoutAndBarrier(context, aspectFlags, imageLayout, barrier))
1438     {
1439         mPipelineBarrierMask.set(barrierIndex);
1440     }
1441 }
1442 
1443 void CommandBufferHelper::finalizeColorImageLayout(Context *context,
1444                                                    ImageHelper *image,
1445                                                    PackedAttachmentIndex packedAttachmentIndex,
1446                                                    bool isResolveImage)
1447 {
1448     ASSERT(mIsRenderPassCommandBuffer);
1449     ASSERT(packedAttachmentIndex < mColorImagesCount);
1450     ASSERT(image != nullptr);
1451 
1452     // Do layout change.
1453     ImageLayout imageLayout;
1454     if (image->usedByCurrentRenderPassAsAttachmentAndSampler())
1455     {
1456         // texture code already picked layout and inserted barrier
1457         imageLayout = image->getCurrentImageLayout();
1458         ASSERT(imageLayout == ImageLayout::ColorAttachmentAndFragmentShaderRead ||
1459                imageLayout == ImageLayout::ColorAttachmentAndAllShadersRead);
1460     }
1461     else
1462     {
1463         imageLayout = ImageLayout::ColorAttachment;
1464         updateImageLayoutAndBarrier(context, image, VK_IMAGE_ASPECT_COLOR_BIT, imageLayout);
1465     }
1466 
1467     if (!isResolveImage)
1468     {
1469         mAttachmentOps.setLayouts(packedAttachmentIndex, imageLayout, imageLayout);
1470     }
1471 
1472     if (mImageOptimizeForPresent == image)
1473     {
1474         ASSERT(packedAttachmentIndex == kAttachmentIndexZero);
1475         // Use finalLayout instead of extra barrier for layout change to present
1476         mImageOptimizeForPresent->setCurrentImageLayout(vk::ImageLayout::Present);
1477         // TODO(syoussefi):  We currently don't store the layout of the resolve attachments, so once
1478         // multisampled backbuffers are optimized to use resolve attachments, this information needs
1479         // to be stored somewhere.  http://anglebug.com/4836
1480         SetBitField(mAttachmentOps[packedAttachmentIndex].finalLayout,
1481                     mImageOptimizeForPresent->getCurrentImageLayout());
1482         mImageOptimizeForPresent = nullptr;
1483     }
1484 
1485     image->resetRenderPassUsageFlags();
1486 }
1487 
1488 void CommandBufferHelper::finalizeDepthStencilImageLayout(Context *context)
1489 {
1490     ASSERT(mIsRenderPassCommandBuffer);
1491     ASSERT(mDepthStencilImage);
1492 
1493     // Do depth stencil layout change.
1494     ImageLayout imageLayout;
1495     bool barrierRequired;
1496 
1497     if (mDepthStencilImage->usedByCurrentRenderPassAsAttachmentAndSampler())
1498     {
1499         // texture code already picked layout and inserted barrier
1500         imageLayout = mDepthStencilImage->getCurrentImageLayout();
1501         if (mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
1502         {
1503             ASSERT(imageLayout == ImageLayout::DSAttachmentReadAndFragmentShaderRead ||
1504                    imageLayout == ImageLayout::DSAttachmentReadAndAllShadersRead);
1505             barrierRequired = mDepthStencilImage->isReadBarrierNecessary(imageLayout);
1506         }
1507         else
1508         {
1509             ASSERT(imageLayout == ImageLayout::DSAttachmentWriteAndFragmentShaderRead ||
1510                    imageLayout == ImageLayout::DSAttachmentWriteAndAllShadersRead);
1511             barrierRequired = true;
1512         }
1513     }
1514     else if (mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
1515     {
1516         imageLayout     = ImageLayout::DepthStencilAttachmentReadOnly;
1517         barrierRequired = mDepthStencilImage->isReadBarrierNecessary(imageLayout);
1518     }
1519     else
1520     {
1521         // Write always requires a barrier
1522         imageLayout     = ImageLayout::DepthStencilAttachment;
1523         barrierRequired = true;
1524     }
1525 
1526     mAttachmentOps.setLayouts(mDepthStencilAttachmentIndex, imageLayout, imageLayout);
1527 
1528     if (barrierRequired)
1529     {
1530         const angle::Format &format = mDepthStencilImage->getActualFormat();
1531         ASSERT(format.hasDepthOrStencilBits());
1532         VkImageAspectFlags aspectFlags = GetDepthStencilAspectFlags(format);
1533         updateImageLayoutAndBarrier(context, mDepthStencilImage, aspectFlags, imageLayout);
1534     }
1535 }
1536 
1537 void CommandBufferHelper::finalizeDepthStencilResolveImageLayout(Context *context)
1538 {
1539     ASSERT(mIsRenderPassCommandBuffer);
1540     ASSERT(mDepthStencilImage);
1541     ASSERT(!mDepthStencilResolveImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment));
1542 
1543     ImageLayout imageLayout     = ImageLayout::DepthStencilResolveAttachment;
1544     const angle::Format &format = mDepthStencilResolveImage->getActualFormat();
1545     ASSERT(format.hasDepthOrStencilBits());
1546     VkImageAspectFlags aspectFlags = GetDepthStencilAspectFlags(format);
1547 
1548     updateImageLayoutAndBarrier(context, mDepthStencilResolveImage, aspectFlags, imageLayout);
1549 
1550     if (!mDepthStencilResolveImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
1551     {
1552         ASSERT(mDepthStencilAttachmentIndex != kAttachmentIndexInvalid);
1553         const PackedAttachmentOpsDesc &dsOps = mAttachmentOps[mDepthStencilAttachmentIndex];
1554 
1555         // If the image is being written to, mark its contents defined.
1556         VkImageAspectFlags definedAspects = 0;
1557         if (!dsOps.isInvalidated)
1558         {
1559             definedAspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1560         }
1561         if (!dsOps.isStencilInvalidated)
1562         {
1563             definedAspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1564         }
1565         if (definedAspects != 0)
1566         {
1567             mDepthStencilResolveImage->onWrite(mDepthStencilLevelIndex, 1, mDepthStencilLayerIndex,
1568                                                mDepthStencilLayerCount, definedAspects);
1569         }
1570     }
1571 
1572     mDepthStencilResolveImage->resetRenderPassUsageFlags();
1573 }
1574 
1575 void CommandBufferHelper::finalizeImageLayout(Context *context, const ImageHelper *image)
1576 {
1577     ASSERT(mIsRenderPassCommandBuffer);
1578 
1579     if (image->hasRenderPassUsageFlag(RenderPassUsage::RenderTargetAttachment))
1580     {
1581         for (PackedAttachmentIndex index = kAttachmentIndexZero; index < mColorImagesCount; ++index)
1582         {
1583             if (mColorImages[index] == image)
1584             {
1585                 finalizeColorImageLayout(context, mColorImages[index], index, false);
1586                 mColorImages[index] = nullptr;
1587             }
1588             else if (mColorResolveImages[index] == image)
1589             {
1590                 finalizeColorImageLayout(context, mColorResolveImages[index], index, true);
1591                 mColorResolveImages[index] = nullptr;
1592             }
1593         }
1594     }
1595 
1596     if (mDepthStencilImage == image)
1597     {
1598         finalizeDepthStencilImageLayoutAndLoadStore(context);
1599         mDepthStencilImage = nullptr;
1600     }
1601 
1602     if (mDepthStencilResolveImage == image)
1603     {
1604         finalizeDepthStencilResolveImageLayout(context);
1605         mDepthStencilResolveImage = nullptr;
1606     }
1607 }
1608 
1609 void CommandBufferHelper::finalizeDepthStencilLoadStore(Context *context)
1610 {
1611     ASSERT(mDepthStencilAttachmentIndex != kAttachmentIndexInvalid);
1612 
1613     PackedAttachmentOpsDesc &dsOps   = mAttachmentOps[mDepthStencilAttachmentIndex];
1614     RenderPassLoadOp depthLoadOp     = static_cast<RenderPassLoadOp>(dsOps.loadOp);
1615     RenderPassStoreOp depthStoreOp   = static_cast<RenderPassStoreOp>(dsOps.storeOp);
1616     RenderPassLoadOp stencilLoadOp   = static_cast<RenderPassLoadOp>(dsOps.stencilLoadOp);
1617     RenderPassStoreOp stencilStoreOp = static_cast<RenderPassStoreOp>(dsOps.stencilStoreOp);
1618 
1619     // This has to be called after the layout has been finalized.
1620     ASSERT(dsOps.initialLayout != static_cast<uint16_t>(ImageLayout::Undefined));
1621 
1622     // Ensure we don't write to a read-only RenderPass. (ReadOnly -> !Write)
1623     ASSERT(!mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment) ||
1624            (mDepthAccess != ResourceAccess::Write && mStencilAccess != ResourceAccess::Write));
1625 
1626     // If the attachment is invalidated, skip the store op.  If we are not loading or clearing the
1627     // attachment and the attachment has not been used, auto-invalidate it.
1628     const bool depthNotLoaded =
1629         depthLoadOp == RenderPassLoadOp::DontCare && !mRenderPassDesc.hasDepthUnresolveAttachment();
1630     if (isInvalidated(mDepthCmdCountInvalidated, mDepthCmdCountDisabled) ||
1631         (depthNotLoaded && mDepthAccess != ResourceAccess::Write))
1632     {
1633         depthStoreOp        = RenderPassStoreOp::DontCare;
1634         dsOps.isInvalidated = true;
1635     }
1636     else if (hasWriteAfterInvalidate(mDepthCmdCountInvalidated, mDepthCmdCountDisabled))
1637     {
1638         // The depth attachment was invalidated, but is now valid.  Let the image know the contents
1639         // are now defined so a future render pass would use loadOp=LOAD.
1640         restoreDepthContent();
1641     }
1642     const bool stencilNotLoaded = stencilLoadOp == RenderPassLoadOp::DontCare &&
1643                                   !mRenderPassDesc.hasStencilUnresolveAttachment();
1644     if (isInvalidated(mStencilCmdCountInvalidated, mStencilCmdCountDisabled) ||
1645         (stencilNotLoaded && mStencilAccess != ResourceAccess::Write))
1646     {
1647         stencilStoreOp             = RenderPassStoreOp::DontCare;
1648         dsOps.isStencilInvalidated = true;
1649     }
1650     else if (hasWriteAfterInvalidate(mStencilCmdCountInvalidated, mStencilCmdCountDisabled))
1651     {
1652         // The stencil attachment was invalidated, but is now valid.  Let the image know the
1653         // contents are now defined so a future render pass would use loadOp=LOAD.
1654         restoreStencilContent();
1655     }
1656 
1657     finalizeDepthStencilLoadStoreOps(context, mDepthAccess, &depthLoadOp, &depthStoreOp);
1658     finalizeDepthStencilLoadStoreOps(context, mStencilAccess, &stencilLoadOp, &stencilStoreOp);
1659 
1660     // This has to be done after storeOp has been finalized.
1661     if (!mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment))
1662     {
1663         // If the image is being written to, mark its contents defined.
1664         VkImageAspectFlags definedAspects = 0;
1665         if (depthStoreOp == RenderPassStoreOp::Store)
1666         {
1667             definedAspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1668         }
1669         if (stencilStoreOp == RenderPassStoreOp::Store)
1670         {
1671             definedAspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1672         }
1673         if (definedAspects != 0)
1674         {
1675             mDepthStencilImage->onWrite(mDepthStencilLevelIndex, 1, mDepthStencilLayerIndex,
1676                                         mDepthStencilLayerCount, definedAspects);
1677         }
1678     }
1679 
1680     SetBitField(dsOps.loadOp, depthLoadOp);
1681     SetBitField(dsOps.storeOp, depthStoreOp);
1682     SetBitField(dsOps.stencilLoadOp, stencilLoadOp);
1683     SetBitField(dsOps.stencilStoreOp, stencilStoreOp);
1684 }
1685 
1686 void CommandBufferHelper::finalizeDepthStencilLoadStoreOps(Context *context,
1687                                                            ResourceAccess access,
1688                                                            RenderPassLoadOp *loadOp,
1689                                                            RenderPassStoreOp *storeOp)
1690 {
1691     // For read only depth stencil, we can use StoreOpNone if available.  DontCare is still
1692     // preferred, so do this after handling DontCare.
1693     const bool supportsLoadStoreOpNone =
1694         context->getRenderer()->getFeatures().supportsRenderPassLoadStoreOpNone.enabled;
1695     const bool supportsStoreOpNone =
1696         supportsLoadStoreOpNone ||
1697         context->getRenderer()->getFeatures().supportsRenderPassStoreOpNoneQCOM.enabled;
1698     if (mDepthStencilImage->hasRenderPassUsageFlag(RenderPassUsage::ReadOnlyAttachment) &&
1699         supportsStoreOpNone)
1700     {
1701         if (*storeOp == RenderPassStoreOp::Store)
1702         {
1703             *storeOp = RenderPassStoreOp::None;
1704         }
1705     }
1706 
1707     if (access == ResourceAccess::Unused)
1708     {
1709         if (*storeOp == RenderPassStoreOp::DontCare)
1710         {
1711             // If we are loading or clearing the attachment, but the attachment has not been used,
1712             // and the data has also not been stored back into attachment, then just skip the
1713             // load/clear op.
1714             *loadOp = RenderPassLoadOp::DontCare;
1715         }
1716         else if (*loadOp != RenderPassLoadOp::Clear && supportsLoadStoreOpNone)
1717         {
1718             // Otherwise make sure the attachment is neither loaded nor stored (as it's neither
1719             // used nor invalidated).
1720             *loadOp  = RenderPassLoadOp::None;
1721             *storeOp = RenderPassStoreOp::None;
1722         }
1723     }
1724 }
1725 
1726 void CommandBufferHelper::finalizeDepthStencilImageLayoutAndLoadStore(Context *context)
1727 {
1728     finalizeDepthStencilImageLayout(context);
1729     finalizeDepthStencilLoadStore(context);
1730     mDepthStencilImage->resetRenderPassUsageFlags();
1731 }
1732 
1733 angle::Result CommandBufferHelper::beginRenderPass(
1734     ContextVk *contextVk,
1735     const Framebuffer &framebuffer,
1736     const gl::Rectangle &renderArea,
1737     const RenderPassDesc &renderPassDesc,
1738     const AttachmentOpsArray &renderPassAttachmentOps,
1739     const vk::PackedAttachmentCount colorAttachmentCount,
1740     const PackedAttachmentIndex depthStencilAttachmentIndex,
1741     const PackedClearValuesArray &clearValues,
1742     CommandBuffer **commandBufferOut)
1743 {
1744     ASSERT(mIsRenderPassCommandBuffer);
1745     ASSERT(empty());
1746 
1747     VkCommandBufferInheritanceInfo inheritanceInfo = {};
1748     ANGLE_TRY(vk::CommandBuffer::InitializeRenderPassInheritanceInfo(
1749         contextVk, framebuffer, renderPassDesc, &inheritanceInfo));
1750     ANGLE_TRY(mCommandBuffer.begin(contextVk, inheritanceInfo));
1751 
1752     mRenderPassDesc              = renderPassDesc;
1753     mAttachmentOps               = renderPassAttachmentOps;
1754     mDepthStencilAttachmentIndex = depthStencilAttachmentIndex;
1755     mColorImagesCount            = colorAttachmentCount;
1756     mFramebuffer.setHandle(framebuffer.getHandle());
1757     mRenderArea       = renderArea;
1758     mClearValues      = clearValues;
1759     *commandBufferOut = &mCommandBuffer;
1760 
1761     mRenderPassStarted = true;
1762     mCounter++;
1763 
1764     return angle::Result::Continue;
1765 }
1766 
1767 angle::Result CommandBufferHelper::endRenderPass(ContextVk *contextVk)
1768 {
1769     ANGLE_TRY(mCommandBuffer.end(contextVk));
1770 
1771     for (PackedAttachmentIndex index = kAttachmentIndexZero; index < mColorImagesCount; ++index)
1772     {
1773         if (mColorImages[index])
1774         {
1775             finalizeColorImageLayout(contextVk, mColorImages[index], index, false);
1776         }
1777         if (mColorResolveImages[index])
1778         {
1779             finalizeColorImageLayout(contextVk, mColorResolveImages[index], index, true);
1780         }
1781     }
1782 
1783     if (mDepthStencilAttachmentIndex == kAttachmentIndexInvalid)
1784     {
1785         return angle::Result::Continue;
1786     }
1787 
1788     // Do depth stencil layout change and load store optimization.
1789     if (mDepthStencilImage)
1790     {
1791         finalizeDepthStencilImageLayoutAndLoadStore(contextVk);
1792     }
1793     if (mDepthStencilResolveImage)
1794     {
1795         finalizeDepthStencilResolveImageLayout(contextVk);
1796     }
1797 
1798     return angle::Result::Continue;
1799 }
1800 
1801 void CommandBufferHelper::beginTransformFeedback(size_t validBufferCount,
1802                                                  const VkBuffer *counterBuffers,
1803                                                  bool rebindBuffers)
1804 {
1805     ASSERT(mIsRenderPassCommandBuffer);
1806     mValidTransformFeedbackBufferCount = static_cast<uint32_t>(validBufferCount);
1807     mRebindTransformFeedbackBuffers    = rebindBuffers;
1808 
1809     for (size_t index = 0; index < validBufferCount; index++)
1810     {
1811         mTransformFeedbackCounterBuffers[index] = counterBuffers[index];
1812     }
1813 }
1814 
1815 void CommandBufferHelper::endTransformFeedback()
1816 {
1817     ASSERT(mIsRenderPassCommandBuffer);
1818     pauseTransformFeedback();
1819     mValidTransformFeedbackBufferCount = 0;
1820 }
1821 
1822 void CommandBufferHelper::invalidateRenderPassColorAttachment(PackedAttachmentIndex attachmentIndex)
1823 {
1824     ASSERT(mIsRenderPassCommandBuffer);
1825     SetBitField(mAttachmentOps[attachmentIndex].storeOp, RenderPassStoreOp::DontCare);
1826     mAttachmentOps[attachmentIndex].isInvalidated = true;
1827 }
1828 
1829 void CommandBufferHelper::invalidateRenderPassDepthAttachment(const gl::DepthStencilState &dsState,
1830                                                               const gl::Rectangle &invalidateArea)
1831 {
1832     ASSERT(mIsRenderPassCommandBuffer);
1833     // Keep track of the size of commands in the command buffer.  If the size grows in the
1834     // future, that implies that drawing occurred since it was invalidated.
1835     mDepthCmdCountInvalidated = mCommandBuffer.getRenderPassWriteCommandCount();
1836 
1837     // Also track the size if the attachment is currently disabled.
1838     const bool isDepthWriteEnabled = dsState.depthTest && dsState.depthMask;
1839     mDepthCmdCountDisabled = isDepthWriteEnabled ? kInfiniteCmdCount : mDepthCmdCountInvalidated;
1840 
1841     // Set/extend the invalidate area.
1842     ExtendRenderPassInvalidateArea(invalidateArea, &mDepthInvalidateArea);
1843 }
1844 
1845 void CommandBufferHelper::invalidateRenderPassStencilAttachment(
1846     const gl::DepthStencilState &dsState,
1847     const gl::Rectangle &invalidateArea)
1848 {
1849     ASSERT(mIsRenderPassCommandBuffer);
1850     // Keep track of the size of commands in the command buffer.  If the size grows in the
1851     // future, that implies that drawing occurred since it was invalidated.
1852     mStencilCmdCountInvalidated = mCommandBuffer.getRenderPassWriteCommandCount();
1853 
1854     // Also track the size if the attachment is currently disabled.
1855     const bool isStencilWriteEnabled =
1856         dsState.stencilTest && (!dsState.isStencilNoOp() || !dsState.isStencilBackNoOp());
1857     mStencilCmdCountDisabled =
1858         isStencilWriteEnabled ? kInfiniteCmdCount : mStencilCmdCountInvalidated;
1859 
1860     // Set/extend the invalidate area.
1861     ExtendRenderPassInvalidateArea(invalidateArea, &mStencilInvalidateArea);
1862 }
1863 
1864 angle::Result CommandBufferHelper::flushToPrimary(Context *context,
1865                                                   PrimaryCommandBuffer *primary,
1866                                                   const RenderPass *renderPass)
1867 {
1868     ANGLE_TRACE_EVENT0("gpu.angle", "CommandBufferHelper::flushToPrimary");
1869     ASSERT(!empty());
1870 
1871     // These commands are added to the primary command buffer before the beginRenderPass command.
1872     executeBarriers(context->getRenderer()->getFeatures(), primary);
1873 
1874     if (mIsRenderPassCommandBuffer)
1875     {
1876         ASSERT(renderPass != nullptr);
1877 
1878         VkRenderPassBeginInfo beginInfo    = {};
1879         beginInfo.sType                    = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
1880         beginInfo.renderPass               = renderPass->getHandle();
1881         beginInfo.framebuffer              = mFramebuffer.getHandle();
1882         beginInfo.renderArea.offset.x      = static_cast<uint32_t>(mRenderArea.x);
1883         beginInfo.renderArea.offset.y      = static_cast<uint32_t>(mRenderArea.y);
1884         beginInfo.renderArea.extent.width  = static_cast<uint32_t>(mRenderArea.width);
1885         beginInfo.renderArea.extent.height = static_cast<uint32_t>(mRenderArea.height);
1886         beginInfo.clearValueCount = static_cast<uint32_t>(mRenderPassDesc.attachmentCount());
1887         beginInfo.pClearValues    = mClearValues.data();
1888 
1889         // Run commands inside the RenderPass.
1890         constexpr VkSubpassContents kSubpassContents =
1891             vk::CommandBuffer::ExecutesInline() ? VK_SUBPASS_CONTENTS_INLINE
1892                                                 : VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS;
1893 
1894         primary->beginRenderPass(beginInfo, kSubpassContents);
1895         mCommandBuffer.executeCommands(primary);
1896         primary->endRenderPass();
1897     }
1898     else
1899     {
1900         ANGLE_TRY(mCommandBuffer.end(context));
1901         mCommandBuffer.executeCommands(primary);
1902     }
1903 
1904     // Restart the command buffer.
1905     return reset(context);
1906 }
1907 
1908 void CommandBufferHelper::updateRenderPassForResolve(ContextVk *contextVk,
1909                                                      Framebuffer *newFramebuffer,
1910                                                      const RenderPassDesc &renderPassDesc)
1911 {
1912     ASSERT(newFramebuffer);
1913     mFramebuffer.setHandle(newFramebuffer->getHandle());
1914     mRenderPassDesc = renderPassDesc;
1915 }
1916 
1917 // Helper functions used below
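// GetLoadOpShorthand and GetStoreOpShorthand map render pass ops to single letters for the
// diagnostics output: C(lear), L(oad), N(one), D(ontCare) for loads and S(tore), N(one),
// D(ontCare) for stores.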
1918 char GetLoadOpShorthand(RenderPassLoadOp loadOp)
1919 {
1920     switch (loadOp)
1921     {
1922         case RenderPassLoadOp::Clear:
1923             return 'C';
1924         case RenderPassLoadOp::Load:
1925             return 'L';
1926         case RenderPassLoadOp::None:
1927             return 'N';
1928         default:
1929             return 'D';
1930     }
1931 }
1932 
1933 char GetStoreOpShorthand(RenderPassStoreOp storeOp)
1934 {
1935     switch (storeOp)
1936     {
1937         case RenderPassStoreOp::Store:
1938             return 'S';
1939         case RenderPassStoreOp::None:
1940             return 'N';
1941         default:
1942             return 'D';
1943     }
1944 }
1945 
1946 void CommandBufferHelper::addCommandDiagnostics(ContextVk *contextVk)
1947 {
1948     std::ostringstream out;
1949 
1950     out << "Memory Barrier: ";
1951     for (PipelineBarrier &barrier : mPipelineBarriers)
1952     {
1953         if (!barrier.isEmpty())
1954         {
1955             barrier.addDiagnosticsString(out);
1956         }
1957     }
1958     out << "\\l";
1959 
1960     if (mIsRenderPassCommandBuffer)
1961     {
1962         size_t attachmentCount             = mRenderPassDesc.attachmentCount();
1963         size_t depthStencilAttachmentCount = mRenderPassDesc.hasDepthStencilAttachment() ? 1 : 0;
1964         size_t colorAttachmentCount        = attachmentCount - depthStencilAttachmentCount;
1965 
1966         PackedAttachmentIndex attachmentIndexVk(0);
1967         std::string loadOps, storeOps;
1968 
1969         if (colorAttachmentCount > 0)
1970         {
1971             loadOps += " Color: ";
1972             storeOps += " Color: ";
1973 
1974             for (size_t i = 0; i < colorAttachmentCount; ++i)
1975             {
1976                 loadOps += GetLoadOpShorthand(
1977                     static_cast<RenderPassLoadOp>(mAttachmentOps[attachmentIndexVk].loadOp));
1978                 storeOps += GetStoreOpShorthand(
1979                     static_cast<RenderPassStoreOp>(mAttachmentOps[attachmentIndexVk].storeOp));
1980                 ++attachmentIndexVk;
1981             }
1982         }
1983 
1984         if (depthStencilAttachmentCount > 0)
1985         {
1986             ASSERT(depthStencilAttachmentCount == 1);
1987 
1988             loadOps += " Depth/Stencil: ";
1989             storeOps += " Depth/Stencil: ";
1990 
1991             loadOps += GetLoadOpShorthand(
1992                 static_cast<RenderPassLoadOp>(mAttachmentOps[attachmentIndexVk].loadOp));
1993             loadOps += GetLoadOpShorthand(
1994                 static_cast<RenderPassLoadOp>(mAttachmentOps[attachmentIndexVk].stencilLoadOp));
1995 
1996             storeOps += GetStoreOpShorthand(
1997                 static_cast<RenderPassStoreOp>(mAttachmentOps[attachmentIndexVk].storeOp));
1998             storeOps += GetStoreOpShorthand(
1999                 static_cast<RenderPassStoreOp>(mAttachmentOps[attachmentIndexVk].stencilStoreOp));
2000         }
2001 
2002         if (attachmentCount > 0)
2003         {
2004             out << "LoadOp:  " << loadOps << "\\l";
2005             out << "StoreOp: " << storeOps << "\\l";
2006         }
2007     }
2008     out << mCommandBuffer.dumpCommands("\\l");
2009     contextVk->addCommandBufferDiagnostics(out.str());
2010 }
2011 
2012 void CommandBufferHelper::resumeTransformFeedback()
2013 {
2014     ASSERT(mIsRenderPassCommandBuffer);
2015     ASSERT(isTransformFeedbackStarted());
2016 
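    // When the transform feedback buffers are being rebound, pass zero counter buffers so that
    // capture restarts from offset zero instead of resuming from the previously saved counters.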
2017     uint32_t numCounterBuffers =
2018         mRebindTransformFeedbackBuffers ? 0 : mValidTransformFeedbackBufferCount;
2019 
2020     mRebindTransformFeedbackBuffers    = false;
2021     mIsTransformFeedbackActiveUnpaused = true;
2022 
2023     mCommandBuffer.beginTransformFeedback(0, numCounterBuffers,
2024                                           mTransformFeedbackCounterBuffers.data(), nullptr);
2025 }
2026 
2027 void CommandBufferHelper::pauseTransformFeedback()
2028 {
2029     ASSERT(mIsRenderPassCommandBuffer);
2030     ASSERT(isTransformFeedbackStarted() && isTransformFeedbackActiveUnpaused());
2031     mIsTransformFeedbackActiveUnpaused = false;
2032     mCommandBuffer.endTransformFeedback(0, mValidTransformFeedbackBufferCount,
2033                                         mTransformFeedbackCounterBuffers.data(), nullptr);
2034 }
2035 
2036 void CommandBufferHelper::updateRenderPassColorClear(PackedAttachmentIndex colorIndexVk,
2037                                                      const VkClearValue &clearValue)
2038 {
2039     mAttachmentOps.setClearOp(colorIndexVk);
2040     mClearValues.store(colorIndexVk, VK_IMAGE_ASPECT_COLOR_BIT, clearValue);
2041 }
2042 
2043 void CommandBufferHelper::updateRenderPassDepthStencilClear(VkImageAspectFlags aspectFlags,
2044                                                             const VkClearValue &clearValue)
2045 {
2046     // Don't overwrite prior clear values for individual aspects.
2047     VkClearValue combinedClearValue = mClearValues[mDepthStencilAttachmentIndex];
2048 
2049     if ((aspectFlags & VK_IMAGE_ASPECT_DEPTH_BIT) != 0)
2050     {
2051         mAttachmentOps.setClearOp(mDepthStencilAttachmentIndex);
2052         combinedClearValue.depthStencil.depth = clearValue.depthStencil.depth;
2053     }
2054 
2055     if ((aspectFlags & VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
2056     {
2057         mAttachmentOps.setClearStencilOp(mDepthStencilAttachmentIndex);
2058         combinedClearValue.depthStencil.stencil = clearValue.depthStencil.stencil;
2059     }
2060 
2061     // Bypass special D/S handling. This clear values array stores values packed.
2062     mClearValues.storeNoDepthStencil(mDepthStencilAttachmentIndex, combinedClearValue);
2063 }
2064 
2065 void CommandBufferHelper::growRenderArea(ContextVk *contextVk, const gl::Rectangle &newRenderArea)
2066 {
2067     ASSERT(mIsRenderPassCommandBuffer);
2068 
2069     // The render area is grown such that it covers both the previous and the new render areas.
2070     gl::GetEnclosingRectangle(mRenderArea, newRenderArea, &mRenderArea);
2071 
2072     // Remove invalidates that are no longer applicable.
2073     if (!mDepthInvalidateArea.empty() && !mDepthInvalidateArea.encloses(mRenderArea))
2074     {
2075         ANGLE_VK_PERF_WARNING(
2076             contextVk, GL_DEBUG_SEVERITY_LOW,
2077             "InvalidateSubFramebuffer for depth discarded due to increased scissor region");
2078         mDepthInvalidateArea      = gl::Rectangle();
2079         mDepthCmdCountInvalidated = kInfiniteCmdCount;
2080     }
2081     if (!mStencilInvalidateArea.empty() && !mStencilInvalidateArea.encloses(mRenderArea))
2082     {
2083         ANGLE_VK_PERF_WARNING(
2084             contextVk, GL_DEBUG_SEVERITY_LOW,
2085             "InvalidateSubFramebuffer for stencil discarded due to increased scissor region");
2086         mStencilInvalidateArea      = gl::Rectangle();
2087         mStencilCmdCountInvalidated = kInfiniteCmdCount;
2088     }
2089 }
2090 
2091 // CommandBufferRecycler implementation.
2092 CommandBufferRecycler::CommandBufferRecycler()  = default;
2093 CommandBufferRecycler::~CommandBufferRecycler() = default;
2094 
2095 void CommandBufferRecycler::onDestroy()
2096 {
2097     for (vk::CommandBufferHelper *commandBufferHelper : mCommandBufferHelperFreeList)
2098     {
2099         SafeDelete(commandBufferHelper);
2100     }
2101     mCommandBufferHelperFreeList.clear();
2102 
2103     ASSERT(mSecondaryCommandBuffersToReset.empty());
2104 }
2105 
2106 angle::Result CommandBufferRecycler::getCommandBufferHelper(
2107     Context *context,
2108     bool hasRenderPass,
2109     CommandPool *commandPool,
2110     CommandBufferHelper **commandBufferHelperOut)
2111 {
2112     if (mCommandBufferHelperFreeList.empty())
2113     {
2114         vk::CommandBufferHelper *commandBuffer = new vk::CommandBufferHelper();
2115         *commandBufferHelperOut                = commandBuffer;
2116 
2117         return commandBuffer->initialize(context, hasRenderPass, commandPool);
2118     }
2119     else
2120     {
2121         vk::CommandBufferHelper *commandBuffer = mCommandBufferHelperFreeList.back();
2122         mCommandBufferHelperFreeList.pop_back();
2123         commandBuffer->setHasRenderPass(hasRenderPass);
2124         *commandBufferHelperOut = commandBuffer;
2125         return angle::Result::Continue;
2126     }
2127 }
2128 
2129 void CommandBufferRecycler::recycleCommandBufferHelper(VkDevice device,
2130                                                        vk::CommandBufferHelper **commandBuffer)
2131 {
2132     ASSERT((*commandBuffer)->empty());
2133     (*commandBuffer)->markOpen();
2134     recycleImpl(device, commandBuffer);
2135 }
2136 
2137 #if ANGLE_USE_CUSTOM_VULKAN_CMD_BUFFERS
2138 void CommandBufferRecycler::recycleImpl(VkDevice device, CommandBufferHelper **commandBuffer)
2139 {
2140     mCommandBufferHelperFreeList.push_back(*commandBuffer);
2141 }
2142 void CommandBufferRecycler::resetCommandBufferHelper(CommandBuffer &&commandBuffer)
2143 {
2144     commandBuffer.reset();
2145 }
2146 #else   // ANGLE_USE_CUSTOM_VULKAN_CMD_BUFFERS
2147 void CommandBufferRecycler::recycleImpl(VkDevice device, CommandBufferHelper **commandBuffer)
2148 {
2149     CommandPool *pool = (*commandBuffer)->getCommandPool();
2150 
2151     pool->freeCommandBuffers(device, 1, (*commandBuffer)->getCommandBuffer().ptr());
2152     (*commandBuffer)->getCommandBuffer().releaseHandle();
2153     SafeDelete(*commandBuffer);
2154 }
2155 void CommandBufferRecycler::resetCommandBufferHelper(CommandBuffer &&commandBuffer)
2156 {
2157     mSecondaryCommandBuffersToReset.push_back(std::move(commandBuffer));
2158 }
2159 #endif  // ANGLE_USE_CUSTOM_VULKAN_CMD_BUFFERS
2160 
2161 // DynamicBuffer implementation.
2162 DynamicBuffer::DynamicBuffer()
2163     : mUsage(0),
2164       mHostVisible(false),
2165       mPolicy(DynamicBufferPolicy::OneShotUse),
2166       mInitialSize(0),
2167       mNextAllocationOffset(0),
2168       mLastFlushOrInvalidateOffset(0),
2169       mSize(0),
2170       mAlignment(0),
2171       mMemoryPropertyFlags(0)
2172 {}
2173 
2174 DynamicBuffer::DynamicBuffer(DynamicBuffer &&other)
2175     : mUsage(other.mUsage),
2176       mHostVisible(other.mHostVisible),
2177       mPolicy(other.mPolicy),
2178       mInitialSize(other.mInitialSize),
2179       mBuffer(std::move(other.mBuffer)),
2180       mNextAllocationOffset(other.mNextAllocationOffset),
2181       mLastFlushOrInvalidateOffset(other.mLastFlushOrInvalidateOffset),
2182       mSize(other.mSize),
2183       mAlignment(other.mAlignment),
2184       mMemoryPropertyFlags(other.mMemoryPropertyFlags),
2185       mInFlightBuffers(std::move(other.mInFlightBuffers)),
2186       mBufferFreeList(std::move(other.mBufferFreeList))
2187 {}
2188 
2189 void DynamicBuffer::init(RendererVk *renderer,
2190                          VkBufferUsageFlags usage,
2191                          size_t alignment,
2192                          size_t initialSize,
2193                          bool hostVisible,
2194                          DynamicBufferPolicy policy)
2195 {
2196     VkMemoryPropertyFlags memoryPropertyFlags =
2197         (hostVisible) ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
2198 
2199     initWithFlags(renderer, usage, alignment, initialSize, memoryPropertyFlags, policy);
2200 }
2201 
2202 void DynamicBuffer::initWithFlags(RendererVk *renderer,
2203                                   VkBufferUsageFlags usage,
2204                                   size_t alignment,
2205                                   size_t initialSize,
2206                                   VkMemoryPropertyFlags memoryPropertyFlags,
2207                                   DynamicBufferPolicy policy)
2208 {
2209     mUsage               = usage;
2210     mHostVisible         = ((memoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0);
2211     mMemoryPropertyFlags = memoryPropertyFlags;
2212     mPolicy              = policy;
2213 
2214     // Check that we haven't overridden the initial size of the buffer in setMinimumSizeForTesting.
2215     if (mInitialSize == 0)
2216     {
2217         mInitialSize = initialSize;
2218         mSize        = 0;
2219     }
2220 
2221     // Workaround for the mock ICD not supporting allocations greater than 0x1000.
2222     // Could be removed if https://github.com/KhronosGroup/Vulkan-Tools/issues/84 is fixed.
2223     if (renderer->isMockICDEnabled())
2224     {
2225         mSize = std::min<size_t>(mSize, 0x1000);
2226     }
2227 
2228     requireAlignment(renderer, alignment);
2229 }
2230 
2231 DynamicBuffer::~DynamicBuffer()
2232 {
2233     ASSERT(mBuffer == nullptr);
2234     ASSERT(mInFlightBuffers.empty());
2235     ASSERT(mBufferFreeList.empty());
2236 }
2237 
2238 angle::Result DynamicBuffer::allocateNewBuffer(ContextVk *contextVk)
2239 {
2240     // Gather statistics
2241     const gl::OverlayType *overlay = contextVk->getOverlay();
2242     if (overlay->isEnabled())
2243     {
2244         gl::RunningGraphWidget *dynamicBufferAllocations =
2245             overlay->getRunningGraphWidget(gl::WidgetId::VulkanDynamicBufferAllocations);
2246         dynamicBufferAllocations->add(1);
2247     }
2248 
2249     // Allocate the buffer
2250     ASSERT(!mBuffer);
2251     mBuffer = std::make_unique<BufferHelper>();
2252 
2253     VkBufferCreateInfo createInfo    = {};
2254     createInfo.sType                 = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
2255     createInfo.flags                 = 0;
2256     createInfo.size                  = mSize;
2257     createInfo.usage                 = mUsage;
2258     createInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
2259     createInfo.queueFamilyIndexCount = 0;
2260     createInfo.pQueueFamilyIndices   = nullptr;
2261 
2262     return mBuffer->init(contextVk, createInfo, mMemoryPropertyFlags);
2263 }
2264 
2265 bool DynamicBuffer::allocateFromCurrentBuffer(size_t sizeInBytes,
2266                                               uint8_t **ptrOut,
2267                                               VkDeviceSize *offsetOut)
2268 {
2269     ASSERT(ptrOut);
2270     ASSERT(offsetOut);
2271     size_t sizeToAllocate                                      = roundUp(sizeInBytes, mAlignment);
2272     angle::base::CheckedNumeric<size_t> checkedNextWriteOffset = mNextAllocationOffset;
2273     checkedNextWriteOffset += sizeToAllocate;
2274 
2275     if (!checkedNextWriteOffset.IsValid() || checkedNextWriteOffset.ValueOrDie() >= mSize)
2276     {
2277         return false;
2278     }
2279 
2280     ASSERT(mBuffer != nullptr);
2281     ASSERT(mHostVisible);
2282     ASSERT(mBuffer->getMappedMemory());
2283 
2284     *ptrOut    = mBuffer->getMappedMemory() + mNextAllocationOffset;
2285     *offsetOut = static_cast<VkDeviceSize>(mNextAllocationOffset);
2286 
2287     mNextAllocationOffset += static_cast<uint32_t>(sizeToAllocate);
2288     return true;
2289 }
2290 
2291 angle::Result DynamicBuffer::allocateWithAlignment(ContextVk *contextVk,
2292                                                    size_t sizeInBytes,
2293                                                    size_t alignment,
2294                                                    uint8_t **ptrOut,
2295                                                    VkBuffer *bufferOut,
2296                                                    VkDeviceSize *offsetOut,
2297                                                    bool *newBufferAllocatedOut)
2298 {
2299     mNextAllocationOffset =
2300         roundUp<uint32_t>(mNextAllocationOffset, static_cast<uint32_t>(alignment));
2301     size_t sizeToAllocate = roundUp(sizeInBytes, mAlignment);
2302 
2303     angle::base::CheckedNumeric<size_t> checkedNextWriteOffset = mNextAllocationOffset;
2304     checkedNextWriteOffset += sizeToAllocate;
2305 
2306     if (!checkedNextWriteOffset.IsValid() || checkedNextWriteOffset.ValueOrDie() >= mSize)
2307     {
2308         if (mBuffer)
2309         {
2310             // Make sure the buffer is not released externally.
2311             ASSERT(mBuffer->valid());
2312 
2313             ANGLE_TRY(flush(contextVk));
2314 
2315             mInFlightBuffers.push_back(std::move(mBuffer));
2316             ASSERT(!mBuffer);
2317         }
2318 
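        // Grow the buffer if the request does not fit in the current size, and shrink it back
        // toward the initial size if the current size is more than 4x larger than what a fresh
        // allocation would need.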
2319         const size_t sizeIgnoringHistory = std::max(mInitialSize, sizeToAllocate);
2320         if (sizeToAllocate > mSize || sizeIgnoringHistory < mSize / 4)
2321         {
2322             mSize = sizeIgnoringHistory;
2323 
2324             // Clear the free list since the free buffers are now either too small or too big.
2325             ReleaseBufferListToRenderer(contextVk->getRenderer(), &mBufferFreeList);
2326         }
2327 
2328         // The front of the free list should be the oldest. Thus if it is in use the rest of the
2329         // free list should be in use as well.
2330         if (mBufferFreeList.empty() ||
2331             mBufferFreeList.front()->isCurrentlyInUse(contextVk->getLastCompletedQueueSerial()))
2332         {
2333             ANGLE_TRY(allocateNewBuffer(contextVk));
2334         }
2335         else
2336         {
2337             mBuffer = std::move(mBufferFreeList.front());
2338             mBufferFreeList.erase(mBufferFreeList.begin());
2339         }
2340 
2341         ASSERT(mBuffer->getSize() == mSize);
2342 
2343         mNextAllocationOffset        = 0;
2344         mLastFlushOrInvalidateOffset = 0;
2345 
2346         if (newBufferAllocatedOut != nullptr)
2347         {
2348             *newBufferAllocatedOut = true;
2349         }
2350     }
2351     else if (newBufferAllocatedOut != nullptr)
2352     {
2353         *newBufferAllocatedOut = false;
2354     }
2355 
2356     ASSERT(mBuffer != nullptr);
2357 
2358     if (bufferOut != nullptr)
2359     {
2360         *bufferOut = mBuffer->getBuffer().getHandle();
2361     }
2362 
2363     // Optionally map() the buffer if possible
2364     if (ptrOut)
2365     {
2366         ASSERT(mHostVisible);
2367         uint8_t *mappedMemory;
2368         ANGLE_TRY(mBuffer->map(contextVk, &mappedMemory));
2369         *ptrOut = mappedMemory + mNextAllocationOffset;
2370     }
2371 
2372     if (offsetOut != nullptr)
2373     {
2374         *offsetOut = static_cast<VkDeviceSize>(mNextAllocationOffset);
2375     }
2376 
2377     mNextAllocationOffset += static_cast<uint32_t>(sizeToAllocate);
2378     return angle::Result::Continue;
2379 }
2380 
2381 angle::Result DynamicBuffer::flush(ContextVk *contextVk)
2382 {
2383     if (mHostVisible && (mNextAllocationOffset > mLastFlushOrInvalidateOffset))
2384     {
2385         ASSERT(mBuffer != nullptr);
2386         ANGLE_TRY(mBuffer->flush(contextVk->getRenderer(), mLastFlushOrInvalidateOffset,
2387                                  mNextAllocationOffset - mLastFlushOrInvalidateOffset));
2388         mLastFlushOrInvalidateOffset = mNextAllocationOffset;
2389     }
2390     return angle::Result::Continue;
2391 }
2392 
2393 angle::Result DynamicBuffer::invalidate(ContextVk *contextVk)
2394 {
2395     if (mHostVisible && (mNextAllocationOffset > mLastFlushOrInvalidateOffset))
2396     {
2397         ASSERT(mBuffer != nullptr);
2398         ANGLE_TRY(mBuffer->invalidate(contextVk->getRenderer(), mLastFlushOrInvalidateOffset,
2399                                       mNextAllocationOffset - mLastFlushOrInvalidateOffset));
2400         mLastFlushOrInvalidateOffset = mNextAllocationOffset;
2401     }
2402     return angle::Result::Continue;
2403 }
2404 
2405 void DynamicBuffer::release(RendererVk *renderer)
2406 {
2407     reset();
2408 
2409     ReleaseBufferListToRenderer(renderer, &mInFlightBuffers);
2410     ReleaseBufferListToRenderer(renderer, &mBufferFreeList);
2411 
2412     if (mBuffer)
2413     {
2414         mBuffer->release(renderer);
2415         mBuffer.reset(nullptr);
2416     }
2417 }
2418 
2419 void DynamicBuffer::releaseInFlightBuffersToResourceUseList(ContextVk *contextVk)
2420 {
2421     ResourceUseList *resourceUseList = &contextVk->getResourceUseList();
2422     for (std::unique_ptr<BufferHelper> &bufferHelper : mInFlightBuffers)
2423     {
2424         // This function is used only for internal buffers, and they are all read-only.
2425         // It's possible this may change in the future, but there isn't a good way to detect that,
2426         // unfortunately.
2427         bufferHelper->retainReadOnly(resourceUseList);
2428 
2429         if (ShouldReleaseFreeBuffer(*bufferHelper, mSize, mPolicy, mBufferFreeList.size()))
2430         {
2431             bufferHelper->release(contextVk->getRenderer());
2432         }
2433         else
2434         {
2435             mBufferFreeList.push_back(std::move(bufferHelper));
2436         }
2437     }
2438     mInFlightBuffers.clear();
2439 }
2440 
2441 void DynamicBuffer::releaseInFlightBuffers(ContextVk *contextVk)
2442 {
2443     for (std::unique_ptr<BufferHelper> &toRelease : mInFlightBuffers)
2444     {
2445         if (ShouldReleaseFreeBuffer(*toRelease, mSize, mPolicy, mBufferFreeList.size()))
2446         {
2447             toRelease->release(contextVk->getRenderer());
2448         }
2449         else
2450         {
2451             mBufferFreeList.push_back(std::move(toRelease));
2452         }
2453     }
2454 
2455     mInFlightBuffers.clear();
2456 }
2457 
2458 void DynamicBuffer::destroy(RendererVk *renderer)
2459 {
2460     reset();
2461 
2462     DestroyBufferList(renderer, &mInFlightBuffers);
2463     DestroyBufferList(renderer, &mBufferFreeList);
2464 
2465     if (mBuffer)
2466     {
2467         mBuffer->unmap(renderer);
2468         mBuffer->destroy(renderer);
2469         mBuffer.reset(nullptr);
2470     }
2471 }
2472 
2473 void DynamicBuffer::requireAlignment(RendererVk *renderer, size_t alignment)
2474 {
2475     ASSERT(alignment > 0);
2476 
2477     size_t prevAlignment = mAlignment;
2478 
2479     // If alignment was never set, initialize it with the atom size limit.
2480     if (prevAlignment == 0)
2481     {
2482         prevAlignment =
2483             static_cast<size_t>(renderer->getPhysicalDeviceProperties().limits.nonCoherentAtomSize);
2484         ASSERT(gl::isPow2(prevAlignment));
2485     }
2486 
2487     // We need lcm(prevAlignment, alignment).  Usually, one divides the other so std::max() could be
2488     // used instead.  Only known case where this assumption breaks is for 3-component types with
2489     // 16- or 32-bit channels, so that's special-cased to avoid a full-fledged lcm implementation.
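    // Illustrative worked example (annotation, not part of the original source): with
    // prevAlignment = 4 and a requested alignment of 6 (a 3-component, 16-bit type), 4 * 6 = 24 is
    // not a power of two, so the special case below divides 6 by 3 to get 2 and computes
    // max(4, 2) * 3 = 12, which is exactly lcm(4, 6).  In the common power-of-two case, e.g.
    // prevAlignment = 4 and alignment = 16, max(4, 16) = 16 is already the lcm.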
2490 
2491     if (gl::isPow2(prevAlignment * alignment))
2492     {
2493         ASSERT(alignment % prevAlignment == 0 || prevAlignment % alignment == 0);
2494 
2495         alignment = std::max(prevAlignment, alignment);
2496     }
2497     else
2498     {
2499         ASSERT(prevAlignment % 3 != 0 || gl::isPow2(prevAlignment / 3));
2500         ASSERT(alignment % 3 != 0 || gl::isPow2(alignment / 3));
2501 
2502         prevAlignment = prevAlignment % 3 == 0 ? prevAlignment / 3 : prevAlignment;
2503         alignment     = alignment % 3 == 0 ? alignment / 3 : alignment;
2504 
2505         alignment = std::max(prevAlignment, alignment) * 3;
2506     }
2507 
2508     // If alignment has changed, make sure the next allocation is done at an aligned offset.
2509     if (alignment != mAlignment)
2510     {
2511         mNextAllocationOffset = roundUp(mNextAllocationOffset, static_cast<uint32_t>(alignment));
2512     }
2513 
2514     mAlignment = alignment;
2515 }
2516 
2517 void DynamicBuffer::setMinimumSizeForTesting(size_t minSize)
2518 {
2519     // This will really only have an effect next time we call allocate.
2520     mInitialSize = minSize;
2521 
2522     // Forces a new allocation on the next allocate.
2523     mSize = 0;
2524 }
2525 
2526 void DynamicBuffer::reset()
2527 {
2528     mSize                        = 0;
2529     mNextAllocationOffset        = 0;
2530     mLastFlushOrInvalidateOffset = 0;
2531 }
2532 
2533 // DynamicShadowBuffer implementation.
2534 DynamicShadowBuffer::DynamicShadowBuffer() : mInitialSize(0), mSize(0) {}
2535 
2536 DynamicShadowBuffer::DynamicShadowBuffer(DynamicShadowBuffer &&other)
2537     : mInitialSize(other.mInitialSize), mSize(other.mSize), mBuffer(std::move(other.mBuffer))
2538 {}
2539 
2540 void DynamicShadowBuffer::init(size_t initialSize)
2541 {
2542     mInitialSize = initialSize;
2543 }
2544 
2545 DynamicShadowBuffer::~DynamicShadowBuffer()
2546 {
2547     ASSERT(mBuffer.empty());
2548 }
2549 
2550 angle::Result DynamicShadowBuffer::allocate(size_t sizeInBytes)
2551 {
2552     bool result = true;
2553 
2554     // Delete the current buffer, if any
2555     if (!mBuffer.empty())
2556     {
2557         result &= mBuffer.resize(0);
2558     }
2559 
2560     // Cache the new size
2561     mSize = std::max(mInitialSize, sizeInBytes);
2562 
2563     // Allocate the buffer
2564     result &= mBuffer.resize(mSize);
2565 
2566     // If allocation failed, release the buffer and return error.
2567     if (!result)
2568     {
2569         release();
2570         return angle::Result::Stop;
2571     }
2572 
2573     return angle::Result::Continue;
2574 }
2575 
2576 void DynamicShadowBuffer::release()
2577 {
2578     reset();
2579 
2580     if (!mBuffer.empty())
2581     {
2582         (void)mBuffer.resize(0);
2583     }
2584 }
2585 
2586 void DynamicShadowBuffer::destroy(VkDevice device)
2587 {
2588     release();
2589 }
2590 
2591 void DynamicShadowBuffer::reset()
2592 {
2593     mSize = 0;
2594 }
2595 
2596 // DescriptorPoolHelper implementation.
2597 DescriptorPoolHelper::DescriptorPoolHelper() : mFreeDescriptorSets(0) {}
2598 
2599 DescriptorPoolHelper::~DescriptorPoolHelper() = default;
2600 
2601 bool DescriptorPoolHelper::hasCapacity(uint32_t descriptorSetCount) const
2602 {
2603     return mFreeDescriptorSets >= descriptorSetCount;
2604 }
2605 
2606 angle::Result DescriptorPoolHelper::init(ContextVk *contextVk,
2607                                          const std::vector<VkDescriptorPoolSize> &poolSizesIn,
2608                                          uint32_t maxSets)
2609 {
2610     if (mDescriptorPool.valid())
2611     {
2612         ASSERT(!isCurrentlyInUse(contextVk->getLastCompletedQueueSerial()));
2613         mDescriptorPool.destroy(contextVk->getDevice());
2614     }
2615 
2616     // Make a copy of the pool sizes, so we can grow them to satisfy the specified maxSets.
2617     std::vector<VkDescriptorPoolSize> poolSizes = poolSizesIn;
2618 
2619     for (VkDescriptorPoolSize &poolSize : poolSizes)
2620     {
2621         poolSize.descriptorCount *= maxSets;
2622     }
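    // Illustrative example (annotation, not part of the original source): if poolSizesIn contains
    // {VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, descriptorCount = 4} and maxSets = 16, the loop
    // above scales descriptorCount to 64, so the pool can back 16 descriptor sets of 4 samplers
    // each.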
2623 
2624     VkDescriptorPoolCreateInfo descriptorPoolInfo = {};
2625     descriptorPoolInfo.sType                      = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
2626     descriptorPoolInfo.flags                      = 0;
2627     descriptorPoolInfo.maxSets                    = maxSets;
2628     descriptorPoolInfo.poolSizeCount              = static_cast<uint32_t>(poolSizes.size());
2629     descriptorPoolInfo.pPoolSizes                 = poolSizes.data();
2630 
2631     mFreeDescriptorSets = maxSets;
2632 
2633     ANGLE_VK_TRY(contextVk, mDescriptorPool.init(contextVk->getDevice(), descriptorPoolInfo));
2634 
2635     return angle::Result::Continue;
2636 }
2637 
2638 void DescriptorPoolHelper::destroy(VkDevice device)
2639 {
2640     mDescriptorPool.destroy(device);
2641 }
2642 
2643 void DescriptorPoolHelper::release(ContextVk *contextVk)
2644 {
2645     contextVk->addGarbage(&mDescriptorPool);
2646 }
2647 
2648 angle::Result DescriptorPoolHelper::allocateSets(ContextVk *contextVk,
2649                                                  const VkDescriptorSetLayout *descriptorSetLayout,
2650                                                  uint32_t descriptorSetCount,
2651                                                  VkDescriptorSet *descriptorSetsOut)
2652 {
2653     VkDescriptorSetAllocateInfo allocInfo = {};
2654     allocInfo.sType                       = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
2655     allocInfo.descriptorPool              = mDescriptorPool.getHandle();
2656     allocInfo.descriptorSetCount          = descriptorSetCount;
2657     allocInfo.pSetLayouts                 = descriptorSetLayout;
2658 
2659     ASSERT(mFreeDescriptorSets >= descriptorSetCount);
2660     mFreeDescriptorSets -= descriptorSetCount;
2661 
2662     ANGLE_VK_TRY(contextVk, mDescriptorPool.allocateDescriptorSets(contextVk->getDevice(),
2663                                                                    allocInfo, descriptorSetsOut));
2664 
2665     // The pool is still in use every time a new descriptor set is allocated from it.
2666     retain(&contextVk->getResourceUseList());
2667 
2668     return angle::Result::Continue;
2669 }
2670 
2671 // DynamicDescriptorPool implementation.
2672 DynamicDescriptorPool::DynamicDescriptorPool()
2673     : mCurrentPoolIndex(0), mCachedDescriptorSetLayout(VK_NULL_HANDLE)
2674 {}
2675 
2676 DynamicDescriptorPool::~DynamicDescriptorPool() = default;
2677 
2678 angle::Result DynamicDescriptorPool::init(ContextVk *contextVk,
2679                                           const VkDescriptorPoolSize *setSizes,
2680                                           size_t setSizeCount,
2681                                           VkDescriptorSetLayout descriptorSetLayout)
2682 {
2683     ASSERT(setSizes);
2684     ASSERT(setSizeCount);
2685     ASSERT(mCurrentPoolIndex == 0);
2686     ASSERT(mDescriptorPools.empty() ||
2687            (mDescriptorPools.size() == 1 &&
2688             mDescriptorPools[mCurrentPoolIndex]->get().hasCapacity(mMaxSetsPerPool)));
2689     ASSERT(mCachedDescriptorSetLayout == VK_NULL_HANDLE);
2690 
2691     mPoolSizes.assign(setSizes, setSizes + setSizeCount);
2692     mCachedDescriptorSetLayout = descriptorSetLayout;
2693 
2694     mDescriptorPools.push_back(new RefCountedDescriptorPoolHelper());
2695     mCurrentPoolIndex = mDescriptorPools.size() - 1;
2696     return mDescriptorPools[mCurrentPoolIndex]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
2697 }
2698 
2699 void DynamicDescriptorPool::destroy(VkDevice device)
2700 {
2701     for (RefCountedDescriptorPoolHelper *pool : mDescriptorPools)
2702     {
2703         ASSERT(!pool->isReferenced());
2704         pool->get().destroy(device);
2705         delete pool;
2706     }
2707 
2708     mDescriptorPools.clear();
2709     mCurrentPoolIndex          = 0;
2710     mCachedDescriptorSetLayout = VK_NULL_HANDLE;
2711 }
2712 
2713 void DynamicDescriptorPool::release(ContextVk *contextVk)
2714 {
2715     for (RefCountedDescriptorPoolHelper *pool : mDescriptorPools)
2716     {
2717         ASSERT(!pool->isReferenced());
2718         pool->get().release(contextVk);
2719         delete pool;
2720     }
2721 
2722     mDescriptorPools.clear();
2723     mCurrentPoolIndex          = 0;
2724     mCachedDescriptorSetLayout = VK_NULL_HANDLE;
2725 }
2726 
2727 angle::Result DynamicDescriptorPool::allocateSetsAndGetInfo(
2728     ContextVk *contextVk,
2729     const VkDescriptorSetLayout *descriptorSetLayout,
2730     uint32_t descriptorSetCount,
2731     RefCountedDescriptorPoolBinding *bindingOut,
2732     VkDescriptorSet *descriptorSetsOut,
2733     bool *newPoolAllocatedOut)
2734 {
2735     ASSERT(!mDescriptorPools.empty());
2736     ASSERT(*descriptorSetLayout == mCachedDescriptorSetLayout);
2737 
2738     *newPoolAllocatedOut = false;
2739 
2740     if (!bindingOut->valid() || !bindingOut->get().hasCapacity(descriptorSetCount))
2741     {
2742         if (!mDescriptorPools[mCurrentPoolIndex]->get().hasCapacity(descriptorSetCount))
2743         {
2744             ANGLE_TRY(allocateNewPool(contextVk));
2745             *newPoolAllocatedOut = true;
2746         }
2747 
2748         bindingOut->set(mDescriptorPools[mCurrentPoolIndex]);
2749     }
2750 
2751     return bindingOut->get().allocateSets(contextVk, descriptorSetLayout, descriptorSetCount,
2752                                           descriptorSetsOut);
2753 }
2754 
2755 angle::Result DynamicDescriptorPool::allocateNewPool(ContextVk *contextVk)
2756 {
2757     bool found = false;
2758 
2759     Serial lastCompletedSerial = contextVk->getLastCompletedQueueSerial();
2760     for (size_t poolIndex = 0; poolIndex < mDescriptorPools.size(); ++poolIndex)
2761     {
2762         if (!mDescriptorPools[poolIndex]->isReferenced() &&
2763             !mDescriptorPools[poolIndex]->get().isCurrentlyInUse(lastCompletedSerial))
2764         {
2765             mCurrentPoolIndex = poolIndex;
2766             found             = true;
2767             break;
2768         }
2769     }
2770 
2771     if (!found)
2772     {
2773         mDescriptorPools.push_back(new RefCountedDescriptorPoolHelper());
2774         mCurrentPoolIndex = mDescriptorPools.size() - 1;
2775 
2776         static constexpr size_t kMaxPools = 99999;
2777         ANGLE_VK_CHECK(contextVk, mDescriptorPools.size() < kMaxPools, VK_ERROR_TOO_MANY_OBJECTS);
2778     }
2779 
2780     // This pool is getting hot, so grow its max size to try and prevent allocating another pool in
2781     // the future.
2782     if (mMaxSetsPerPool < kMaxSetsPerPoolMax)
2783     {
2784         mMaxSetsPerPool *= mMaxSetsPerPoolMultiplier;
2785     }
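    // Illustrative note (annotation, not part of the original source): assuming, for example, an
    // initial mMaxSetsPerPool of 16 and a multiplier of 2, repeated calls grow the per-pool
    // capacity as 16, 32, 64, ... until kMaxSetsPerPoolMax is reached, amortizing pool creation
    // when many descriptor sets are allocated per frame.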
2786 
2787     return mDescriptorPools[mCurrentPoolIndex]->get().init(contextVk, mPoolSizes, mMaxSetsPerPool);
2788 }
2789 
2790 // For testing only!
2791 uint32_t DynamicDescriptorPool::GetMaxSetsPerPoolForTesting()
2792 {
2793     return mMaxSetsPerPool;
2794 }
2795 
2796 // For testing only!
2797 void DynamicDescriptorPool::SetMaxSetsPerPoolForTesting(uint32_t maxSetsPerPool)
2798 {
2799     mMaxSetsPerPool = maxSetsPerPool;
2800 }
2801 
2802 // For testing only!
2803 uint32_t DynamicDescriptorPool::GetMaxSetsPerPoolMultiplierForTesting()
2804 {
2805     return mMaxSetsPerPoolMultiplier;
2806 }
2807 
2808 // For testing only!
2809 void DynamicDescriptorPool::SetMaxSetsPerPoolMultiplierForTesting(uint32_t maxSetsPerPoolMultiplier)
2810 {
2811     mMaxSetsPerPoolMultiplier = maxSetsPerPoolMultiplier;
2812 }
2813 
2814 // DynamicallyGrowingPool implementation
2815 template <typename Pool>
2816 DynamicallyGrowingPool<Pool>::DynamicallyGrowingPool()
2817     : mPoolSize(0), mCurrentPool(0), mCurrentFreeEntry(0)
2818 {}
2819 
2820 template <typename Pool>
2821 DynamicallyGrowingPool<Pool>::~DynamicallyGrowingPool() = default;
2822 
2823 template <typename Pool>
2824 angle::Result DynamicallyGrowingPool<Pool>::initEntryPool(Context *contextVk, uint32_t poolSize)
2825 {
2826     ASSERT(mPools.empty());
2827     mPoolSize         = poolSize;
2828     mCurrentFreeEntry = poolSize;
2829     return angle::Result::Continue;
2830 }
2831 
2832 template <typename Pool>
2833 void DynamicallyGrowingPool<Pool>::destroyEntryPool(VkDevice device)
2834 {
2835     for (PoolResource &resource : mPools)
2836     {
2837         destroyPoolImpl(device, resource.pool);
2838     }
2839     mPools.clear();
2840 }
2841 
2842 template <typename Pool>
2843 bool DynamicallyGrowingPool<Pool>::findFreeEntryPool(ContextVk *contextVk)
2844 {
2845     Serial lastCompletedQueueSerial = contextVk->getLastCompletedQueueSerial();
2846     for (size_t poolIndex = 0; poolIndex < mPools.size(); ++poolIndex)
2847     {
2848         PoolResource &pool = mPools[poolIndex];
2849         if (pool.freedCount == mPoolSize && !pool.isCurrentlyInUse(lastCompletedQueueSerial))
2850         {
2851             mCurrentPool      = poolIndex;
2852             mCurrentFreeEntry = 0;
2853 
2854             pool.freedCount = 0;
2855 
2856             return true;
2857         }
2858     }
2859 
2860     return false;
2861 }
2862 
2863 template <typename Pool>
2864 angle::Result DynamicallyGrowingPool<Pool>::allocateNewEntryPool(ContextVk *contextVk, Pool &&pool)
2865 {
2866     mPools.emplace_back(std::move(pool), 0);
2867 
2868     mCurrentPool      = mPools.size() - 1;
2869     mCurrentFreeEntry = 0;
2870 
2871     return angle::Result::Continue;
2872 }
2873 
2874 template <typename Pool>
2875 void DynamicallyGrowingPool<Pool>::onEntryFreed(ContextVk *contextVk, size_t poolIndex)
2876 {
2877     ASSERT(poolIndex < mPools.size() && mPools[poolIndex].freedCount < mPoolSize);
2878     mPools[poolIndex].retain(&contextVk->getResourceUseList());
2879     ++mPools[poolIndex].freedCount;
2880 }
2881 
2882 template <typename Pool>
2883 angle::Result DynamicallyGrowingPool<Pool>::allocatePoolEntries(ContextVk *contextVk,
2884                                                                 uint32_t entryCount,
2885                                                                 uint32_t *poolIndex,
2886                                                                 uint32_t *currentEntryOut)
2887 {
2888     if (mCurrentFreeEntry + entryCount > mPoolSize)
2889     {
2890         if (!findFreeEntryPool(contextVk))
2891         {
2892             Pool newPool;
2893             ANGLE_TRY(allocatePoolImpl(contextVk, newPool, mPoolSize));
2894             ANGLE_TRY(allocateNewEntryPool(contextVk, std::move(newPool)));
2895         }
2896     }
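    // Illustrative example (annotation, not part of the original source): with mPoolSize = 64,
    // mCurrentFreeEntry = 60 and entryCount = 8, 60 + 8 > 64, so a fully-freed idle pool is reused
    // if one exists; otherwise a new pool of mPoolSize entries is allocated and becomes current.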
2897 
2898     *poolIndex       = static_cast<uint32_t>(mCurrentPool);
2899     *currentEntryOut = mCurrentFreeEntry;
2900 
2901     mCurrentFreeEntry += entryCount;
2902 
2903     return angle::Result::Continue;
2904 }
2905 
2906 template <typename Pool>
2907 DynamicallyGrowingPool<Pool>::PoolResource::PoolResource(Pool &&poolIn, uint32_t freedCountIn)
2908     : pool(std::move(poolIn)), freedCount(freedCountIn)
2909 {}
2910 
2911 template <typename Pool>
2912 DynamicallyGrowingPool<Pool>::PoolResource::PoolResource(PoolResource &&other)
2913     : pool(std::move(other.pool)), freedCount(other.freedCount)
2914 {}
2915 
2916 // DynamicQueryPool implementation
2917 DynamicQueryPool::DynamicQueryPool() = default;
2918 
2919 DynamicQueryPool::~DynamicQueryPool() = default;
2920 
2921 angle::Result DynamicQueryPool::init(ContextVk *contextVk, VkQueryType type, uint32_t poolSize)
2922 {
2923     ANGLE_TRY(initEntryPool(contextVk, poolSize));
2924     mQueryType = type;
2925     return angle::Result::Continue;
2926 }
2927 
2928 void DynamicQueryPool::destroy(VkDevice device)
2929 {
2930     destroyEntryPool(device);
2931 }
2932 
2933 void DynamicQueryPool::destroyPoolImpl(VkDevice device, QueryPool &poolToDestroy)
2934 {
2935     poolToDestroy.destroy(device);
2936 }
2937 
2938 angle::Result DynamicQueryPool::allocateQuery(ContextVk *contextVk,
2939                                               QueryHelper *queryOut,
2940                                               uint32_t queryCount)
2941 {
2942     ASSERT(!queryOut->valid());
2943 
2944     uint32_t currentPool = 0;
2945     uint32_t queryIndex  = 0;
2946     ANGLE_TRY(allocatePoolEntries(contextVk, queryCount, &currentPool, &queryIndex));
2947 
2948     queryOut->init(this, currentPool, queryIndex, queryCount);
2949 
2950     return angle::Result::Continue;
2951 }
2952 
2953 angle::Result DynamicQueryPool::allocatePoolImpl(ContextVk *contextVk,
2954                                                  QueryPool &poolToAllocate,
2955                                                  uint32_t entriesToAllocate)
2956 {
2957     VkQueryPoolCreateInfo queryPoolInfo = {};
2958     queryPoolInfo.sType                 = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
2959     queryPoolInfo.flags                 = 0;
2960     queryPoolInfo.queryType             = this->mQueryType;
2961     queryPoolInfo.queryCount            = entriesToAllocate;
2962     queryPoolInfo.pipelineStatistics    = 0;
2963 
2964     if (this->mQueryType == VK_QUERY_TYPE_PIPELINE_STATISTICS)
2965     {
2966         queryPoolInfo.pipelineStatistics = VK_QUERY_PIPELINE_STATISTIC_CLIPPING_INVOCATIONS_BIT;
2967     }
2968 
2969     ANGLE_VK_TRY(contextVk, poolToAllocate.init(contextVk->getDevice(), queryPoolInfo));
2970     return angle::Result::Continue;
2971 }
2972 
2973 void DynamicQueryPool::freeQuery(ContextVk *contextVk, QueryHelper *query)
2974 {
2975     if (query->valid())
2976     {
2977         size_t poolIndex = query->mQueryPoolIndex;
2978         ASSERT(getQueryPool(poolIndex).valid());
2979 
2980         onEntryFreed(contextVk, poolIndex);
2981 
2982         query->deinit();
2983     }
2984 }
2985 
2986 // QueryResult implementation
2987 void QueryResult::setResults(uint64_t *results, uint32_t queryCount)
2988 {
2989     ASSERT(mResults[0] == 0 && mResults[1] == 0);
2990 
2991     // Accumulate the query results.  For multiview, where multiple query indices are used to return
2992     // the results, it's undefined how the results are distributed between indices, but the sum is
2993     // guaranteed to be the desired result.
2994     for (uint32_t query = 0; query < queryCount; ++query)
2995     {
2996         for (uint32_t perQueryIndex = 0; perQueryIndex < mIntsPerResult; ++perQueryIndex)
2997         {
2998             mResults[perQueryIndex] += results[query * mIntsPerResult + perQueryIndex];
2999         }
3000     }
3001 }
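// Illustrative example (annotation, not part of the original source): for a multiview query with
// queryCount = 2 and mIntsPerResult = 1, per-view results {5, 3} accumulate to mResults[0] = 8;
// only the sum across views is well defined, not how it is split between the per-view indices.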
3002 
3003 // QueryHelper implementation
3004 QueryHelper::QueryHelper()
3005     : mDynamicQueryPool(nullptr),
3006       mQueryPoolIndex(0),
3007       mQuery(0),
3008       mQueryCount(0),
3009       mStatus(QueryStatus::Inactive)
3010 {}
3011 
3012 QueryHelper::~QueryHelper() {}
3013 
3014 // Move constructor
3015 QueryHelper::QueryHelper(QueryHelper &&rhs)
3016     : Resource(std::move(rhs)),
3017       mDynamicQueryPool(rhs.mDynamicQueryPool),
3018       mQueryPoolIndex(rhs.mQueryPoolIndex),
3019       mQuery(rhs.mQuery),
3020       mQueryCount(rhs.mQueryCount)
3021 {
3022     rhs.mDynamicQueryPool = nullptr;
3023     rhs.mQueryPoolIndex   = 0;
3024     rhs.mQuery            = 0;
3025     rhs.mQueryCount       = 0;
3026 }
3027 
3028 QueryHelper &QueryHelper::operator=(QueryHelper &&rhs)
3029 {
3030     std::swap(mDynamicQueryPool, rhs.mDynamicQueryPool);
3031     std::swap(mQueryPoolIndex, rhs.mQueryPoolIndex);
3032     std::swap(mQuery, rhs.mQuery);
3033     std::swap(mQueryCount, rhs.mQueryCount);
3034     return *this;
3035 }
3036 
3037 void QueryHelper::init(const DynamicQueryPool *dynamicQueryPool,
3038                        const size_t queryPoolIndex,
3039                        uint32_t query,
3040                        uint32_t queryCount)
3041 {
3042     mDynamicQueryPool = dynamicQueryPool;
3043     mQueryPoolIndex   = queryPoolIndex;
3044     mQuery            = query;
3045     mQueryCount       = queryCount;
3046 
3047     ASSERT(mQueryCount <= gl::IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS);
3048 }
3049 
3050 void QueryHelper::deinit()
3051 {
3052     mDynamicQueryPool = nullptr;
3053     mQueryPoolIndex   = 0;
3054     mQuery            = 0;
3055     mQueryCount       = 0;
3056     mUse.release();
3057     mUse.init();
3058     mStatus = QueryStatus::Inactive;
3059 }
3060 
3061 void QueryHelper::beginQueryImpl(ContextVk *contextVk,
3062                                  CommandBuffer *resetCommandBuffer,
3063                                  CommandBuffer *commandBuffer)
3064 {
3065     ASSERT(mStatus != QueryStatus::Active);
3066     const QueryPool &queryPool = getQueryPool();
3067     resetQueryPoolImpl(contextVk, queryPool, resetCommandBuffer);
3068     commandBuffer->beginQuery(queryPool, mQuery, 0);
3069     mStatus = QueryStatus::Active;
3070 }
3071 
3072 void QueryHelper::endQueryImpl(ContextVk *contextVk, CommandBuffer *commandBuffer)
3073 {
3074     ASSERT(mStatus != QueryStatus::Ended);
3075     commandBuffer->endQuery(getQueryPool(), mQuery);
3076     mStatus = QueryStatus::Ended;
3077     // Query results are available after endQuery; retain this query so that its serial is
3078     // updated, which is used to indicate that query results are (or will be) available.
3079     retain(&contextVk->getResourceUseList());
3080 }
3081 
3082 angle::Result QueryHelper::beginQuery(ContextVk *contextVk)
3083 {
3084     if (contextVk->hasStartedRenderPass())
3085     {
3086         ANGLE_TRY(contextVk->flushCommandsAndEndRenderPass(
3087             RenderPassClosureReason::BeginNonRenderPassQuery));
3088     }
3089 
3090     CommandBuffer *commandBuffer;
3091     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &commandBuffer));
3092 
3093     ANGLE_TRY(contextVk->handleGraphicsEventLog(rx::GraphicsEventCmdBuf::InOutsideCmdBufQueryCmd));
3094 
3095     beginQueryImpl(contextVk, commandBuffer, commandBuffer);
3096 
3097     return angle::Result::Continue;
3098 }
3099 
3100 angle::Result QueryHelper::endQuery(ContextVk *contextVk)
3101 {
3102     if (contextVk->hasStartedRenderPass())
3103     {
3104         ANGLE_TRY(contextVk->flushCommandsAndEndRenderPass(
3105             RenderPassClosureReason::EndNonRenderPassQuery));
3106     }
3107 
3108     CommandBuffer *commandBuffer;
3109     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &commandBuffer));
3110 
3111     ANGLE_TRY(contextVk->handleGraphicsEventLog(rx::GraphicsEventCmdBuf::InOutsideCmdBufQueryCmd));
3112 
3113     endQueryImpl(contextVk, commandBuffer);
3114 
3115     return angle::Result::Continue;
3116 }
3117 
3118 template <typename CommandBufferT>
3119 void QueryHelper::resetQueryPoolImpl(ContextVk *contextVk,
3120                                      const QueryPool &queryPool,
3121                                      CommandBufferT *commandBuffer)
3122 {
3123     RendererVk *renderer = contextVk->getRenderer();
3124     if (vkResetQueryPoolEXT != nullptr && renderer->getFeatures().supportsHostQueryReset.enabled)
3125     {
3126         vkResetQueryPoolEXT(contextVk->getDevice(), queryPool.getHandle(), mQuery, mQueryCount);
3127     }
3128     else
3129     {
3130         commandBuffer->resetQueryPool(queryPool, mQuery, mQueryCount);
3131     }
3132 }
3133 
3134 angle::Result QueryHelper::beginRenderPassQuery(ContextVk *contextVk)
3135 {
3136     CommandBuffer *outsideRenderPassCommandBuffer;
3137     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &outsideRenderPassCommandBuffer));
3138 
3139     CommandBuffer *renderPassCommandBuffer =
3140         &contextVk->getStartedRenderPassCommands().getCommandBuffer();
3141 
3142     beginQueryImpl(contextVk, outsideRenderPassCommandBuffer, renderPassCommandBuffer);
3143 
3144     return angle::Result::Continue;
3145 }
3146 
3147 void QueryHelper::endRenderPassQuery(ContextVk *contextVk)
3148 {
3149     if (mStatus == QueryStatus::Active)
3150     {
3151         endQueryImpl(contextVk, &contextVk->getStartedRenderPassCommands().getCommandBuffer());
3152     }
3153 }
3154 
3155 angle::Result QueryHelper::flushAndWriteTimestamp(ContextVk *contextVk)
3156 {
3157     if (contextVk->hasStartedRenderPass())
3158     {
3159         ANGLE_TRY(
3160             contextVk->flushCommandsAndEndRenderPass(RenderPassClosureReason::TimestampQuery));
3161     }
3162 
3163     CommandBuffer *commandBuffer;
3164     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer({}, &commandBuffer));
3165     writeTimestamp(contextVk, commandBuffer);
3166     return angle::Result::Continue;
3167 }
3168 
3169 void QueryHelper::writeTimestampToPrimary(ContextVk *contextVk, PrimaryCommandBuffer *primary)
3170 {
3171     // Note that commands may not be flushed at this point.
3172 
3173     const QueryPool &queryPool = getQueryPool();
3174     resetQueryPoolImpl(contextVk, queryPool, primary);
3175     primary->writeTimestamp(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, queryPool, mQuery);
3176 }
3177 
3178 void QueryHelper::writeTimestamp(ContextVk *contextVk, CommandBuffer *commandBuffer)
3179 {
3180     const QueryPool &queryPool = getQueryPool();
3181     resetQueryPoolImpl(contextVk, queryPool, commandBuffer);
3182     commandBuffer->writeTimestamp(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, queryPool, mQuery);
3183     // Timestamp results are available immediately; retain this query so that its serial is
3184     // updated, which is used to indicate that query results are (or will be) available.
3185     retain(&contextVk->getResourceUseList());
3186 }
3187 
3188 bool QueryHelper::hasSubmittedCommands() const
3189 {
3190     return mUse.getSerial().valid();
3191 }
3192 
3193 angle::Result QueryHelper::getUint64ResultNonBlocking(ContextVk *contextVk,
3194                                                       QueryResult *resultOut,
3195                                                       bool *availableOut)
3196 {
3197     ASSERT(valid());
3198     VkResult result;
3199 
3200     // Ensure that we only wait if we have inserted a query in the command buffer. Otherwise we
3201     // would wait forever and trigger a GPU timeout.
3202     if (hasSubmittedCommands())
3203     {
3204         constexpr VkQueryResultFlags kFlags = VK_QUERY_RESULT_64_BIT;
3205         result                              = getResultImpl(contextVk, kFlags, resultOut);
3206     }
3207     else
3208     {
3209         result     = VK_SUCCESS;
3210         *resultOut = 0;
3211     }
3212 
3213     if (result == VK_NOT_READY)
3214     {
3215         *availableOut = false;
3216         return angle::Result::Continue;
3217     }
3218     else
3219     {
3220         ANGLE_VK_TRY(contextVk, result);
3221         *availableOut = true;
3222     }
3223     return angle::Result::Continue;
3224 }
3225 
3226 angle::Result QueryHelper::getUint64Result(ContextVk *contextVk, QueryResult *resultOut)
3227 {
3228     ASSERT(valid());
3229     if (hasSubmittedCommands())
3230     {
3231         constexpr VkQueryResultFlags kFlags = VK_QUERY_RESULT_64_BIT | VK_QUERY_RESULT_WAIT_BIT;
3232         ANGLE_VK_TRY(contextVk, getResultImpl(contextVk, kFlags, resultOut));
3233     }
3234     else
3235     {
3236         *resultOut = 0;
3237     }
3238     return angle::Result::Continue;
3239 }
3240 
3241 VkResult QueryHelper::getResultImpl(ContextVk *contextVk,
3242                                     const VkQueryResultFlags flags,
3243                                     QueryResult *resultOut)
3244 {
3245     std::array<uint64_t, 2 * gl::IMPLEMENTATION_ANGLE_MULTIVIEW_MAX_VIEWS> results;
3246 
3247     VkDevice device = contextVk->getDevice();
3248     VkResult result = getQueryPool().getResults(device, mQuery, mQueryCount, sizeof(results),
3249                                                 results.data(), sizeof(uint64_t), flags);
3250 
3251     if (result == VK_SUCCESS)
3252     {
3253         resultOut->setResults(results.data(), mQueryCount);
3254     }
3255 
3256     return result;
3257 }
3258 
3259 // DynamicSemaphorePool implementation
3260 DynamicSemaphorePool::DynamicSemaphorePool() = default;
3261 
3262 DynamicSemaphorePool::~DynamicSemaphorePool() = default;
3263 
3264 angle::Result DynamicSemaphorePool::init(ContextVk *contextVk, uint32_t poolSize)
3265 {
3266     ANGLE_TRY(initEntryPool(contextVk, poolSize));
3267     return angle::Result::Continue;
3268 }
3269 
3270 void DynamicSemaphorePool::destroy(VkDevice device)
3271 {
3272     destroyEntryPool(device);
3273 }
3274 
3275 angle::Result DynamicSemaphorePool::allocateSemaphore(ContextVk *contextVk,
3276                                                       SemaphoreHelper *semaphoreOut)
3277 {
3278     ASSERT(!semaphoreOut->getSemaphore());
3279 
3280     uint32_t currentPool  = 0;
3281     uint32_t currentEntry = 0;
3282     ANGLE_TRY(allocatePoolEntries(contextVk, 1, &currentPool, &currentEntry));
3283 
3284     semaphoreOut->init(currentPool, &getPool(currentPool)[currentEntry]);
3285 
3286     return angle::Result::Continue;
3287 }
3288 
3289 void DynamicSemaphorePool::freeSemaphore(ContextVk *contextVk, SemaphoreHelper *semaphore)
3290 {
3291     if (semaphore->getSemaphore())
3292     {
3293         onEntryFreed(contextVk, semaphore->getSemaphorePoolIndex());
3294         semaphore->deinit();
3295     }
3296 }
3297 
3298 angle::Result DynamicSemaphorePool::allocatePoolImpl(ContextVk *contextVk,
3299                                                      std::vector<Semaphore> &poolToAllocate,
3300                                                      uint32_t entriesToAllocate)
3301 {
3302     poolToAllocate.resize(entriesToAllocate);
3303     for (Semaphore &semaphore : poolToAllocate)
3304     {
3305         ANGLE_VK_TRY(contextVk, semaphore.init(contextVk->getDevice()));
3306     }
3307     return angle::Result::Continue;
3308 }
3309 
3310 void DynamicSemaphorePool::destroyPoolImpl(VkDevice device, std::vector<Semaphore> &poolToDestroy)
3311 {
3312     for (Semaphore &semaphore : poolToDestroy)
3313     {
3314         semaphore.destroy(device);
3315     }
3316 }
3317 
3318 // SemaphoreHelper implementation
3319 SemaphoreHelper::SemaphoreHelper() : mSemaphorePoolIndex(0), mSemaphore(0) {}
3320 
3321 SemaphoreHelper::~SemaphoreHelper() {}
3322 
3323 SemaphoreHelper::SemaphoreHelper(SemaphoreHelper &&other)
3324     : mSemaphorePoolIndex(other.mSemaphorePoolIndex), mSemaphore(other.mSemaphore)
3325 {
3326     other.mSemaphore = nullptr;
3327 }
3328 
3329 SemaphoreHelper &SemaphoreHelper::operator=(SemaphoreHelper &&other)
3330 {
3331     std::swap(mSemaphorePoolIndex, other.mSemaphorePoolIndex);
3332     std::swap(mSemaphore, other.mSemaphore);
3333     return *this;
3334 }
3335 
3336 void SemaphoreHelper::init(const size_t semaphorePoolIndex, const Semaphore *semaphore)
3337 {
3338     mSemaphorePoolIndex = semaphorePoolIndex;
3339     mSemaphore          = semaphore;
3340 }
3341 
3342 void SemaphoreHelper::deinit()
3343 {
3344     mSemaphorePoolIndex = 0;
3345     mSemaphore          = nullptr;
3346 }
3347 
3348 // LineLoopHelper implementation.
3349 LineLoopHelper::LineLoopHelper(RendererVk *renderer)
3350 {
3351     // We need to use an alignment of the maximum size we're going to allocate, which is
3352     // VK_INDEX_TYPE_UINT32. When we switch from a drawElement to a drawArray call, the allocations
3353     // can vary in size. According to the Vulkan spec, when calling vkCmdBindIndexBuffer: 'The
3354     // sum of offset and the address of the range of VkDeviceMemory object that is backing buffer,
3355     // must be a multiple of the type indicated by indexType'.
3356     mDynamicIndexBuffer.init(renderer, kLineLoopDynamicBufferUsage, sizeof(uint32_t),
3357                              kLineLoopDynamicBufferInitialSize, true,
3358                              DynamicBufferPolicy::OneShotUse);
3359     mDynamicIndirectBuffer.init(renderer, kLineLoopDynamicIndirectBufferUsage, sizeof(uint32_t),
3360                                 kLineLoopDynamicIndirectBufferInitialSize, true,
3361                                 DynamicBufferPolicy::OneShotUse);
3362 }
3363 
3364 LineLoopHelper::~LineLoopHelper() = default;
3365 
3366 angle::Result LineLoopHelper::getIndexBufferForDrawArrays(ContextVk *contextVk,
3367                                                           uint32_t clampedVertexCount,
3368                                                           GLint firstVertex,
3369                                                           BufferHelper **bufferOut,
3370                                                           VkDeviceSize *offsetOut)
3371 {
3372     uint32_t *indices    = nullptr;
3373     size_t allocateBytes = sizeof(uint32_t) * (static_cast<size_t>(clampedVertexCount) + 1);
3374 
3375     mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
3376     ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes,
3377                                            reinterpret_cast<uint8_t **>(&indices), nullptr,
3378                                            offsetOut, nullptr));
3379     *bufferOut = mDynamicIndexBuffer.getCurrentBuffer();
3380 
3381     // Note: there could be an overflow in this addition.
3382     uint32_t unsignedFirstVertex = static_cast<uint32_t>(firstVertex);
3383     uint32_t vertexCount         = (clampedVertexCount + unsignedFirstVertex);
3384     for (uint32_t vertexIndex = unsignedFirstVertex; vertexIndex < vertexCount; vertexIndex++)
3385     {
3386         *indices++ = vertexIndex;
3387     }
3388     *indices = unsignedFirstVertex;
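    // Illustrative example (annotation, not part of the original source): with firstVertex = 2 and
    // clampedVertexCount = 3, the loop above writes indices 2, 3, 4 and the final store appends 2
    // again, closing the loop with clampedVertexCount + 1 = 4 indices, matching the allocation
    // above.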
3389 
3390     // Since we are not using the VK_MEMORY_PROPERTY_HOST_COHERENT_BIT flag when creating the
3391     // device memory in the StreamingBuffer, we always need to make sure we flush it after
3392     // writing.
3393     ANGLE_TRY(mDynamicIndexBuffer.flush(contextVk));
3394 
3395     return angle::Result::Continue;
3396 }
3397 
3398 angle::Result LineLoopHelper::getIndexBufferForElementArrayBuffer(ContextVk *contextVk,
3399                                                                   BufferVk *elementArrayBufferVk,
3400                                                                   gl::DrawElementsType glIndexType,
3401                                                                   int indexCount,
3402                                                                   intptr_t elementArrayOffset,
3403                                                                   BufferHelper **bufferOut,
3404                                                                   VkDeviceSize *bufferOffsetOut,
3405                                                                   uint32_t *indexCountOut)
3406 {
3407     if (glIndexType == gl::DrawElementsType::UnsignedByte ||
3408         contextVk->getState().isPrimitiveRestartEnabled())
3409     {
3410         ANGLE_TRACE_EVENT0("gpu.angle", "LineLoopHelper::getIndexBufferForElementArrayBuffer");
3411 
3412         void *srcDataMapping = nullptr;
3413         ANGLE_TRY(elementArrayBufferVk->mapImpl(contextVk, GL_MAP_READ_BIT, &srcDataMapping));
3414         ANGLE_TRY(streamIndices(contextVk, glIndexType, indexCount,
3415                                 static_cast<const uint8_t *>(srcDataMapping) + elementArrayOffset,
3416                                 bufferOut, bufferOffsetOut, indexCountOut));
3417         ANGLE_TRY(elementArrayBufferVk->unmapImpl(contextVk));
3418         return angle::Result::Continue;
3419     }
3420 
3421     *indexCountOut = indexCount + 1;
3422 
3423     uint32_t *indices    = nullptr;
3424     size_t unitSize      = contextVk->getVkIndexTypeSize(glIndexType);
3425     size_t allocateBytes = unitSize * (indexCount + 1) + 1;
3426 
3427     mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
3428     ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes,
3429                                            reinterpret_cast<uint8_t **>(&indices), nullptr,
3430                                            bufferOffsetOut, nullptr));
3431     *bufferOut = mDynamicIndexBuffer.getCurrentBuffer();
3432 
3433     VkDeviceSize sourceBufferOffset = 0;
3434     BufferHelper *sourceBuffer = &elementArrayBufferVk->getBufferAndOffset(&sourceBufferOffset);
3435 
3436     VkDeviceSize sourceOffset = static_cast<VkDeviceSize>(elementArrayOffset) + sourceBufferOffset;
3437     uint64_t unitCount        = static_cast<VkDeviceSize>(indexCount);
3438     angle::FixedVector<VkBufferCopy, 3> copies = {
3439         {sourceOffset, *bufferOffsetOut, unitCount * unitSize},
3440         {sourceOffset, *bufferOffsetOut + unitCount * unitSize, unitSize},
3441     };
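    // Illustrative example (annotation, not part of the original source): the first copy brings
    // over all indexCount source indices; the second appends one more copy of the first index so
    // the line loop closes.  For indexCount = 4 and unitSize = 2 (uint16), that is an 8-byte copy
    // followed by a 2-byte copy of index 0 placed immediately after it.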
3442 
3443     vk::CommandBufferAccess access;
3444     access.onBufferTransferWrite(*bufferOut);
3445     access.onBufferTransferRead(sourceBuffer);
3446 
3447     vk::CommandBuffer *commandBuffer;
3448     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
3449 
3450     commandBuffer->copyBuffer(sourceBuffer->getBuffer(), (*bufferOut)->getBuffer(),
3451                               static_cast<uint32_t>(copies.size()), copies.data());
3452 
3453     ANGLE_TRY(mDynamicIndexBuffer.flush(contextVk));
3454     return angle::Result::Continue;
3455 }
3456 
3457 angle::Result LineLoopHelper::streamIndices(ContextVk *contextVk,
3458                                             gl::DrawElementsType glIndexType,
3459                                             GLsizei indexCount,
3460                                             const uint8_t *srcPtr,
3461                                             BufferHelper **bufferOut,
3462                                             VkDeviceSize *bufferOffsetOut,
3463                                             uint32_t *indexCountOut)
3464 {
3465     size_t unitSize = contextVk->getVkIndexTypeSize(glIndexType);
3466 
3467     uint8_t *indices = nullptr;
3468 
3469     uint32_t numOutIndices = indexCount + 1;
3470     if (contextVk->getState().isPrimitiveRestartEnabled())
3471     {
3472         numOutIndices = GetLineLoopWithRestartIndexCount(glIndexType, indexCount, srcPtr);
3473     }
3474     *indexCountOut       = numOutIndices;
3475     size_t allocateBytes = unitSize * numOutIndices;
3476     ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes,
3477                                            reinterpret_cast<uint8_t **>(&indices), nullptr,
3478                                            bufferOffsetOut, nullptr));
3479     *bufferOut = mDynamicIndexBuffer.getCurrentBuffer();
3480 
3481     if (contextVk->getState().isPrimitiveRestartEnabled())
3482     {
3483         HandlePrimitiveRestart(contextVk, glIndexType, indexCount, srcPtr, indices);
3484     }
3485     else
3486     {
3487         if (contextVk->shouldConvertUint8VkIndexType(glIndexType))
3488         {
3489             // If vulkan doesn't support uint8 index types, we need to emulate it.
3490             VkIndexType indexType = contextVk->getVkIndexType(glIndexType);
3491             ASSERT(indexType == VK_INDEX_TYPE_UINT16);
3492             uint16_t *indicesDst = reinterpret_cast<uint16_t *>(indices);
3493             for (int i = 0; i < indexCount; i++)
3494             {
3495                 indicesDst[i] = srcPtr[i];
3496             }
3497 
3498             indicesDst[indexCount] = srcPtr[0];
3499         }
3500         else
3501         {
3502             memcpy(indices, srcPtr, unitSize * indexCount);
3503             memcpy(indices + unitSize * indexCount, srcPtr, unitSize);
3504         }
3505     }
3506 
3507     ANGLE_TRY(mDynamicIndexBuffer.flush(contextVk));
3508     return angle::Result::Continue;
3509 }
3510 
3511 angle::Result LineLoopHelper::streamIndicesIndirect(ContextVk *contextVk,
3512                                                     gl::DrawElementsType glIndexType,
3513                                                     BufferHelper *indexBuffer,
3514                                                     VkDeviceSize indexBufferOffset,
3515                                                     BufferHelper *indirectBuffer,
3516                                                     VkDeviceSize indirectBufferOffset,
3517                                                     BufferHelper **indexBufferOut,
3518                                                     VkDeviceSize *indexBufferOffsetOut,
3519                                                     BufferHelper **indirectBufferOut,
3520                                                     VkDeviceSize *indirectBufferOffsetOut)
3521 {
3522     size_t unitSize      = contextVk->getVkIndexTypeSize(glIndexType);
3523     size_t allocateBytes = static_cast<size_t>(indexBuffer->getSize() + unitSize);
3524 
3525     if (contextVk->getState().isPrimitiveRestartEnabled())
3526     {
3527         // If primitive restart, new index buffer is 135% the size of the original index buffer. The
3528         // smallest lineloop with primitive restart is 3 indices (point 1, point 2 and restart
3529         // value) when converted to linelist becomes 4 vertices. Expansion of 4/3. Any larger
3530         // lineloops would have less overhead and require less extra space. Any incomplete
3531         // primitives can be dropped or left incomplete and thus not increase the size of the
3532         // destination index buffer. Since we don't know the number of indices being used we'll use
3533         // the size of the index buffer as allocated as the index count.
3534         size_t numInputIndices    = static_cast<size_t>(indexBuffer->getSize() / unitSize);
3535         size_t numNewInputIndices = ((numInputIndices * 4) / 3) + 1;
3536         allocateBytes             = static_cast<size_t>(numNewInputIndices * unitSize);
3537     }
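    // Illustrative example (annotation, not part of the original source): for the smallest
    // restarted loop, numInputIndices = 3 (two vertices plus the restart value), so
    // numNewInputIndices = (3 * 4) / 3 + 1 = 5, which covers the 4 line-list indices
    // (v0, v1, v1, v0) produced for that loop.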
3538 
3539     mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
3540     mDynamicIndirectBuffer.releaseInFlightBuffers(contextVk);
3541 
3542     ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes, nullptr, nullptr,
3543                                            indexBufferOffsetOut, nullptr));
3544     *indexBufferOut = mDynamicIndexBuffer.getCurrentBuffer();
3545 
3546     ANGLE_TRY(mDynamicIndirectBuffer.allocate(contextVk, sizeof(VkDrawIndexedIndirectCommand),
3547                                               nullptr, nullptr, indirectBufferOffsetOut, nullptr));
3548     *indirectBufferOut = mDynamicIndirectBuffer.getCurrentBuffer();
3549 
3550     BufferHelper *destIndexBuffer    = mDynamicIndexBuffer.getCurrentBuffer();
3551     BufferHelper *destIndirectBuffer = mDynamicIndirectBuffer.getCurrentBuffer();
3552 
3553     // Copy relevant section of the source into destination at allocated offset.  Note that the
3554     // offset returned by allocate() above is in bytes. As is the indices offset pointer.
3555     UtilsVk::ConvertLineLoopIndexIndirectParameters params = {};
3556     params.indirectBufferOffset    = static_cast<uint32_t>(indirectBufferOffset);
3557     params.dstIndirectBufferOffset = static_cast<uint32_t>(*indirectBufferOffsetOut);
3558     params.srcIndexBufferOffset    = static_cast<uint32_t>(indexBufferOffset);
3559     params.dstIndexBufferOffset    = static_cast<uint32_t>(*indexBufferOffsetOut);
3560     params.indicesBitsWidth        = static_cast<uint32_t>(unitSize * 8);
3561 
3562     ANGLE_TRY(contextVk->getUtils().convertLineLoopIndexIndirectBuffer(
3563         contextVk, indirectBuffer, destIndirectBuffer, destIndexBuffer, indexBuffer, params));
3564 
3565     return angle::Result::Continue;
3566 }
3567 
3568 angle::Result LineLoopHelper::streamArrayIndirect(ContextVk *contextVk,
3569                                                   size_t vertexCount,
3570                                                   BufferHelper *arrayIndirectBuffer,
3571                                                   VkDeviceSize arrayIndirectBufferOffset,
3572                                                   BufferHelper **indexBufferOut,
3573                                                   VkDeviceSize *indexBufferOffsetOut,
3574                                                   BufferHelper **indexIndirectBufferOut,
3575                                                   VkDeviceSize *indexIndirectBufferOffsetOut)
3576 {
3577     auto unitSize        = sizeof(uint32_t);
3578     size_t allocateBytes = static_cast<size_t>((vertexCount + 1) * unitSize);
3579 
3580     mDynamicIndexBuffer.releaseInFlightBuffers(contextVk);
3581     mDynamicIndirectBuffer.releaseInFlightBuffers(contextVk);
3582 
3583     ANGLE_TRY(mDynamicIndexBuffer.allocate(contextVk, allocateBytes, nullptr, nullptr,
3584                                            indexBufferOffsetOut, nullptr));
3585     *indexBufferOut = mDynamicIndexBuffer.getCurrentBuffer();
3586 
3587     ANGLE_TRY(mDynamicIndirectBuffer.allocate(contextVk, sizeof(VkDrawIndexedIndirectCommand),
3588                                               nullptr, nullptr, indexIndirectBufferOffsetOut,
3589                                               nullptr));
3590     *indexIndirectBufferOut = mDynamicIndirectBuffer.getCurrentBuffer();
3591 
3592     BufferHelper *destIndexBuffer    = mDynamicIndexBuffer.getCurrentBuffer();
3593     BufferHelper *destIndirectBuffer = mDynamicIndirectBuffer.getCurrentBuffer();
3594 
3595     // Copy relevant section of the source into destination at allocated offset.  Note that the
3596     // offset returned by allocate() above is in bytes. As is the indices offset pointer.
3597     UtilsVk::ConvertLineLoopArrayIndirectParameters params = {};
3598     params.indirectBufferOffset    = static_cast<uint32_t>(arrayIndirectBufferOffset);
3599     params.dstIndirectBufferOffset = static_cast<uint32_t>(*indexIndirectBufferOffsetOut);
3600     params.dstIndexBufferOffset    = static_cast<uint32_t>(*indexBufferOffsetOut);
3601 
3602     ANGLE_TRY(contextVk->getUtils().convertLineLoopArrayIndirectBuffer(
3603         contextVk, arrayIndirectBuffer, destIndirectBuffer, destIndexBuffer, params));
3604 
3605     return angle::Result::Continue;
3606 }
3607 
3608 void LineLoopHelper::release(ContextVk *contextVk)
3609 {
3610     mDynamicIndexBuffer.release(contextVk->getRenderer());
3611     mDynamicIndirectBuffer.release(contextVk->getRenderer());
3612 }
3613 
3614 void LineLoopHelper::destroy(RendererVk *renderer)
3615 {
3616     mDynamicIndexBuffer.destroy(renderer);
3617     mDynamicIndirectBuffer.destroy(renderer);
3618 }
3619 
3620 // static
3621 void LineLoopHelper::Draw(uint32_t count, uint32_t baseVertex, CommandBuffer *commandBuffer)
3622 {
3623     // Our first index is always 0 because that's how we set it up in createIndexBuffer*.
3624     commandBuffer->drawIndexedBaseVertex(count, baseVertex);
3625 }
3626 
3627 PipelineStage GetPipelineStage(gl::ShaderType stage)
3628 {
3629     return kPipelineStageShaderMap[stage];
3630 }
3631 
3632 // PipelineBarrier implementation.
3633 void PipelineBarrier::addDiagnosticsString(std::ostringstream &out) const
3634 {
3635     if (mMemoryBarrierSrcAccess != 0 || mMemoryBarrierDstAccess != 0)
3636     {
3637         out << "Src: 0x" << std::hex << mMemoryBarrierSrcAccess << " &rarr; Dst: 0x" << std::hex
3638             << mMemoryBarrierDstAccess << std::endl;
3639     }
3640 }
3641 
3642 // BufferHelper implementation.
3643 BufferHelper::BufferHelper()
3644     : mMemoryPropertyFlags{},
3645       mSize(0),
3646       mCurrentQueueFamilyIndex(std::numeric_limits<uint32_t>::max()),
3647       mCurrentWriteAccess(0),
3648       mCurrentReadAccess(0),
3649       mCurrentWriteStages(0),
3650       mCurrentReadStages(0),
3651       mSerial()
3652 {}
3653 
3654 BufferHelper::~BufferHelper() = default;
3655 
3656 angle::Result BufferHelper::init(ContextVk *contextVk,
3657                                  const VkBufferCreateInfo &requestedCreateInfo,
3658                                  VkMemoryPropertyFlags memoryPropertyFlags)
3659 {
3660     RendererVk *renderer = contextVk->getRenderer();
3661 
3662     mSerial = renderer->getResourceSerialFactory().generateBufferSerial();
3663     mSize   = requestedCreateInfo.size;
3664 
3665     VkBufferCreateInfo modifiedCreateInfo;
3666     const VkBufferCreateInfo *createInfo = &requestedCreateInfo;
3667 
3668     if (renderer->getFeatures().padBuffersToMaxVertexAttribStride.enabled)
3669     {
3670         const VkDeviceSize maxVertexAttribStride = renderer->getMaxVertexAttribStride();
3671         ASSERT(maxVertexAttribStride);
3672         modifiedCreateInfo = requestedCreateInfo;
3673         modifiedCreateInfo.size += maxVertexAttribStride;
3674         createInfo = &modifiedCreateInfo;
3675     }
3676 
3677     VkMemoryPropertyFlags requiredFlags =
3678         (memoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
3679     VkMemoryPropertyFlags preferredFlags =
3680         (memoryPropertyFlags & (~VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT));
3681 
3682     BufferMemoryAllocator &bufferMemoryAllocator = renderer->getBufferMemoryAllocator();
3683     bool persistentlyMapped = renderer->getFeatures().persistentlyMappedBuffers.enabled;
3684 
3685     // Check that the allocation is not too large.
3686     uint32_t memoryTypeIndex = 0;
3687     ANGLE_VK_TRY(contextVk, bufferMemoryAllocator.findMemoryTypeIndexForBufferInfo(
3688                                 renderer, *createInfo, requiredFlags, preferredFlags,
3689                                 persistentlyMapped, &memoryTypeIndex));
3690 
3691     VkDeviceSize heapSize =
3692         renderer->getMemoryProperties().getHeapSizeForMemoryType(memoryTypeIndex);
3693 
3694     ANGLE_VK_CHECK(contextVk, createInfo->size <= heapSize, VK_ERROR_OUT_OF_DEVICE_MEMORY);
3695 
3696     ANGLE_VK_TRY(contextVk, bufferMemoryAllocator.createBuffer(renderer, *createInfo, requiredFlags,
3697                                                                preferredFlags, persistentlyMapped,
3698                                                                &memoryTypeIndex, &mBuffer,
3699                                                                mMemory.getMemoryObject()));
3700     bufferMemoryAllocator.getMemoryTypeProperties(renderer, memoryTypeIndex, &mMemoryPropertyFlags);
3701     mCurrentQueueFamilyIndex = renderer->getQueueFamilyIndex();
3702 
3703     if (renderer->getFeatures().allocateNonZeroMemory.enabled)
3704     {
3705         // This memory can't be mapped, so the buffer must be marked as a transfer destination
3706         // and initialized to a non-zero value through a staging resource.  If the memory is
3707         // mappable, the initialization is done in AllocateBufferMemory instead.
3708         if ((mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0 &&
3709             (requestedCreateInfo.usage & VK_BUFFER_USAGE_TRANSFER_DST_BIT) != 0)
3710         {
3711             ANGLE_TRY(initializeNonZeroMemory(contextVk, createInfo->size));
3712         }
3713         else if ((mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
3714         {
3715             const Allocator &allocator = renderer->getAllocator();
3716             // Can map the memory.
3717             // Pick an arbitrary value to initialize non-zero memory for sanitization.
3718             constexpr int kNonZeroInitValue = 55;
3719             ANGLE_TRY(InitMappableAllocation(contextVk, allocator, mMemory.getMemoryObject(), mSize,
3720                                              kNonZeroInitValue, mMemoryPropertyFlags));
3721         }
3722     }
3723 
3724     ANGLE_TRY(mMemory.init());
3725 
3726     return angle::Result::Continue;
3727 }
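// Illustrative sketch (not part of the upstream file) of the required/preferred split performed in
// BufferHelper::init above.  Assuming a caller asks for DEVICE_LOCAL | HOST_VISIBLE memory:
//
//   VkMemoryPropertyFlags requested = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
//                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
//   VkMemoryPropertyFlags required  = requested & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;   // must be mappable
//   VkMemoryPropertyFlags preferred = requested & ~VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;  // device-local is only preferred
//
// The allocator may therefore fall back to a non-device-local heap, but never to one that is not
// host-visible when host visibility was requested.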
3728 
3729 angle::Result BufferHelper::initExternal(ContextVk *contextVk,
3730                                          VkMemoryPropertyFlags memoryProperties,
3731                                          const VkBufferCreateInfo &requestedCreateInfo,
3732                                          GLeglClientBufferEXT clientBuffer)
3733 {
3734     ASSERT(IsAndroid());
3735 
3736     RendererVk *renderer = contextVk->getRenderer();
3737 
3738     mSerial = renderer->getResourceSerialFactory().generateBufferSerial();
3739     mSize   = requestedCreateInfo.size;
3740 
3741     VkBufferCreateInfo modifiedCreateInfo             = requestedCreateInfo;
3742     VkExternalMemoryBufferCreateInfo externCreateInfo = {};
3743     externCreateInfo.sType = VK_STRUCTURE_TYPE_EXTERNAL_MEMORY_BUFFER_CREATE_INFO;
3744     externCreateInfo.handleTypes =
3745         VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
3746     externCreateInfo.pNext   = nullptr;
3747     modifiedCreateInfo.pNext = &externCreateInfo;
3748 
3749     ANGLE_VK_TRY(contextVk, mBuffer.init(renderer->getDevice(), modifiedCreateInfo));
3750 
3751     ANGLE_TRY(InitAndroidExternalMemory(contextVk, clientBuffer, memoryProperties, &mBuffer,
3752                                         &mMemoryPropertyFlags, mMemory.getExternalMemoryObject()));
3753 
3754     ANGLE_TRY(mMemory.initExternal(clientBuffer));
3755 
3756     mCurrentQueueFamilyIndex = renderer->getQueueFamilyIndex();
3757 
3758     return angle::Result::Continue;
3759 }
3760 
3761 angle::Result BufferHelper::initializeNonZeroMemory(Context *context, VkDeviceSize size)
3762 {
3763     // Staging buffer memory is non-zero-initialized in 'init'.
3764     StagingBuffer stagingBuffer;
3765     ANGLE_TRY(stagingBuffer.init(context, size, StagingUsage::Both));
3766 
3767     RendererVk *renderer = context->getRenderer();
3768 
3769     PrimaryCommandBuffer commandBuffer;
3770     ANGLE_TRY(renderer->getCommandBufferOneOff(context, false, &commandBuffer));
3771 
3772     // Queue a DMA copy.
3773     VkBufferCopy copyRegion = {};
3774     copyRegion.srcOffset    = 0;
3775     copyRegion.dstOffset    = 0;
3776     copyRegion.size         = size;
3777 
3778     commandBuffer.copyBuffer(stagingBuffer.getBuffer(), mBuffer, 1, &copyRegion);
3779 
3780     ANGLE_VK_TRY(context, commandBuffer.end());
3781 
3782     Serial serial;
3783     ANGLE_TRY(renderer->queueSubmitOneOff(context, std::move(commandBuffer), false,
3784                                           egl::ContextPriority::Medium, nullptr, 0, nullptr,
3785                                           vk::SubmitPolicy::AllowDeferred, &serial));
3786 
3787     stagingBuffer.collectGarbage(renderer, serial);
3788     // Update both SharedResourceUse objects, since mReadOnlyUse tracks when the buffer can be
3789     // destroyed, and mReadWriteUse tracks when the write has completed.
3790     mReadOnlyUse.updateSerialOneOff(serial);
3791     mReadWriteUse.updateSerialOneOff(serial);
3792 
3793     return angle::Result::Continue;
3794 }
3795 
3796 void BufferHelper::destroy(RendererVk *renderer)
3797 {
3798     VkDevice device = renderer->getDevice();
3799     unmap(renderer);
3800     mSize = 0;
3801 
3802     mBuffer.destroy(device);
3803     mMemory.destroy(renderer);
3804 }
3805 
3806 void BufferHelper::release(RendererVk *renderer)
3807 {
3808     unmap(renderer);
3809     mSize = 0;
3810 
3811     renderer->collectGarbageAndReinit(&mReadOnlyUse, &mBuffer, mMemory.getExternalMemoryObject(),
3812                                       mMemory.getMemoryObject());
3813     mReadWriteUse.release();
3814     mReadWriteUse.init();
3815 }
3816 
3817 angle::Result BufferHelper::copyFromBuffer(ContextVk *contextVk,
3818                                            BufferHelper *srcBuffer,
3819                                            uint32_t regionCount,
3820                                            const VkBufferCopy *copyRegions)
3821 {
3822     // Check for self-dependency.
3823     vk::CommandBufferAccess access;
3824     if (srcBuffer->getBufferSerial() == getBufferSerial())
3825     {
3826         access.onBufferSelfCopy(this);
3827     }
3828     else
3829     {
3830         access.onBufferTransferRead(srcBuffer);
3831         access.onBufferTransferWrite(this);
3832     }
3833 
3834     CommandBuffer *commandBuffer;
3835     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
3836 
3837     commandBuffer->copyBuffer(srcBuffer->getBuffer(), mBuffer, regionCount, copyRegions);
3838 
3839     return angle::Result::Continue;
3840 }
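// Hypothetical caller sketch (identifiers "contextVk", "src" and "dst" are assumed): copying the
// first 256 bytes of one BufferHelper into another through the function above.
//
//   VkBufferCopy region = {};
//   region.srcOffset    = 0;
//   region.dstOffset    = 0;
//   region.size         = 256;
//   ANGLE_TRY(dst->copyFromBuffer(contextVk, src, 1, &region));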
3841 
3842 void BufferHelper::unmap(RendererVk *renderer)
3843 {
3844     mMemory.unmap(renderer);
3845 }
3846 
3847 angle::Result BufferHelper::flush(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size)
3848 {
3849     bool hostVisible  = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3850     bool hostCoherent = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3851     if (hostVisible && !hostCoherent)
3852     {
3853         mMemory.flush(renderer, mMemoryPropertyFlags, offset, size);
3854     }
3855     return angle::Result::Continue;
3856 }
3857 
3858 angle::Result BufferHelper::invalidate(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size)
3859 {
3860     bool hostVisible  = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
3861     bool hostCoherent = mMemoryPropertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
3862     if (hostVisible && !hostCoherent)
3863     {
3864         mMemory.invalidate(renderer, mMemoryPropertyFlags, offset, size);
3865     }
3866     return angle::Result::Continue;
3867 }
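// Usage note with a minimal sketch (assumes a mapped, non-coherent BufferHelper "buffer" and its
// mapped pointer "mappedPtr"; not part of the upstream file).  flush() publishes CPU writes to the
// GPU and invalidate() makes GPU writes visible to the CPU; both are no-ops for coherent memory.
//
//   memcpy(mappedPtr, srcData, dataSize);
//   ANGLE_TRY(buffer.flush(renderer, 0, dataSize));        // host write -> device read
//   ...
//   ANGLE_TRY(buffer.invalidate(renderer, 0, dataSize));   // device write -> host read
//   memcpy(dstData, mappedPtr, dataSize);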
3868 
3869 void BufferHelper::changeQueue(uint32_t newQueueFamilyIndex, CommandBuffer *commandBuffer)
3870 {
3871     VkBufferMemoryBarrier bufferMemoryBarrier = {};
3872     bufferMemoryBarrier.sType                 = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
3873     bufferMemoryBarrier.srcAccessMask         = 0;
3874     bufferMemoryBarrier.dstAccessMask         = 0;
3875     bufferMemoryBarrier.srcQueueFamilyIndex   = mCurrentQueueFamilyIndex;
3876     bufferMemoryBarrier.dstQueueFamilyIndex   = newQueueFamilyIndex;
3877     bufferMemoryBarrier.buffer                = mBuffer.getHandle();
3878     bufferMemoryBarrier.offset                = 0;
3879     bufferMemoryBarrier.size                  = VK_WHOLE_SIZE;
3880 
3881     commandBuffer->bufferBarrier(VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
3882                                  VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, &bufferMemoryBarrier);
3883 
3884     mCurrentQueueFamilyIndex = newQueueFamilyIndex;
3885 }
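// Note (general Vulkan rule, not specific to this helper): a queue-family ownership transfer needs
// a release barrier recorded on the source queue and a matching acquire barrier on the destination
// queue.  acquireFromExternal()/releaseToExternal() below record ANGLE's half of that pair; the
// external owner is expected to record the other half.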
3886 
3887 void BufferHelper::acquireFromExternal(ContextVk *contextVk,
3888                                        uint32_t externalQueueFamilyIndex,
3889                                        uint32_t rendererQueueFamilyIndex,
3890                                        CommandBuffer *commandBuffer)
3891 {
3892     mCurrentQueueFamilyIndex = externalQueueFamilyIndex;
3893 
3894     retainReadWrite(&contextVk->getResourceUseList());
3895     changeQueue(rendererQueueFamilyIndex, commandBuffer);
3896 }
3897 
3898 void BufferHelper::releaseToExternal(ContextVk *contextVk,
3899                                      uint32_t rendererQueueFamilyIndex,
3900                                      uint32_t externalQueueFamilyIndex,
3901                                      CommandBuffer *commandBuffer)
3902 {
3903     ASSERT(mCurrentQueueFamilyIndex == rendererQueueFamilyIndex);
3904 
3905     retainReadWrite(&contextVk->getResourceUseList());
3906     changeQueue(externalQueueFamilyIndex, commandBuffer);
3907 }
3908 
3909 bool BufferHelper::isReleasedToExternal() const
3910 {
3911 #if !defined(ANGLE_PLATFORM_MACOS) && !defined(ANGLE_PLATFORM_ANDROID)
3912     return IsExternalQueueFamily(mCurrentQueueFamilyIndex);
3913 #else
3914     // TODO(anglebug.com/4635): Implement external memory barriers on Mac/Android.
3915     return false;
3916 #endif
3917 }
3918 
3919 bool BufferHelper::recordReadBarrier(VkAccessFlags readAccessType,
3920                                      VkPipelineStageFlags readStage,
3921                                      PipelineBarrier *barrier)
3922 {
3923     bool barrierModified = false;
3924     // If there was a prior write and we are making a read that is either a new access type or from
3925     // a new stage, we need a barrier
3926     if (mCurrentWriteAccess != 0 && (((mCurrentReadAccess & readAccessType) != readAccessType) ||
3927                                      ((mCurrentReadStages & readStage) != readStage)))
3928     {
3929         barrier->mergeMemoryBarrier(mCurrentWriteStages, readStage, mCurrentWriteAccess,
3930                                     readAccessType);
3931         barrierModified = true;
3932     }
3933 
3934     // Accumulate new read usage.
3935     mCurrentReadAccess |= readAccessType;
3936     mCurrentReadStages |= readStage;
3937     return barrierModified;
3938 }
3939 
3940 bool BufferHelper::recordWriteBarrier(VkAccessFlags writeAccessType,
3941                                       VkPipelineStageFlags writeStage,
3942                                       PipelineBarrier *barrier)
3943 {
3944     bool barrierModified = false;
3945     // We don't need to check mCurrentReadStages here: if it is nonzero, mCurrentReadAccess
3946     // must also be nonzero, since stages are finer-grained than access types.
3947     ASSERT((!mCurrentReadStages && !mCurrentReadAccess) ||
3948            (mCurrentReadStages && mCurrentReadAccess));
3949     if (mCurrentReadAccess != 0 || mCurrentWriteAccess != 0)
3950     {
3951         barrier->mergeMemoryBarrier(mCurrentWriteStages | mCurrentReadStages, writeStage,
3952                                     mCurrentWriteAccess, writeAccessType);
3953         barrierModified = true;
3954     }
3955 
3956     // Reset usages on the new write.
3957     mCurrentWriteAccess = writeAccessType;
3958     mCurrentReadAccess  = 0;
3959     mCurrentWriteStages = writeStage;
3960     mCurrentReadStages  = 0;
3961     return barrierModified;
3962 }
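// Illustrative sequence (hypothetical caller code) showing how the read/write tracking above
// avoids redundant barriers:
//
//   PipelineBarrier barrier;
//   buffer.recordWriteBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
//                             VK_PIPELINE_STAGE_TRANSFER_BIT, &barrier);       // no prior use: false
//   buffer.recordReadBarrier(VK_ACCESS_SHADER_READ_BIT,
//                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, &barrier);   // read-after-write: true
//   buffer.recordReadBarrier(VK_ACCESS_SHADER_READ_BIT,
//                            VK_PIPELINE_STAGE_VERTEX_SHADER_BIT, &barrier);   // already covered: false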
3963 
3964 // ImageHelper implementation.
3965 ImageHelper::ImageHelper()
3966 {
3967     resetCachedProperties();
3968 }
3969 
3970 ImageHelper::ImageHelper(ImageHelper &&other)
3971     : Resource(std::move(other)),
3972       mImage(std::move(other.mImage)),
3973       mDeviceMemory(std::move(other.mDeviceMemory)),
3974       mImageType(other.mImageType),
3975       mTilingMode(other.mTilingMode),
3976       mCreateFlags(other.mCreateFlags),
3977       mUsage(other.mUsage),
3978       mExtents(other.mExtents),
3979       mRotatedAspectRatio(other.mRotatedAspectRatio),
3980       mIntendedFormatID(other.mIntendedFormatID),
3981       mActualFormatID(other.mActualFormatID),
3982       mSamples(other.mSamples),
3983       mImageSerial(other.mImageSerial),
3984       mCurrentLayout(other.mCurrentLayout),
3985       mCurrentQueueFamilyIndex(other.mCurrentQueueFamilyIndex),
3986       mLastNonShaderReadOnlyLayout(other.mLastNonShaderReadOnlyLayout),
3987       mCurrentShaderReadStageMask(other.mCurrentShaderReadStageMask),
3988       mYcbcrConversionDesc(other.mYcbcrConversionDesc),
3989       mYuvConversionSampler(std::move(other.mYuvConversionSampler)),
3990       mFirstAllocatedLevel(other.mFirstAllocatedLevel),
3991       mLayerCount(other.mLayerCount),
3992       mLevelCount(other.mLevelCount),
3993       mStagingBuffer(std::move(other.mStagingBuffer)),
3994       mSubresourceUpdates(std::move(other.mSubresourceUpdates)),
3995       mCurrentSingleClearValue(std::move(other.mCurrentSingleClearValue)),
3996       mContentDefined(std::move(other.mContentDefined)),
3997       mStencilContentDefined(std::move(other.mStencilContentDefined))
3998 {
3999     ASSERT(this != &other);
4000     other.resetCachedProperties();
4001 }
4002 
4003 ImageHelper::~ImageHelper()
4004 {
4005     ASSERT(!valid());
4006 }
4007 
4008 void ImageHelper::resetCachedProperties()
4009 {
4010     mImageType                   = VK_IMAGE_TYPE_2D;
4011     mTilingMode                  = VK_IMAGE_TILING_OPTIMAL;
4012     mCreateFlags                 = kVkImageCreateFlagsNone;
4013     mUsage                       = 0;
4014     mExtents                     = {};
4015     mRotatedAspectRatio          = false;
4016     mIntendedFormatID            = angle::FormatID::NONE;
4017     mActualFormatID              = angle::FormatID::NONE;
4018     mSamples                     = 1;
4019     mImageSerial                 = kInvalidImageSerial;
4020     mCurrentLayout               = ImageLayout::Undefined;
4021     mCurrentQueueFamilyIndex     = std::numeric_limits<uint32_t>::max();
4022     mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
4023     mCurrentShaderReadStageMask  = 0;
4024     mFirstAllocatedLevel         = gl::LevelIndex(0);
4025     mLayerCount                  = 0;
4026     mLevelCount                  = 0;
4027     mYcbcrConversionDesc.reset();
4028     mCurrentSingleClearValue.reset();
4029     mRenderPassUsageFlags.reset();
4030 
4031     setEntireContentUndefined();
4032 }
4033 
4034 void ImageHelper::setEntireContentDefined()
4035 {
4036     for (LevelContentDefinedMask &levelContentDefined : mContentDefined)
4037     {
4038         levelContentDefined.set();
4039     }
4040     for (LevelContentDefinedMask &levelContentDefined : mStencilContentDefined)
4041     {
4042         levelContentDefined.set();
4043     }
4044 }
4045 
4046 void ImageHelper::setEntireContentUndefined()
4047 {
4048     for (LevelContentDefinedMask &levelContentDefined : mContentDefined)
4049     {
4050         levelContentDefined.reset();
4051     }
4052     for (LevelContentDefinedMask &levelContentDefined : mStencilContentDefined)
4053     {
4054         levelContentDefined.reset();
4055     }
4056 }
4057 
4058 void ImageHelper::setContentDefined(LevelIndex levelStart,
4059                                     uint32_t levelCount,
4060                                     uint32_t layerStart,
4061                                     uint32_t layerCount,
4062                                     VkImageAspectFlags aspectFlags)
4063 {
4064     // Mark the range as defined.  Layers at index kMaxContentDefinedLayerCount (8) and above
4065     // are not tracked, and are always assumed to have defined contents.
4066     if (layerStart >= kMaxContentDefinedLayerCount)
4067     {
4068         return;
4069     }
4070 
4071     uint8_t layerRangeBits =
4072         GetContentDefinedLayerRangeBits(layerStart, layerCount, kMaxContentDefinedLayerCount);
4073 
4074     for (uint32_t levelOffset = 0; levelOffset < levelCount; ++levelOffset)
4075     {
4076         LevelIndex level = levelStart + levelOffset;
4077 
4078         if ((aspectFlags & ~VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
4079         {
4080             getLevelContentDefined(level) |= layerRangeBits;
4081         }
4082         if ((aspectFlags & VK_IMAGE_ASPECT_STENCIL_BIT) != 0)
4083         {
4084             getLevelStencilContentDefined(level) |= layerRangeBits;
4085         }
4086     }
4087 }
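// Worked example of the layer mask above (assuming GetContentDefinedLayerRangeBits sets one bit
// per layer in [layerStart, layerStart + layerCount), clamped to kMaxContentDefinedLayerCount):
// layerStart == 1 and layerCount == 3 yield layerRangeBits == 0b00001110, marking layers 1..3 as
// defined, while layers 8 and above are simply not tracked.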
4088 
4089 ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelContentDefined(LevelIndex level)
4090 {
4091     return mContentDefined[level.get()];
4092 }
4093 
4094 ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelStencilContentDefined(LevelIndex level)
4095 {
4096     return mStencilContentDefined[level.get()];
4097 }
4098 
4099 const ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelContentDefined(
4100     LevelIndex level) const
4101 {
4102     return mContentDefined[level.get()];
4103 }
4104 
4105 const ImageHelper::LevelContentDefinedMask &ImageHelper::getLevelStencilContentDefined(
4106     LevelIndex level) const
4107 {
4108     return mStencilContentDefined[level.get()];
4109 }
4110 
4111 void ImageHelper::initStagingBuffer(RendererVk *renderer,
4112                                     size_t imageCopyBufferAlignment,
4113                                     VkBufferUsageFlags usageFlags,
4114                                     size_t initialSize)
4115 {
4116     mStagingBuffer.init(renderer, usageFlags, imageCopyBufferAlignment, initialSize, true,
4117                         DynamicBufferPolicy::OneShotUse);
4118 }
4119 
4120 angle::Result ImageHelper::init(Context *context,
4121                                 gl::TextureType textureType,
4122                                 const VkExtent3D &extents,
4123                                 const Format &format,
4124                                 GLint samples,
4125                                 VkImageUsageFlags usage,
4126                                 gl::LevelIndex firstLevel,
4127                                 uint32_t mipLevels,
4128                                 uint32_t layerCount,
4129                                 bool isRobustResourceInitEnabled,
4130                                 bool hasProtectedContent)
4131 {
4132     return initExternal(context, textureType, extents, format.getIntendedFormatID(),
4133                         format.getActualRenderableImageFormatID(), samples, usage,
4134                         kVkImageCreateFlagsNone, ImageLayout::Undefined, nullptr, firstLevel,
4135                         mipLevels, layerCount, isRobustResourceInitEnabled, hasProtectedContent);
4136 }
4137 
4138 angle::Result ImageHelper::initMSAASwapchain(Context *context,
4139                                              gl::TextureType textureType,
4140                                              const VkExtent3D &extents,
4141                                              bool rotatedAspectRatio,
4142                                              const Format &format,
4143                                              GLint samples,
4144                                              VkImageUsageFlags usage,
4145                                              gl::LevelIndex firstLevel,
4146                                              uint32_t mipLevels,
4147                                              uint32_t layerCount,
4148                                              bool isRobustResourceInitEnabled,
4149                                              bool hasProtectedContent)
4150 {
4151     ANGLE_TRY(initExternal(context, textureType, extents, format.getIntendedFormatID(),
4152                            format.getActualRenderableImageFormatID(), samples, usage,
4153                            kVkImageCreateFlagsNone, ImageLayout::Undefined, nullptr, firstLevel,
4154                            mipLevels, layerCount, isRobustResourceInitEnabled,
4155                            hasProtectedContent));
4156     if (rotatedAspectRatio)
4157     {
4158         std::swap(mExtents.width, mExtents.height);
4159     }
4160     mRotatedAspectRatio = rotatedAspectRatio;
4161     return angle::Result::Continue;
4162 }
4163 
4164 angle::Result ImageHelper::initExternal(Context *context,
4165                                         gl::TextureType textureType,
4166                                         const VkExtent3D &extents,
4167                                         angle::FormatID intendedFormatID,
4168                                         angle::FormatID actualFormatID,
4169                                         GLint samples,
4170                                         VkImageUsageFlags usage,
4171                                         VkImageCreateFlags additionalCreateFlags,
4172                                         ImageLayout initialLayout,
4173                                         const void *externalImageCreateInfo,
4174                                         gl::LevelIndex firstLevel,
4175                                         uint32_t mipLevels,
4176                                         uint32_t layerCount,
4177                                         bool isRobustResourceInitEnabled,
4178                                         bool hasProtectedContent)
4179 {
4180     ASSERT(!valid());
4181     ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
4182     ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));
4183 
4184     RendererVk *rendererVk = context->getRenderer();
4185 
4186     mImageType           = gl_vk::GetImageType(textureType);
4187     mExtents             = extents;
4188     mRotatedAspectRatio  = false;
4189     mIntendedFormatID    = intendedFormatID;
4190     mActualFormatID      = actualFormatID;
4191     mSamples             = std::max(samples, 1);
4192     mImageSerial         = context->getRenderer()->getResourceSerialFactory().generateImageSerial();
4193     mFirstAllocatedLevel = firstLevel;
4194     mLevelCount          = mipLevels;
4195     mLayerCount          = layerCount;
4196     mCreateFlags         = GetImageCreateFlags(textureType) | additionalCreateFlags;
4197     mUsage               = usage;
4198 
4199     // Validate that mLayerCount is compatible with the texture type
4200     ASSERT(textureType != gl::TextureType::_3D || mLayerCount == 1);
4201     ASSERT(textureType != gl::TextureType::_2DArray || mExtents.depth == 1);
4202     ASSERT(textureType != gl::TextureType::External || mLayerCount == 1);
4203     ASSERT(textureType != gl::TextureType::Rectangle || mLayerCount == 1);
4204     ASSERT(textureType != gl::TextureType::CubeMap || mLayerCount == gl::kCubeFaceCount);
4205 
4206     // If externalImageCreateInfo is provided, use that directly.  Otherwise derive the necessary
4207     // pNext chain.
4208     const void *imageCreateInfoPNext = externalImageCreateInfo;
4209     VkImageFormatListCreateInfoKHR imageFormatListInfoStorage;
4210     ImageListFormats imageListFormatsStorage;
4211 
4212     if (externalImageCreateInfo == nullptr)
4213     {
4214         imageCreateInfoPNext =
4215             DeriveCreateInfoPNext(context, actualFormatID, nullptr, &imageFormatListInfoStorage,
4216                                   &imageListFormatsStorage, &mCreateFlags);
4217     }
4218     else
4219     {
4220         // Derive the tiling for external images.
4221         deriveExternalImageTiling(externalImageCreateInfo);
4222     }
4223 
4224     mYcbcrConversionDesc.reset();
4225     mYuvConversionSampler.reset();
4226 
4227     const angle::Format &actualFormat = angle::Format::Get(actualFormatID);
4228     VkFormat actualVkFormat           = GetVkFormatFromFormatID(actualFormatID);
4229 
4230     if (actualFormat.isYUV)
4231     {
4232         // The Vulkan spec states: If sampler is used and the VkFormat of the image is a
4233         // multi-planar format, the image must have been created with
4234         // VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT
4235         mCreateFlags |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4236 
4237         // The Vulkan spec states: The potential format features of the sampler YCBCR conversion
4238         // must support VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT or
4239         // VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT
4240         constexpr VkFormatFeatureFlags kChromaSubSampleFeatureBits =
4241             VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT |
4242             VK_FORMAT_FEATURE_MIDPOINT_CHROMA_SAMPLES_BIT;
4243 
4244         VkFormatFeatureFlags supportedChromaSubSampleFeatureBits =
4245             rendererVk->getImageFormatFeatureBits(mActualFormatID, kChromaSubSampleFeatureBits);
4246 
4247         VkChromaLocation supportedLocation = ((supportedChromaSubSampleFeatureBits &
4248                                                VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT) != 0)
4249                                                  ? VK_CHROMA_LOCATION_COSITED_EVEN
4250                                                  : VK_CHROMA_LOCATION_MIDPOINT;
4251         VkSamplerYcbcrModelConversion conversionModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601;
4252         VkSamplerYcbcrRange colorRange                = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW;
4253         VkFilter chromaFilter                         = VK_FILTER_NEAREST;
4254 
4255         // Create the VkSamplerYcbcrConversion to associate with image views and samplers
4256         VkSamplerYcbcrConversionCreateInfo yuvConversionInfo = {};
4257         yuvConversionInfo.sType         = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO;
4258         yuvConversionInfo.format        = actualVkFormat;
4259         yuvConversionInfo.xChromaOffset = supportedLocation;
4260         yuvConversionInfo.yChromaOffset = supportedLocation;
4261         yuvConversionInfo.ycbcrModel    = conversionModel;
4262         yuvConversionInfo.ycbcrRange    = colorRange;
4263         yuvConversionInfo.chromaFilter  = chromaFilter;
4264 
4265         // Update the YuvConversionCache key
4266         mYcbcrConversionDesc.update(rendererVk, 0, conversionModel, colorRange, supportedLocation,
4267                                     supportedLocation, chromaFilter, intendedFormatID);
4268 
4269         ANGLE_TRY(rendererVk->getYuvConversionCache().getYuvConversion(
4270             context, mYcbcrConversionDesc, yuvConversionInfo, &mYuvConversionSampler));
4271     }
4272 
4273     if (hasProtectedContent)
4274     {
4275         mCreateFlags |= VK_IMAGE_CREATE_PROTECTED_BIT;
4276     }
4277 
4278     VkImageCreateInfo imageInfo     = {};
4279     imageInfo.sType                 = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
4280     imageInfo.pNext                 = imageCreateInfoPNext;
4281     imageInfo.flags                 = mCreateFlags;
4282     imageInfo.imageType             = mImageType;
4283     imageInfo.format                = actualVkFormat;
4284     imageInfo.extent                = mExtents;
4285     imageInfo.mipLevels             = mLevelCount;
4286     imageInfo.arrayLayers           = mLayerCount;
4287     imageInfo.samples               = gl_vk::GetSamples(mSamples);
4288     imageInfo.tiling                = mTilingMode;
4289     imageInfo.usage                 = mUsage;
4290     imageInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
4291     imageInfo.queueFamilyIndexCount = 0;
4292     imageInfo.pQueueFamilyIndices   = nullptr;
4293     imageInfo.initialLayout         = ConvertImageLayoutToVkImageLayout(initialLayout);
4294 
4295     mCurrentLayout = initialLayout;
4296 
4297     ANGLE_VK_TRY(context, mImage.init(context->getDevice(), imageInfo));
4298 
4299     mVkImageCreateInfo               = imageInfo;
4300     mVkImageCreateInfo.pNext         = nullptr;
4301     mVkImageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
4302 
4303     stageClearIfEmulatedFormat(isRobustResourceInitEnabled, externalImageCreateInfo != nullptr);
4304 
4305     // Consider the contents defined for any image that has the PREINITIALIZED layout, or is
4306     // imported from external.
4307     if (initialLayout != ImageLayout::Undefined || externalImageCreateInfo != nullptr)
4308     {
4309         setEntireContentDefined();
4310     }
4311 
4312     return angle::Result::Continue;
4313 }
4314 
4315 const void *ImageHelper::DeriveCreateInfoPNext(
4316     Context *context,
4317     angle::FormatID actualFormatID,
4318     const void *pNext,
4319     VkImageFormatListCreateInfoKHR *imageFormatListInfoStorage,
4320     std::array<VkFormat, kImageListFormatCount> *imageListFormatsStorage,
4321     VkImageCreateFlags *createFlagsOut)
4322 {
4323     // With the introduction of sRGB related GLES extensions any sample/render target could be
4324     // respecified causing it to be interpreted in a different colorspace.  Create the VkImage
4325     // accordingly.
4326     RendererVk *rendererVk            = context->getRenderer();
4327     const angle::Format &actualFormat = angle::Format::Get(actualFormatID);
4328     angle::FormatID additionalFormat =
4329         actualFormat.isSRGB ? ConvertToLinear(actualFormatID) : ConvertToSRGB(actualFormatID);
4330     (*imageListFormatsStorage)[0] = vk::GetVkFormatFromFormatID(actualFormatID);
4331     (*imageListFormatsStorage)[1] = vk::GetVkFormatFromFormatID(additionalFormat);
4332 
4333     if (rendererVk->getFeatures().supportsImageFormatList.enabled &&
4334         rendererVk->haveSameFormatFeatureBits(actualFormatID, additionalFormat))
4335     {
4336         // Add VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT to VkImage create flag
4337         *createFlagsOut |= VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
4338 
4339         // There is just 1 additional format we might use to create a VkImageView for this
4340         // VkImage
4341         imageFormatListInfoStorage->sType = VK_STRUCTURE_TYPE_IMAGE_FORMAT_LIST_CREATE_INFO_KHR;
4342         imageFormatListInfoStorage->pNext = pNext;
4343         imageFormatListInfoStorage->viewFormatCount = kImageListFormatCount;
4344         imageFormatListInfoStorage->pViewFormats    = imageListFormatsStorage->data();
4345 
4346         pNext = imageFormatListInfoStorage;
4347     }
4348 
4349     return pNext;
4350 }
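// Illustrative example (not part of the upstream file): for an R8G8B8A8_UNORM image the derived
// additional format is its sRGB counterpart, so when VK_KHR_image_format_list is supported and the
// two formats share feature bits, the create info gains
//
//   pViewFormats = { VK_FORMAT_R8G8B8A8_UNORM, VK_FORMAT_R8G8B8A8_SRGB }
//
// together with VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT, allowing either view format later.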
4351 
4352 void ImageHelper::deriveExternalImageTiling(const void *createInfoChain)
4353 {
4354     const VkBaseInStructure *chain = reinterpret_cast<const VkBaseInStructure *>(createInfoChain);
4355     while (chain != nullptr)
4356     {
4357         if (chain->sType == VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_LIST_CREATE_INFO_EXT ||
4358             chain->sType == VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_EXPLICIT_CREATE_INFO_EXT)
4359         {
4360             mTilingMode = VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT;
4361             return;
4362         }
4363 
4364         chain = reinterpret_cast<const VkBaseInStructure *>(chain->pNext);
4365     }
4366 }
4367 
4368 void ImageHelper::releaseImage(RendererVk *renderer)
4369 {
4370     renderer->collectGarbageAndReinit(&mUse, &mImage, &mDeviceMemory);
4371     mImageSerial = kInvalidImageSerial;
4372 
4373     setEntireContentUndefined();
4374 }
4375 
4376 void ImageHelper::releaseImageFromShareContexts(RendererVk *renderer, ContextVk *contextVk)
4377 {
4378     if (contextVk && mImageSerial.valid())
4379     {
4380         ContextVkSet &shareContextSet = *contextVk->getShareGroupVk()->getContexts();
4381         for (ContextVk *ctx : shareContextSet)
4382         {
4383             ctx->finalizeImageLayout(this);
4384         }
4385     }
4386 
4387     releaseImage(renderer);
4388 }
4389 
4390 void ImageHelper::releaseStagingBuffer(RendererVk *renderer)
4391 {
4392     ASSERT(validateSubresourceUpdateImageRefsConsistent());
4393 
4394     // Remove updates that never made it to the texture.
4395     for (std::vector<SubresourceUpdate> &levelUpdates : mSubresourceUpdates)
4396     {
4397         for (SubresourceUpdate &update : levelUpdates)
4398         {
4399             update.release(renderer);
4400         }
4401     }
4402 
4403     ASSERT(validateSubresourceUpdateImageRefsConsistent());
4404 
4405     mStagingBuffer.release(renderer);
4406     mSubresourceUpdates.clear();
4407     mCurrentSingleClearValue.reset();
4408 }
4409 
4410 void ImageHelper::resetImageWeakReference()
4411 {
4412     mImage.reset();
4413     mImageSerial        = kInvalidImageSerial;
4414     mRotatedAspectRatio = false;
4415 }
4416 
4417 angle::Result ImageHelper::initializeNonZeroMemory(Context *context,
4418                                                    bool hasProtectedContent,
4419                                                    VkDeviceSize size)
4420 {
4421     const angle::Format &angleFormat = getActualFormat();
4422     bool isCompressedFormat          = angleFormat.isBlock;
4423 
4424     if (angleFormat.isYUV)
4425     {
4426         // VUID-vkCmdClearColorImage-image-01545
4427         // vkCmdClearColorImage(): format must not be one of the formats requiring sampler YCBCR
4428         // conversion for VK_IMAGE_ASPECT_COLOR_BIT image views
4429         return angle::Result::Continue;
4430     }
4431 
4432     RendererVk *renderer = context->getRenderer();
4433 
4434     PrimaryCommandBuffer commandBuffer;
4435     ANGLE_TRY(renderer->getCommandBufferOneOff(context, hasProtectedContent, &commandBuffer));
4436 
4437     // Queue a DMA copy.
4438     barrierImpl(context, getAspectFlags(), ImageLayout::TransferDst, mCurrentQueueFamilyIndex,
4439                 &commandBuffer);
4440 
4441     StagingBuffer stagingBuffer;
4442 
4443     if (isCompressedFormat)
4444     {
4445         // If format is compressed, set its contents through buffer copies.
4446 
4447         // The staging buffer memory is non-zero-initialized in 'init'.
4448         ANGLE_TRY(stagingBuffer.init(context, size, StagingUsage::Write));
4449 
4450         for (LevelIndex level(0); level < LevelIndex(mLevelCount); ++level)
4451         {
4452             VkBufferImageCopy copyRegion = {};
4453 
4454             gl_vk::GetExtent(getLevelExtents(level), &copyRegion.imageExtent);
4455             copyRegion.imageSubresource.aspectMask = getAspectFlags();
4456             copyRegion.imageSubresource.layerCount = mLayerCount;
4457 
4458             // If image has depth and stencil, copy to each individually per Vulkan spec.
4459             bool hasBothDepthAndStencil = isCombinedDepthStencilFormat();
4460             if (hasBothDepthAndStencil)
4461             {
4462                 copyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
4463             }
4464 
4465             commandBuffer.copyBufferToImage(stagingBuffer.getBuffer().getHandle(), mImage,
4466                                             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copyRegion);
4467 
4468             if (hasBothDepthAndStencil)
4469             {
4470                 copyRegion.imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
4471 
4472                 commandBuffer.copyBufferToImage(stagingBuffer.getBuffer().getHandle(), mImage,
4473                                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
4474                                                 &copyRegion);
4475             }
4476         }
4477     }
4478     else
4479     {
4480         // Otherwise issue clear commands.
4481         VkImageSubresourceRange subresource = {};
4482         subresource.aspectMask              = getAspectFlags();
4483         subresource.baseMipLevel            = 0;
4484         subresource.levelCount              = mLevelCount;
4485         subresource.baseArrayLayer          = 0;
4486         subresource.layerCount              = mLayerCount;
4487 
4488         // Arbitrary value to initialize the memory with.  Note: the given uint value, reinterpreted
4489         // as float is about 0.7.
4490         constexpr uint32_t kInitValue   = 0x3F345678;
4491         constexpr float kInitValueFloat = 0.12345f;
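        // (Added note: 0x3F345678 has sign 0, exponent 126 and mantissa 0x345678, so reinterpreted
        // as an IEEE-754 float it is 2^-1 * 1.4089..., roughly 0.70, matching the comment above.)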
4492 
4493         if ((subresource.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) != 0)
4494         {
4495             VkClearColorValue clearValue;
4496             clearValue.uint32[0] = kInitValue;
4497             clearValue.uint32[1] = kInitValue;
4498             clearValue.uint32[2] = kInitValue;
4499             clearValue.uint32[3] = kInitValue;
4500 
4501             commandBuffer.clearColorImage(mImage, getCurrentLayout(), clearValue, 1, &subresource);
4502         }
4503         else
4504         {
4505             VkClearDepthStencilValue clearValue;
4506             clearValue.depth   = kInitValueFloat;
4507             clearValue.stencil = kInitValue;
4508 
4509             commandBuffer.clearDepthStencilImage(mImage, getCurrentLayout(), clearValue, 1,
4510                                                  &subresource);
4511         }
4512     }
4513 
4514     ANGLE_VK_TRY(context, commandBuffer.end());
4515 
4516     Serial serial;
4517     ANGLE_TRY(renderer->queueSubmitOneOff(context, std::move(commandBuffer), hasProtectedContent,
4518                                           egl::ContextPriority::Medium, nullptr, 0, nullptr,
4519                                           vk::SubmitPolicy::AllowDeferred, &serial));
4520 
4521     if (isCompressedFormat)
4522     {
4523         stagingBuffer.collectGarbage(renderer, serial);
4524     }
4525     mUse.updateSerialOneOff(serial);
4526 
4527     return angle::Result::Continue;
4528 }
4529 
4530 angle::Result ImageHelper::initMemory(Context *context,
4531                                       bool hasProtectedContent,
4532                                       const MemoryProperties &memoryProperties,
4533                                       VkMemoryPropertyFlags flags)
4534 {
4535     // TODO(jmadill): Memory sub-allocation. http://anglebug.com/2162
4536     VkDeviceSize size;
4537     if (hasProtectedContent)
4538     {
4539         flags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
4540     }
4541     ANGLE_TRY(AllocateImageMemory(context, flags, &flags, nullptr, &mImage, &mDeviceMemory, &size));
4542     mCurrentQueueFamilyIndex = context->getRenderer()->getQueueFamilyIndex();
4543 
4544     RendererVk *renderer = context->getRenderer();
4545     if (renderer->getFeatures().allocateNonZeroMemory.enabled)
4546     {
4547         // Can't map the memory. Use a staging resource.
4548         if ((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
4549         {
4550             ANGLE_TRY(initializeNonZeroMemory(context, hasProtectedContent, size));
4551         }
4552     }
4553 
4554     return angle::Result::Continue;
4555 }
4556 
4557 angle::Result ImageHelper::initExternalMemory(
4558     Context *context,
4559     const MemoryProperties &memoryProperties,
4560     const VkMemoryRequirements &memoryRequirements,
4561     const VkSamplerYcbcrConversionCreateInfo *samplerYcbcrConversionCreateInfo,
4562     uint32_t extraAllocationInfoCount,
4563     const void **extraAllocationInfo,
4564     uint32_t currentQueueFamilyIndex,
4565     VkMemoryPropertyFlags flags)
4566 {
4567     // Vulkan allows up to 4 memory planes.
4568     constexpr size_t kMaxMemoryPlanes                                     = 4;
4569     constexpr VkImageAspectFlagBits kMemoryPlaneAspects[kMaxMemoryPlanes] = {
4570         VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT,
4571         VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT,
4572         VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT,
4573         VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT,
4574     };
4575     ASSERT(extraAllocationInfoCount <= kMaxMemoryPlanes);
4576 
4577     VkBindImagePlaneMemoryInfoKHR bindImagePlaneMemoryInfo = {};
4578     bindImagePlaneMemoryInfo.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_PLANE_MEMORY_INFO;
4579 
4580     const VkBindImagePlaneMemoryInfoKHR *bindImagePlaneMemoryInfoPtr =
4581         extraAllocationInfoCount == 1 ? nullptr : &bindImagePlaneMemoryInfo;
4582 
4583     for (uint32_t memoryPlane = 0; memoryPlane < extraAllocationInfoCount; ++memoryPlane)
4584     {
4585         bindImagePlaneMemoryInfo.planeAspect = kMemoryPlaneAspects[memoryPlane];
4586 
4587         ANGLE_TRY(AllocateImageMemoryWithRequirements(
4588             context, flags, memoryRequirements, extraAllocationInfo[memoryPlane],
4589             bindImagePlaneMemoryInfoPtr, &mImage, &mDeviceMemory));
4590     }
4591     mCurrentQueueFamilyIndex = currentQueueFamilyIndex;
4592 
4593 #ifdef VK_USE_PLATFORM_ANDROID_KHR
4594     if (samplerYcbcrConversionCreateInfo)
4595     {
4596         const VkExternalFormatANDROID *vkExternalFormat =
4597             reinterpret_cast<const VkExternalFormatANDROID *>(
4598                 samplerYcbcrConversionCreateInfo->pNext);
4599         ASSERT(vkExternalFormat->sType == VK_STRUCTURE_TYPE_EXTERNAL_FORMAT_ANDROID);
4600 
4601         // Update the YuvConversionCache key
4602         mYcbcrConversionDesc.update(context->getRenderer(), vkExternalFormat->externalFormat,
4603                                     samplerYcbcrConversionCreateInfo->ycbcrModel,
4604                                     samplerYcbcrConversionCreateInfo->ycbcrRange,
4605                                     samplerYcbcrConversionCreateInfo->xChromaOffset,
4606                                     samplerYcbcrConversionCreateInfo->yChromaOffset,
4607                                     samplerYcbcrConversionCreateInfo->chromaFilter,
4608                                     angle::FormatID::NONE);
4609 
4610         ANGLE_TRY(context->getRenderer()->getYuvConversionCache().getYuvConversion(
4611             context, mYcbcrConversionDesc, *samplerYcbcrConversionCreateInfo,
4612             &mYuvConversionSampler));
4613     }
4614 #endif
4615     return angle::Result::Continue;
4616 }
4617 
4618 angle::Result ImageHelper::initImageView(Context *context,
4619                                          gl::TextureType textureType,
4620                                          VkImageAspectFlags aspectMask,
4621                                          const gl::SwizzleState &swizzleMap,
4622                                          ImageView *imageViewOut,
4623                                          LevelIndex baseMipLevelVk,
4624                                          uint32_t levelCount)
4625 {
4626     return initLayerImageView(context, textureType, aspectMask, swizzleMap, imageViewOut,
4627                               baseMipLevelVk, levelCount, 0, mLayerCount,
4628                               gl::SrgbWriteControlMode::Default);
4629 }
4630 
4631 angle::Result ImageHelper::initLayerImageView(Context *context,
4632                                               gl::TextureType textureType,
4633                                               VkImageAspectFlags aspectMask,
4634                                               const gl::SwizzleState &swizzleMap,
4635                                               ImageView *imageViewOut,
4636                                               LevelIndex baseMipLevelVk,
4637                                               uint32_t levelCount,
4638                                               uint32_t baseArrayLayer,
4639                                               uint32_t layerCount,
4640                                               gl::SrgbWriteControlMode mode) const
4641 {
4642     angle::FormatID actualFormat = mActualFormatID;
4643 
4644     // If we are initializing an imageview for use with EXT_srgb_write_control, we need to override
4645     // the format to its linear counterpart. Formats that cannot be reinterpreted are exempt from
4646     // this requirement.
4647     if (mode == gl::SrgbWriteControlMode::Linear)
4648     {
4649         angle::FormatID linearFormat = ConvertToLinear(actualFormat);
4650         if (linearFormat != angle::FormatID::NONE)
4651         {
4652             actualFormat = linearFormat;
4653         }
4654     }
4655 
4656     return initLayerImageViewImpl(context, textureType, aspectMask, swizzleMap, imageViewOut,
4657                                   baseMipLevelVk, levelCount, baseArrayLayer, layerCount,
4658                                   GetVkFormatFromFormatID(actualFormat), nullptr);
4659 }
4660 
4661 angle::Result ImageHelper::initLayerImageViewWithFormat(Context *context,
4662                                                         gl::TextureType textureType,
4663                                                         VkFormat imageFormat,
4664                                                         VkImageAspectFlags aspectMask,
4665                                                         const gl::SwizzleState &swizzleMap,
4666                                                         ImageView *imageViewOut,
4667                                                         LevelIndex baseMipLevelVk,
4668                                                         uint32_t levelCount,
4669                                                         uint32_t baseArrayLayer,
4670                                                         uint32_t layerCount) const
4671 {
4672     return initLayerImageViewImpl(context, textureType, aspectMask, swizzleMap, imageViewOut,
4673                                   baseMipLevelVk, levelCount, baseArrayLayer, layerCount,
4674                                   imageFormat, nullptr);
4675 }
4676 
4677 angle::Result ImageHelper::initLayerImageViewImpl(
4678     Context *context,
4679     gl::TextureType textureType,
4680     VkImageAspectFlags aspectMask,
4681     const gl::SwizzleState &swizzleMap,
4682     ImageView *imageViewOut,
4683     LevelIndex baseMipLevelVk,
4684     uint32_t levelCount,
4685     uint32_t baseArrayLayer,
4686     uint32_t layerCount,
4687     VkFormat imageFormat,
4688     const VkImageViewUsageCreateInfo *imageViewUsageCreateInfo) const
4689 {
4690     VkImageViewCreateInfo viewInfo = {};
4691     viewInfo.sType                 = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
4692     viewInfo.flags                 = 0;
4693     viewInfo.image                 = mImage.getHandle();
4694     viewInfo.viewType              = gl_vk::GetImageViewType(textureType);
4695     viewInfo.format                = imageFormat;
4696 
4697     if (swizzleMap.swizzleRequired() && !mYuvConversionSampler.valid())
4698     {
4699         viewInfo.components.r = gl_vk::GetSwizzle(swizzleMap.swizzleRed);
4700         viewInfo.components.g = gl_vk::GetSwizzle(swizzleMap.swizzleGreen);
4701         viewInfo.components.b = gl_vk::GetSwizzle(swizzleMap.swizzleBlue);
4702         viewInfo.components.a = gl_vk::GetSwizzle(swizzleMap.swizzleAlpha);
4703     }
4704     else
4705     {
4706         viewInfo.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
4707         viewInfo.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
4708         viewInfo.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
4709         viewInfo.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
4710     }
4711     viewInfo.subresourceRange.aspectMask     = aspectMask;
4712     viewInfo.subresourceRange.baseMipLevel   = baseMipLevelVk.get();
4713     viewInfo.subresourceRange.levelCount     = levelCount;
4714     viewInfo.subresourceRange.baseArrayLayer = baseArrayLayer;
4715     viewInfo.subresourceRange.layerCount     = layerCount;
4716 
4717     viewInfo.pNext = imageViewUsageCreateInfo;
4718 
4719     VkSamplerYcbcrConversionInfo yuvConversionInfo = {};
4720     if (mYuvConversionSampler.valid())
4721     {
4722         ASSERT((context->getRenderer()->getFeatures().supportsYUVSamplerConversion.enabled));
4723         yuvConversionInfo.sType      = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_INFO;
4724         yuvConversionInfo.pNext      = nullptr;
4725         yuvConversionInfo.conversion = mYuvConversionSampler.get().getHandle();
4726         AddToPNextChain(&viewInfo, &yuvConversionInfo);
4727 
4728         // VUID-VkImageViewCreateInfo-image-02399
4729         // If image has an external format, format must be VK_FORMAT_UNDEFINED
4730         if (mYcbcrConversionDesc.mIsExternalFormat)
4731         {
4732             viewInfo.format = VK_FORMAT_UNDEFINED;
4733         }
4734     }
4735     ANGLE_VK_TRY(context, imageViewOut->init(context->getDevice(), viewInfo));
4736     return angle::Result::Continue;
4737 }
4738 
4739 angle::Result ImageHelper::initReinterpretedLayerImageView(Context *context,
4740                                                            gl::TextureType textureType,
4741                                                            VkImageAspectFlags aspectMask,
4742                                                            const gl::SwizzleState &swizzleMap,
4743                                                            ImageView *imageViewOut,
4744                                                            LevelIndex baseMipLevelVk,
4745                                                            uint32_t levelCount,
4746                                                            uint32_t baseArrayLayer,
4747                                                            uint32_t layerCount,
4748                                                            VkImageUsageFlags imageUsageFlags,
4749                                                            angle::FormatID imageViewFormat) const
4750 {
4751     VkImageViewUsageCreateInfo imageViewUsageCreateInfo = {};
4752     imageViewUsageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_USAGE_CREATE_INFO;
4753     imageViewUsageCreateInfo.usage =
4754         imageUsageFlags & GetMaximalImageUsageFlags(context->getRenderer(), imageViewFormat);
4755 
4756     return initLayerImageViewImpl(context, textureType, aspectMask, swizzleMap, imageViewOut,
4757                                   baseMipLevelVk, levelCount, baseArrayLayer, layerCount,
4758                                   vk::GetVkFormatFromFormatID(imageViewFormat),
4759                                   &imageViewUsageCreateInfo);
4760 }
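// Hypothetical caller sketch (identifiers "contextVk", "image" and "view" are assumed): creating a
// single-level, single-layer UINT view of an RGBA8 image so its bits can be reinterpreted, e.g. by
// a compute pass.  The requested usage flags are masked against what the view format supports.
//
//   ANGLE_TRY(image.initReinterpretedLayerImageView(
//       contextVk, gl::TextureType::_2D, VK_IMAGE_ASPECT_COLOR_BIT, gl::SwizzleState(), &view,
//       vk::LevelIndex(0), 1, 0, 1, VK_IMAGE_USAGE_STORAGE_BIT, angle::FormatID::R8G8B8A8_UINT));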
4761 
4762 void ImageHelper::destroy(RendererVk *renderer)
4763 {
4764     VkDevice device = renderer->getDevice();
4765 
4766     mImage.destroy(device);
4767     mDeviceMemory.destroy(device);
4768     mStagingBuffer.destroy(renderer);
4769     mCurrentLayout = ImageLayout::Undefined;
4770     mImageType     = VK_IMAGE_TYPE_2D;
4771     mLayerCount    = 0;
4772     mLevelCount    = 0;
4773 
4774     setEntireContentUndefined();
4775 }
4776 
4777 void ImageHelper::init2DWeakReference(Context *context,
4778                                       VkImage handle,
4779                                       const gl::Extents &glExtents,
4780                                       bool rotatedAspectRatio,
4781                                       angle::FormatID intendedFormatID,
4782                                       angle::FormatID actualFormatID,
4783                                       GLint samples,
4784                                       bool isRobustResourceInitEnabled)
4785 {
4786     ASSERT(!valid());
4787     ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
4788     ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));
4789 
4790     gl_vk::GetExtent(glExtents, &mExtents);
4791     mRotatedAspectRatio = rotatedAspectRatio;
4792     mIntendedFormatID   = intendedFormatID;
4793     mActualFormatID     = actualFormatID;
4794     mSamples            = std::max(samples, 1);
4795     mImageSerial        = context->getRenderer()->getResourceSerialFactory().generateImageSerial();
4796     mCurrentLayout      = ImageLayout::Undefined;
4797     mLayerCount         = 1;
4798     mLevelCount         = 1;
4799 
4800     mImage.setHandle(handle);
4801 
4802     stageClearIfEmulatedFormat(isRobustResourceInitEnabled, false);
4803 }
4804 
4805 angle::Result ImageHelper::init2DStaging(Context *context,
4806                                          bool hasProtectedContent,
4807                                          const MemoryProperties &memoryProperties,
4808                                          const gl::Extents &glExtents,
4809                                          angle::FormatID intendedFormatID,
4810                                          angle::FormatID actualFormatID,
4811                                          VkImageUsageFlags usage,
4812                                          uint32_t layerCount)
4813 {
4814     gl_vk::GetExtent(glExtents, &mExtents);
4815 
4816     return initStaging(context, hasProtectedContent, memoryProperties, VK_IMAGE_TYPE_2D, mExtents,
4817                        intendedFormatID, actualFormatID, 1, usage, 1, layerCount);
4818 }
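// Example usage (sketch): creating a single-level, single-layer 2D staging image, e.g. for a
// transfer-based readback.  |contextVk|, |memoryProperties|, |width| and |height| are assumed
// to be provided by the caller; the format and usage flags here are illustrative only.
//
//     vk::ImageHelper stagingImage;
//     ANGLE_TRY(stagingImage.init2DStaging(
//         contextVk, false /* hasProtectedContent */, memoryProperties,
//         gl::Extents(width, height, 1), angle::FormatID::R8G8B8A8_UNORM,
//         angle::FormatID::R8G8B8A8_UNORM,
//         VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
//         1 /* layerCount */));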
4819 
4820 angle::Result ImageHelper::initStaging(Context *context,
4821                                        bool hasProtectedContent,
4822                                        const MemoryProperties &memoryProperties,
4823                                        VkImageType imageType,
4824                                        const VkExtent3D &extents,
4825                                        angle::FormatID intendedFormatID,
4826                                        angle::FormatID actualFormatID,
4827                                        GLint samples,
4828                                        VkImageUsageFlags usage,
4829                                        uint32_t mipLevels,
4830                                        uint32_t layerCount)
4831 {
4832     ASSERT(!valid());
4833     ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
4834     ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));
4835 
4836     mImageType          = imageType;
4837     mExtents            = extents;
4838     mRotatedAspectRatio = false;
4839     mIntendedFormatID   = intendedFormatID;
4840     mActualFormatID     = actualFormatID;
4841     mSamples            = std::max(samples, 1);
4842     mImageSerial        = context->getRenderer()->getResourceSerialFactory().generateImageSerial();
4843     mLayerCount         = layerCount;
4844     mLevelCount         = mipLevels;
4845     mUsage              = usage;
4846 
4847     // Validate that mLayerCount is compatible with the image type
4848     ASSERT(imageType != VK_IMAGE_TYPE_3D || mLayerCount == 1);
4849     ASSERT(imageType != VK_IMAGE_TYPE_2D || mExtents.depth == 1);
4850 
4851     mCurrentLayout = ImageLayout::Undefined;
4852 
4853     VkImageCreateInfo imageInfo     = {};
4854     imageInfo.sType                 = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
4855     imageInfo.flags                 = hasProtectedContent ? VK_IMAGE_CREATE_PROTECTED_BIT : 0;
4856     imageInfo.imageType             = mImageType;
4857     imageInfo.format                = GetVkFormatFromFormatID(actualFormatID);
4858     imageInfo.extent                = mExtents;
4859     imageInfo.mipLevels             = mLevelCount;
4860     imageInfo.arrayLayers           = mLayerCount;
4861     imageInfo.samples               = gl_vk::GetSamples(mSamples);
4862     imageInfo.tiling                = VK_IMAGE_TILING_OPTIMAL;
4863     imageInfo.usage                 = usage;
4864     imageInfo.sharingMode           = VK_SHARING_MODE_EXCLUSIVE;
4865     imageInfo.queueFamilyIndexCount = 0;
4866     imageInfo.pQueueFamilyIndices   = nullptr;
4867     imageInfo.initialLayout         = getCurrentLayout();
4868 
4869     ANGLE_VK_TRY(context, mImage.init(context->getDevice(), imageInfo));
4870 
4871     mVkImageCreateInfo               = imageInfo;
4872     mVkImageCreateInfo.pNext         = nullptr;
4873     mVkImageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
4874 
4875     // Allocate and bind device-local memory.
4876     VkMemoryPropertyFlags memoryPropertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
4877     if (hasProtectedContent)
4878     {
4879         memoryPropertyFlags |= VK_MEMORY_PROPERTY_PROTECTED_BIT;
4880     }
4881     ANGLE_TRY(initMemory(context, hasProtectedContent, memoryProperties, memoryPropertyFlags));
4882 
4883     return angle::Result::Continue;
4884 }
4885 
4886 angle::Result ImageHelper::initImplicitMultisampledRenderToTexture(
4887     Context *context,
4888     bool hasProtectedContent,
4889     const MemoryProperties &memoryProperties,
4890     gl::TextureType textureType,
4891     GLint samples,
4892     const ImageHelper &resolveImage,
4893     bool isRobustResourceInitEnabled)
4894 {
4895     ASSERT(!valid());
4896     ASSERT(samples > 1);
4897     ASSERT(!IsAnySubresourceContentDefined(mContentDefined));
4898     ASSERT(!IsAnySubresourceContentDefined(mStencilContentDefined));
4899 
4900     // The image is used as either a color or a depth/stencil attachment.  Additionally, its
4901     // memory is lazily allocated because the contents are discarded at the end of the render
4902     // pass, and on tiling GPUs no actual backing memory is required.
4903     //
4904     // Note that the Vulkan image is created with or without VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT
4905     // based on whether the memory that will be used to create the image would have
4906     // VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT.  TRANSIENT is provided if there is any memory that
4907     // supports LAZILY_ALLOCATED.  However, based on actual image requirements, such a memory may
4908     // not be suitable for the image.  We don't support such a case, which will result in the
4909     // |initMemory| call below failing.
4910     const bool hasLazilyAllocatedMemory = memoryProperties.hasLazilyAllocatedMemory();
4911 
4912     const VkImageUsageFlags kMultisampledUsageFlags =
4913         (hasLazilyAllocatedMemory ? VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT : 0) |
4914         (resolveImage.getAspectFlags() == VK_IMAGE_ASPECT_COLOR_BIT
4915              ? VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT
4916              : VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
4917     const VkImageCreateFlags kMultisampledCreateFlags =
4918         hasProtectedContent ? VK_IMAGE_CREATE_PROTECTED_BIT : 0;
4919 
4920     ANGLE_TRY(initExternal(context, textureType, resolveImage.getExtents(),
4921                            resolveImage.getIntendedFormatID(), resolveImage.getActualFormatID(),
4922                            samples, kMultisampledUsageFlags, kMultisampledCreateFlags,
4923                            ImageLayout::Undefined, nullptr, resolveImage.getFirstAllocatedLevel(),
4924                            resolveImage.getLevelCount(), resolveImage.getLayerCount(),
4925                            isRobustResourceInitEnabled, hasProtectedContent));
4926 
4927     // Remove the emulated format clear from the multisampled image if any.  There is one already
4928     // staged on the resolve image if needed.
4929     removeStagedUpdates(context, getFirstAllocatedLevel(), getLastAllocatedLevel());
4930 
4931     const VkMemoryPropertyFlags kMultisampledMemoryFlags =
4932         VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
4933         (hasLazilyAllocatedMemory ? VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT : 0) |
4934         (hasProtectedContent ? VK_MEMORY_PROPERTY_PROTECTED_BIT : 0);
4935 
4936     // If this ever fails, it can be retried without the LAZILY_ALLOCATED flag (which will probably
4937     // still fail), but ideally that means GL_EXT_multisampled_render_to_texture should not be
4938     // advertised on this platform in the first place.
4939     return initMemory(context, hasProtectedContent, memoryProperties, kMultisampledMemoryFlags);
4940 }
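// Example usage (sketch): creating the implicit multisampled image that backs a
// GL_EXT_multisampled_render_to_texture attachment.  |contextVk|, |memoryProperties|,
// |resolveImage| (the single-sampled texture image) and |isRobustResourceInitEnabled| are
// assumed to come from the caller; the sample count is illustrative.
//
//     vk::ImageHelper multisampledImage;
//     ANGLE_TRY(multisampledImage.initImplicitMultisampledRenderToTexture(
//         contextVk, false /* hasProtectedContent */, memoryProperties,
//         gl::TextureType::_2D, 4 /* samples */, resolveImage,
//         isRobustResourceInitEnabled));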
4941 
4942 VkImageAspectFlags ImageHelper::getAspectFlags() const
4943 {
4944     return GetFormatAspectFlags(angle::Format::Get(mActualFormatID));
4945 }
4946 
4947 bool ImageHelper::isCombinedDepthStencilFormat() const
4948 {
4949     return (getAspectFlags() & kDepthStencilAspects) == kDepthStencilAspects;
4950 }
4951 
4952 VkImageLayout ImageHelper::getCurrentLayout() const
4953 {
4954     return ConvertImageLayoutToVkImageLayout(mCurrentLayout);
4955 }
4956 
4957 gl::Extents ImageHelper::getLevelExtents(LevelIndex levelVk) const
4958 {
4959     // Level 0 is the full image extents; each subsequent level halves each dimension
4960     // (clamped to 1).
4961     uint32_t width  = std::max(mExtents.width >> levelVk.get(), 1u);
4962     uint32_t height = std::max(mExtents.height >> levelVk.get(), 1u);
4963     uint32_t depth  = std::max(mExtents.depth >> levelVk.get(), 1u);
4964 
4965     return gl::Extents(width, height, depth);
4966 }
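// For example, an image with base extents 16x8x4 has level-1 extents 8x4x2, level-2 extents
// 4x2x1, and 1x1x1 from level 4 onwards, since each dimension clamps at 1.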
4967 
4968 gl::Extents ImageHelper::getLevelExtents2D(LevelIndex levelVk) const
4969 {
4970     gl::Extents extents = getLevelExtents(levelVk);
4971     extents.depth       = 1;
4972     return extents;
4973 }
4974 
4975 const VkExtent3D ImageHelper::getRotatedExtents() const
4976 {
4977     VkExtent3D extents = mExtents;
4978     if (mRotatedAspectRatio)
4979     {
4980         std::swap(extents.width, extents.height);
4981     }
4982     return extents;
4983 }
4984 
4985 gl::Extents ImageHelper::getRotatedLevelExtents2D(LevelIndex levelVk) const
4986 {
4987     gl::Extents extents = getLevelExtents2D(levelVk);
4988     if (mRotatedAspectRatio)
4989     {
4990         std::swap(extents.width, extents.height);
4991     }
4992     return extents;
4993 }
4994 
4995 bool ImageHelper::isDepthOrStencil() const
4996 {
4997     return getActualFormat().hasDepthOrStencilBits();
4998 }
4999 
5000 void ImageHelper::setRenderPassUsageFlag(RenderPassUsage flag)
5001 {
5002     mRenderPassUsageFlags.set(flag);
5003 }
5004 
5005 void ImageHelper::clearRenderPassUsageFlag(RenderPassUsage flag)
5006 {
5007     mRenderPassUsageFlags.reset(flag);
5008 }
5009 
5010 void ImageHelper::resetRenderPassUsageFlags()
5011 {
5012     mRenderPassUsageFlags.reset();
5013 }
5014 
5015 bool ImageHelper::hasRenderPassUsageFlag(RenderPassUsage flag) const
5016 {
5017     return mRenderPassUsageFlags.test(flag);
5018 }
5019 
5020 bool ImageHelper::usedByCurrentRenderPassAsAttachmentAndSampler() const
5021 {
5022     return mRenderPassUsageFlags[RenderPassUsage::RenderTargetAttachment] &&
5023            mRenderPassUsageFlags[RenderPassUsage::TextureSampler];
5024 }
5025 
5026 bool ImageHelper::isReadBarrierNecessary(ImageLayout newLayout) const
5027 {
5028     // If transitioning to a different layout, we always need a barrier.
5029     if (mCurrentLayout != newLayout)
5030     {
5031         return true;
5032     }
5033 
5034     // RAR (read-after-read) is not a hazard and doesn't require a barrier.
5035     //
5036     // RAW (read-after-write) hazards always require a memory barrier.  This can only happen if the
5037     // layout (same as new layout) is writable which in turn is only possible if the image is
5038     // simultaneously bound for shader write (i.e. the layout is GENERAL or SHARED_PRESENT).
5039     const ImageMemoryBarrierData &layoutData = kImageMemoryBarrierData[mCurrentLayout];
5040     return layoutData.type == ResourceAccess::Write;
5041 }
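// Example (sketch): callers can use this to skip redundant barriers for read-after-read.
// Assuming the image was last read in FragmentShaderReadOnly, another read in the same layout
// needs no barrier, whereas a read in a different layout, or any access while the image sits
// in a writable layout (e.g. GENERAL or SHARED_PRESENT), does.  |image| and |newLayout| are
// hypothetical caller-provided values:
//
//     if (image->isReadBarrierNecessary(newLayout))
//     {
//         // Record a layout transition / memory barrier before the read.
//     }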
5042 
5043 void ImageHelper::changeLayoutAndQueue(Context *context,
5044                                        VkImageAspectFlags aspectMask,
5045                                        ImageLayout newLayout,
5046                                        uint32_t newQueueFamilyIndex,
5047                                        CommandBuffer *commandBuffer)
5048 {
5049     ASSERT(isQueueChangeNeccesary(newQueueFamilyIndex));
5050     barrierImpl(context, aspectMask, newLayout, newQueueFamilyIndex, commandBuffer);
5051 }
5052 
5053 void ImageHelper::acquireFromExternal(ContextVk *contextVk,
5054                                       uint32_t externalQueueFamilyIndex,
5055                                       uint32_t rendererQueueFamilyIndex,
5056                                       ImageLayout currentLayout,
5057                                       CommandBuffer *commandBuffer)
5058 {
5059     // The image must be newly allocated or have been released to the external
5060     // queue. If this is not the case, it's an application bug, so ASSERT might
5061     // eventually need to change to a warning.
5062     ASSERT(mCurrentLayout == ImageLayout::ExternalPreInitialized ||
5063            mCurrentQueueFamilyIndex == externalQueueFamilyIndex);
5064 
5065     mCurrentLayout           = currentLayout;
5066     mCurrentQueueFamilyIndex = externalQueueFamilyIndex;
5067 
5068     retain(&contextVk->getResourceUseList());
5069     changeLayoutAndQueue(contextVk, getAspectFlags(), mCurrentLayout, rendererQueueFamilyIndex,
5070                          commandBuffer);
5071 
5072     // It is unknown how the external owner has modified the image, so assume every
5073     // subresource has defined content, unless the layout is Undefined.
5074     if (currentLayout == ImageLayout::Undefined)
5075     {
5076         setEntireContentUndefined();
5077     }
5078     else
5079     {
5080         setEntireContentDefined();
5081     }
5082 }
5083 
5084 void ImageHelper::releaseToExternal(ContextVk *contextVk,
5085                                     uint32_t rendererQueueFamilyIndex,
5086                                     uint32_t externalQueueFamilyIndex,
5087                                     ImageLayout desiredLayout,
5088                                     CommandBuffer *commandBuffer)
5089 {
5090     ASSERT(mCurrentQueueFamilyIndex == rendererQueueFamilyIndex);
5091 
5092     retain(&contextVk->getResourceUseList());
5093     changeLayoutAndQueue(contextVk, getAspectFlags(), desiredLayout, externalQueueFamilyIndex,
5094                          commandBuffer);
5095 }
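// Example (sketch): the typical ownership handshake for an image imported from external
// memory.  |externalQueueFamily|, |rendererQueueFamily|, |currentLayout|, |desiredLayout| and
// |commandBuffer| are assumed to be provided by the caller (the external family is typically
// VK_QUEUE_FAMILY_EXTERNAL).
//
//     image->acquireFromExternal(contextVk, externalQueueFamily, rendererQueueFamily,
//                                currentLayout, commandBuffer);
//     // ... ANGLE reads from / writes to the image on its own queue ...
//     image->releaseToExternal(contextVk, rendererQueueFamily, externalQueueFamily,
//                              desiredLayout, commandBuffer);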
5096 
5097 bool ImageHelper::isReleasedToExternal() const
5098 {
5099 #if !defined(ANGLE_PLATFORM_MACOS) && !defined(ANGLE_PLATFORM_ANDROID)
5100     return IsExternalQueueFamily(mCurrentQueueFamilyIndex);
5101 #else
5102     // TODO(anglebug.com/4635): Implement external memory barriers on Mac/Android.
5103     return false;
5104 #endif
5105 }
5106 
5107 LevelIndex ImageHelper::toVkLevel(gl::LevelIndex levelIndexGL) const
5108 {
5109     return gl_vk::GetLevelIndex(levelIndexGL, mFirstAllocatedLevel);
5110 }
5111 
5112 gl::LevelIndex ImageHelper::toGLLevel(LevelIndex levelIndexVk) const
5113 {
5114     return vk_gl::GetLevelIndex(levelIndexVk, mFirstAllocatedLevel);
5115 }
5116 
5117 ANGLE_INLINE void ImageHelper::initImageMemoryBarrierStruct(
5118     VkImageAspectFlags aspectMask,
5119     ImageLayout newLayout,
5120     uint32_t newQueueFamilyIndex,
5121     VkImageMemoryBarrier *imageMemoryBarrier) const
5122 {
5123     const ImageMemoryBarrierData &transitionFrom = kImageMemoryBarrierData[mCurrentLayout];
5124     const ImageMemoryBarrierData &transitionTo   = kImageMemoryBarrierData[newLayout];
5125 
5126     imageMemoryBarrier->sType               = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
5127     imageMemoryBarrier->srcAccessMask       = transitionFrom.srcAccessMask;
5128     imageMemoryBarrier->dstAccessMask       = transitionTo.dstAccessMask;
5129     imageMemoryBarrier->oldLayout           = transitionFrom.layout;
5130     imageMemoryBarrier->newLayout           = transitionTo.layout;
5131     imageMemoryBarrier->srcQueueFamilyIndex = mCurrentQueueFamilyIndex;
5132     imageMemoryBarrier->dstQueueFamilyIndex = newQueueFamilyIndex;
5133     imageMemoryBarrier->image               = mImage.getHandle();
5134 
5135     // Transition the whole resource.
5136     imageMemoryBarrier->subresourceRange.aspectMask     = aspectMask;
5137     imageMemoryBarrier->subresourceRange.baseMipLevel   = 0;
5138     imageMemoryBarrier->subresourceRange.levelCount     = mLevelCount;
5139     imageMemoryBarrier->subresourceRange.baseArrayLayer = 0;
5140     imageMemoryBarrier->subresourceRange.layerCount     = mLayerCount;
5141 }
5142 
5143 // Generalized to accept both "primary" and "secondary" command buffers.
5144 template <typename CommandBufferT>
5145 void ImageHelper::barrierImpl(Context *context,
5146                               VkImageAspectFlags aspectMask,
5147                               ImageLayout newLayout,
5148                               uint32_t newQueueFamilyIndex,
5149                               CommandBufferT *commandBuffer)
5150 {
5151     if (mCurrentLayout == ImageLayout::SharedPresent)
5152     {
5153         const ImageMemoryBarrierData &transition = kImageMemoryBarrierData[mCurrentLayout];
5154 
5155         VkMemoryBarrier memoryBarrier = {};
5156         memoryBarrier.sType           = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
5157         memoryBarrier.srcAccessMask   = transition.srcAccessMask;
5158         memoryBarrier.dstAccessMask   = transition.dstAccessMask;
5159 
5160         commandBuffer->memoryBarrier(transition.srcStageMask, transition.dstStageMask,
5161                                      &memoryBarrier);
5162         return;
5163     }
5164 
5165     // Make sure we never transition out of SharedPresent
5166     ASSERT(mCurrentLayout != ImageLayout::SharedPresent || newLayout == ImageLayout::SharedPresent);
5167 
5168     const ImageMemoryBarrierData &transitionFrom = kImageMemoryBarrierData[mCurrentLayout];
5169     const ImageMemoryBarrierData &transitionTo   = kImageMemoryBarrierData[newLayout];
5170 
5171     VkImageMemoryBarrier imageMemoryBarrier = {};
5172     initImageMemoryBarrierStruct(aspectMask, newLayout, newQueueFamilyIndex, &imageMemoryBarrier);
5173 
5174     // There might be outstanding shaderRead operations from layouts other than the current one.
5175     VkPipelineStageFlags srcStageMask = GetImageLayoutSrcStageMask(context, transitionFrom);
5176     if (mCurrentShaderReadStageMask)
5177     {
5178         srcStageMask |= mCurrentShaderReadStageMask;
5179         mCurrentShaderReadStageMask  = 0;
5180         mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
5181     }
5182     commandBuffer->imageBarrier(srcStageMask, GetImageLayoutDstStageMask(context, transitionTo),
5183                                 imageMemoryBarrier);
5184 
5185     mCurrentLayout           = newLayout;
5186     mCurrentQueueFamilyIndex = newQueueFamilyIndex;
5187 }
5188 
5189 template void ImageHelper::barrierImpl<rx::vk::priv::CommandBuffer>(
5190     Context *context,
5191     VkImageAspectFlags aspectMask,
5192     ImageLayout newLayout,
5193     uint32_t newQueueFamilyIndex,
5194     rx::vk::priv::CommandBuffer *commandBuffer);
5195 
5196 bool ImageHelper::updateLayoutAndBarrier(Context *context,
5197                                          VkImageAspectFlags aspectMask,
5198                                          ImageLayout newLayout,
5199                                          PipelineBarrier *barrier)
5200 {
5201     // Once you transition to ImageLayout::SharedPresent, you never transition out of it.
5202     if (mCurrentLayout == ImageLayout::SharedPresent)
5203     {
5204         newLayout = ImageLayout::SharedPresent;
5205     }
5206     bool barrierModified = false;
5207     if (newLayout == mCurrentLayout)
5208     {
5209         const ImageMemoryBarrierData &layoutData = kImageMemoryBarrierData[mCurrentLayout];
5210         // RAR is not a hazard and doesn't require a barrier, especially as the image layout hasn't
5211         // changed.  The following asserts that such a barrier is not attempted.
5212         ASSERT(layoutData.type == ResourceAccess::Write);
5213         // No layout change, only memory barrier is required
5214         barrier->mergeMemoryBarrier(GetImageLayoutSrcStageMask(context, layoutData),
5215                                     GetImageLayoutDstStageMask(context, layoutData),
5216                                     layoutData.srcAccessMask, layoutData.dstAccessMask);
5217         barrierModified = true;
5218     }
5219     else
5220     {
5221         const ImageMemoryBarrierData &transitionFrom = kImageMemoryBarrierData[mCurrentLayout];
5222         const ImageMemoryBarrierData &transitionTo   = kImageMemoryBarrierData[newLayout];
5223         VkPipelineStageFlags srcStageMask = GetImageLayoutSrcStageMask(context, transitionFrom);
5224         VkPipelineStageFlags dstStageMask = GetImageLayoutDstStageMask(context, transitionTo);
5225 
5226         if (IsShaderReadOnlyLayout(transitionTo) && IsShaderReadOnlyLayout(transitionFrom))
5227         {
5228             // If we are switching between different shader stage reads, then there is no actual
5229             // layout change or access type change.  We only need a barrier if the read comes from
5230             // a new stage.  Also note that we barrier against the previous non-shaderRead layout;
5231             // we do not barrier between one shaderRead and another shaderRead.
5232             bool isNewReadStage = (mCurrentShaderReadStageMask & dstStageMask) != dstStageMask;
5233             if (isNewReadStage)
5234             {
5235                 const ImageMemoryBarrierData &layoutData =
5236                     kImageMemoryBarrierData[mLastNonShaderReadOnlyLayout];
5237                 barrier->mergeMemoryBarrier(GetImageLayoutSrcStageMask(context, layoutData),
5238                                             dstStageMask, layoutData.srcAccessMask,
5239                                             transitionTo.dstAccessMask);
5240                 barrierModified = true;
5241                 // Accumulate new read stage.
5242                 mCurrentShaderReadStageMask |= dstStageMask;
5243             }
5244         }
5245         else
5246         {
5247             VkImageMemoryBarrier imageMemoryBarrier = {};
5248             initImageMemoryBarrierStruct(aspectMask, newLayout, mCurrentQueueFamilyIndex,
5249                                          &imageMemoryBarrier);
5250             // If we transition from shaderReadOnly, we must add in the stashed shader stage masks,
5251             // since there might be outstanding shader reads from stages other than the current
5252             // layout.  We do not insert a barrier between one shaderRead and another shaderRead.
5253             if (mCurrentShaderReadStageMask)
5254             {
5255                 srcStageMask |= mCurrentShaderReadStageMask;
5256                 mCurrentShaderReadStageMask  = 0;
5257                 mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
5258             }
5259             barrier->mergeImageBarrier(srcStageMask, dstStageMask, imageMemoryBarrier);
5260             barrierModified = true;
5261 
5262             // If we are transitioning into a shaderRead layout, remember the last
5263             // non-shaderRead layout here.
5264             if (IsShaderReadOnlyLayout(transitionTo))
5265             {
5266                 ASSERT(!IsShaderReadOnlyLayout(transitionFrom));
5267                 mLastNonShaderReadOnlyLayout = mCurrentLayout;
5268                 mCurrentShaderReadStageMask  = dstStageMask;
5269             }
5270         }
5271         mCurrentLayout = newLayout;
5272     }
5273     return barrierModified;
5274 }
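// Example: an image that transitions from a color-attachment layout to a fragment-shader
// read-only layout records a full VkImageMemoryBarrier and stores the attachment layout in
// mLastNonShaderReadOnlyLayout.  If the image is subsequently read from the vertex shader,
// both layouts map to the same shader-read-only Vulkan layout, so only a memory barrier
// against the stored non-shaderRead layout is merged, and the vertex shader stage is
// accumulated into mCurrentShaderReadStageMask.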
5275 
5276 void ImageHelper::clearColor(const VkClearColorValue &color,
5277                              LevelIndex baseMipLevelVk,
5278                              uint32_t levelCount,
5279                              uint32_t baseArrayLayer,
5280                              uint32_t layerCount,
5281                              CommandBuffer *commandBuffer)
5282 {
5283     ASSERT(valid());
5284 
5285     ASSERT(mCurrentLayout == ImageLayout::TransferDst ||
5286            mCurrentLayout == ImageLayout::SharedPresent);
5287 
5288     VkImageSubresourceRange range = {};
5289     range.aspectMask              = VK_IMAGE_ASPECT_COLOR_BIT;
5290     range.baseMipLevel            = baseMipLevelVk.get();
5291     range.levelCount              = levelCount;
5292     range.baseArrayLayer          = baseArrayLayer;
5293     range.layerCount              = layerCount;
5294 
5295     if (mImageType == VK_IMAGE_TYPE_3D)
5296     {
5297         ASSERT(baseArrayLayer == 0);
5298         ASSERT(layerCount == 1 ||
5299                layerCount == static_cast<uint32_t>(getLevelExtents(baseMipLevelVk).depth));
5300         range.layerCount = 1;
5301     }
5302 
5303     commandBuffer->clearColorImage(mImage, getCurrentLayout(), color, 1, &range);
5304 }
5305 
5306 void ImageHelper::clearDepthStencil(VkImageAspectFlags clearAspectFlags,
5307                                     const VkClearDepthStencilValue &depthStencil,
5308                                     LevelIndex baseMipLevelVk,
5309                                     uint32_t levelCount,
5310                                     uint32_t baseArrayLayer,
5311                                     uint32_t layerCount,
5312                                     CommandBuffer *commandBuffer)
5313 {
5314     ASSERT(valid());
5315 
5316     ASSERT(mCurrentLayout == ImageLayout::TransferDst);
5317 
5318     VkImageSubresourceRange range = {};
5319     range.aspectMask              = clearAspectFlags;
5320     range.baseMipLevel            = baseMipLevelVk.get();
5321     range.levelCount              = levelCount;
5322     range.baseArrayLayer          = baseArrayLayer;
5323     range.layerCount              = layerCount;
5324 
5325     if (mImageType == VK_IMAGE_TYPE_3D)
5326     {
5327         ASSERT(baseArrayLayer == 0);
5328         ASSERT(layerCount == 1 ||
5329                layerCount == static_cast<uint32_t>(getLevelExtents(baseMipLevelVk).depth));
5330         range.layerCount = 1;
5331     }
5332 
5333     commandBuffer->clearDepthStencilImage(mImage, getCurrentLayout(), depthStencil, 1, &range);
5334 }
5335 
5336 void ImageHelper::clear(VkImageAspectFlags aspectFlags,
5337                         const VkClearValue &value,
5338                         LevelIndex mipLevel,
5339                         uint32_t baseArrayLayer,
5340                         uint32_t layerCount,
5341                         CommandBuffer *commandBuffer)
5342 {
5343     const angle::Format &angleFormat = getActualFormat();
5344     bool isDepthStencil              = angleFormat.depthBits > 0 || angleFormat.stencilBits > 0;
5345 
5346     if (isDepthStencil)
5347     {
5348         clearDepthStencil(aspectFlags, value.depthStencil, mipLevel, 1, baseArrayLayer, layerCount,
5349                           commandBuffer);
5350     }
5351     else
5352     {
5353         ASSERT(!angleFormat.isBlock);
5354 
5355         clearColor(value.color, mipLevel, 1, baseArrayLayer, layerCount, commandBuffer);
5356     }
5357 }
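// Example usage (sketch): clearing the base level of a color image that has already been
// transitioned to the TransferDst layout.  |image| and |commandBuffer| are assumed to be
// provided by the caller:
//
//     VkClearValue clearValue = {};
//     clearValue.color        = {{0.0f, 0.0f, 0.0f, 1.0f}};
//     image->clear(VK_IMAGE_ASPECT_COLOR_BIT, clearValue, vk::LevelIndex(0),
//                  0 /* baseArrayLayer */, 1 /* layerCount */, commandBuffer);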
5358 
5359 angle::Result ImageHelper::clearEmulatedChannels(ContextVk *contextVk,
5360                                                  VkColorComponentFlags colorMaskFlags,
5361                                                  const VkClearValue &value,
5362                                                  LevelIndex mipLevel,
5363                                                  uint32_t baseArrayLayer,
5364                                                  uint32_t layerCount)
5365 {
5366     const gl::Extents levelExtents = getLevelExtents(mipLevel);
5367 
5368     if (levelExtents.depth > 1)
5369     {
5370         // Currently not implemented for 3D textures
5371         UNIMPLEMENTED();
5372         return angle::Result::Continue;
5373     }
5374 
5375     UtilsVk::ClearImageParameters params = {};
5376     params.clearArea                     = {0, 0, levelExtents.width, levelExtents.height};
5377     params.dstMip                        = mipLevel;
5378     params.colorMaskFlags                = colorMaskFlags;
5379     params.colorClearValue               = value.color;
5380 
5381     for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex)
5382     {
5383         params.dstLayer = baseArrayLayer + layerIndex;
5384 
5385         ANGLE_TRY(contextVk->getUtils().clearImage(contextVk, this, params));
5386     }
5387 
5388     return angle::Result::Continue;
5389 }
5390 
5391 // static
5392 void ImageHelper::Copy(ImageHelper *srcImage,
5393                        ImageHelper *dstImage,
5394                        const gl::Offset &srcOffset,
5395                        const gl::Offset &dstOffset,
5396                        const gl::Extents &copySize,
5397                        const VkImageSubresourceLayers &srcSubresource,
5398                        const VkImageSubresourceLayers &dstSubresource,
5399                        CommandBuffer *commandBuffer)
5400 {
5401     ASSERT(commandBuffer->valid() && srcImage->valid() && dstImage->valid());
5402 
5403     ASSERT(srcImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
5404     ASSERT(dstImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
5405 
5406     VkImageCopy region    = {};
5407     region.srcSubresource = srcSubresource;
5408     region.srcOffset.x    = srcOffset.x;
5409     region.srcOffset.y    = srcOffset.y;
5410     region.srcOffset.z    = srcOffset.z;
5411     region.dstSubresource = dstSubresource;
5412     region.dstOffset.x    = dstOffset.x;
5413     region.dstOffset.y    = dstOffset.y;
5414     region.dstOffset.z    = dstOffset.z;
5415     region.extent.width   = copySize.width;
5416     region.extent.height  = copySize.height;
5417     region.extent.depth   = copySize.depth;
5418 
5419     commandBuffer->copyImage(srcImage->getImage(), srcImage->getCurrentLayout(),
5420                              dstImage->getImage(), dstImage->getCurrentLayout(), 1, &region);
5421 }
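// Example usage (sketch): copying one full 2D level between two images that the caller has
// already placed in the TransferSrc/TransferDst layouts.  |srcImage|, |dstImage|,
// |copyExtents| and |commandBuffer| are assumed to be provided by the caller.
//
//     VkImageSubresourceLayers subresource = {};
//     subresource.aspectMask               = VK_IMAGE_ASPECT_COLOR_BIT;
//     subresource.mipLevel                 = 0;
//     subresource.baseArrayLayer           = 0;
//     subresource.layerCount               = 1;
//     vk::ImageHelper::Copy(srcImage, dstImage, gl::Offset(), gl::Offset(), copyExtents,
//                           subresource, subresource, commandBuffer);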
5422 
5423 // static
5424 angle::Result ImageHelper::CopyImageSubData(const gl::Context *context,
5425                                             ImageHelper *srcImage,
5426                                             GLint srcLevel,
5427                                             GLint srcX,
5428                                             GLint srcY,
5429                                             GLint srcZ,
5430                                             ImageHelper *dstImage,
5431                                             GLint dstLevel,
5432                                             GLint dstX,
5433                                             GLint dstY,
5434                                             GLint dstZ,
5435                                             GLsizei srcWidth,
5436                                             GLsizei srcHeight,
5437                                             GLsizei srcDepth)
5438 {
5439     ContextVk *contextVk = GetImpl(context);
5440 
5441     VkImageTiling srcTilingMode  = srcImage->getTilingMode();
5442     VkImageTiling destTilingMode = dstImage->getTilingMode();
5443 
5444     const gl::LevelIndex srcLevelGL = gl::LevelIndex(srcLevel);
5445     const gl::LevelIndex dstLevelGL = gl::LevelIndex(dstLevel);
5446 
5447     if (CanCopyWithTransferForCopyImage(contextVk->getRenderer(), srcImage, srcTilingMode, dstImage,
5448                                         destTilingMode))
5449     {
5450         bool isSrc3D = srcImage->getType() == VK_IMAGE_TYPE_3D;
5451         bool isDst3D = dstImage->getType() == VK_IMAGE_TYPE_3D;
5452 
5453         srcImage->retain(&contextVk->getResourceUseList());
5454         dstImage->retain(&contextVk->getResourceUseList());
5455 
5456         VkImageCopy region = {};
5457 
5458         region.srcSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
5459         region.srcSubresource.mipLevel       = srcImage->toVkLevel(srcLevelGL).get();
5460         region.srcSubresource.baseArrayLayer = isSrc3D ? 0 : srcZ;
5461         region.srcSubresource.layerCount     = isSrc3D ? 1 : srcDepth;
5462 
5463         region.dstSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
5464         region.dstSubresource.mipLevel       = dstImage->toVkLevel(dstLevelGL).get();
5465         region.dstSubresource.baseArrayLayer = isDst3D ? 0 : dstZ;
5466         region.dstSubresource.layerCount     = isDst3D ? 1 : srcDepth;
5467 
5468         region.srcOffset.x   = srcX;
5469         region.srcOffset.y   = srcY;
5470         region.srcOffset.z   = isSrc3D ? srcZ : 0;
5471         region.dstOffset.x   = dstX;
5472         region.dstOffset.y   = dstY;
5473         region.dstOffset.z   = isDst3D ? dstZ : 0;
5474         region.extent.width  = srcWidth;
5475         region.extent.height = srcHeight;
5476         region.extent.depth  = (isSrc3D || isDst3D) ? srcDepth : 1;
5477 
5478         CommandBufferAccess access;
5479         access.onImageTransferRead(VK_IMAGE_ASPECT_COLOR_BIT, srcImage);
5480         access.onImageTransferWrite(dstLevelGL, 1, region.dstSubresource.baseArrayLayer,
5481                                     region.dstSubresource.layerCount, VK_IMAGE_ASPECT_COLOR_BIT,
5482                                     dstImage);
5483 
5484         CommandBuffer *commandBuffer;
5485         ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
5486 
5487         ASSERT(srcImage->valid() && dstImage->valid());
5488         ASSERT(srcImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
5489         ASSERT(dstImage->getCurrentLayout() == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
5490 
5491         commandBuffer->copyImage(srcImage->getImage(), srcImage->getCurrentLayout(),
5492                                  dstImage->getImage(), dstImage->getCurrentLayout(), 1, &region);
5493     }
5494     else if (!srcImage->getIntendedFormat().isBlock && !dstImage->getIntendedFormat().isBlock)
5495     {
5496         // The source and destination image formats may be using a fallback in the case of RGB
5497         // images.  A compute shader is used in such a case to perform the copy.
5498         UtilsVk &utilsVk = contextVk->getUtils();
5499 
5500         UtilsVk::CopyImageBitsParameters params;
5501         params.srcOffset[0]   = srcX;
5502         params.srcOffset[1]   = srcY;
5503         params.srcOffset[2]   = srcZ;
5504         params.srcLevel       = srcLevelGL;
5505         params.dstOffset[0]   = dstX;
5506         params.dstOffset[1]   = dstY;
5507         params.dstOffset[2]   = dstZ;
5508         params.dstLevel       = dstLevelGL;
5509         params.copyExtents[0] = srcWidth;
5510         params.copyExtents[1] = srcHeight;
5511         params.copyExtents[2] = srcDepth;
5512 
5513         ANGLE_TRY(utilsVk.copyImageBits(contextVk, dstImage, srcImage, params));
5514     }
5515     else
5516     {
5517         // No support for emulated compressed formats.
5518         UNIMPLEMENTED();
5519         ANGLE_VK_CHECK(contextVk, false, VK_ERROR_FEATURE_NOT_PRESENT);
5520     }
5521 
5522     return angle::Result::Continue;
5523 }
5524 
5525 angle::Result ImageHelper::generateMipmapsWithBlit(ContextVk *contextVk,
5526                                                    LevelIndex baseLevel,
5527                                                    LevelIndex maxLevel)
5528 {
5529     CommandBufferAccess access;
5530     gl::LevelIndex baseLevelGL = toGLLevel(baseLevel);
5531     access.onImageTransferWrite(baseLevelGL + 1, maxLevel.get(), 0, mLayerCount,
5532                                 VK_IMAGE_ASPECT_COLOR_BIT, this);
5533 
5534     CommandBuffer *commandBuffer;
5535     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
5536 
5537     // We are able to use blitImage since the image format we are using supports it.
5538     int32_t mipWidth  = mExtents.width;
5539     int32_t mipHeight = mExtents.height;
5540     int32_t mipDepth  = mExtents.depth;
5541 
5542     // Manually manage the image memory barrier because it uses a lot more parameters than our
5543     // usual one.
5544     VkImageMemoryBarrier barrier            = {};
5545     barrier.sType                           = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
5546     barrier.image                           = mImage.getHandle();
5547     barrier.srcQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;
5548     barrier.dstQueueFamilyIndex             = VK_QUEUE_FAMILY_IGNORED;
5549     barrier.subresourceRange.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
5550     barrier.subresourceRange.baseArrayLayer = 0;
5551     barrier.subresourceRange.layerCount     = mLayerCount;
5552     barrier.subresourceRange.levelCount     = 1;
5553 
5554     const VkFilter filter =
5555         gl_vk::GetFilter(CalculateGenerateMipmapFilter(contextVk, getActualFormatID()));
5556 
5557     for (LevelIndex mipLevel(1); mipLevel <= LevelIndex(mLevelCount); ++mipLevel)
5558     {
5559         int32_t nextMipWidth  = std::max<int32_t>(1, mipWidth >> 1);
5560         int32_t nextMipHeight = std::max<int32_t>(1, mipHeight >> 1);
5561         int32_t nextMipDepth  = std::max<int32_t>(1, mipDepth >> 1);
5562 
5563         if (mipLevel > baseLevel && mipLevel <= maxLevel)
5564         {
5565             barrier.subresourceRange.baseMipLevel = mipLevel.get() - 1;
5566             barrier.oldLayout                     = getCurrentLayout();
5567             barrier.newLayout                     = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
5568             barrier.srcAccessMask                 = VK_ACCESS_TRANSFER_WRITE_BIT;
5569             barrier.dstAccessMask                 = VK_ACCESS_TRANSFER_READ_BIT;
5570 
5571             // We can do it for all layers at once.
5572             commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
5573                                         VK_PIPELINE_STAGE_TRANSFER_BIT, barrier);
5574             VkImageBlit blit                   = {};
5575             blit.srcOffsets[0]                 = {0, 0, 0};
5576             blit.srcOffsets[1]                 = {mipWidth, mipHeight, mipDepth};
5577             blit.srcSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
5578             blit.srcSubresource.mipLevel       = mipLevel.get() - 1;
5579             blit.srcSubresource.baseArrayLayer = 0;
5580             blit.srcSubresource.layerCount     = mLayerCount;
5581             blit.dstOffsets[0]                 = {0, 0, 0};
5582             blit.dstOffsets[1]                 = {nextMipWidth, nextMipHeight, nextMipDepth};
5583             blit.dstSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
5584             blit.dstSubresource.mipLevel       = mipLevel.get();
5585             blit.dstSubresource.baseArrayLayer = 0;
5586             blit.dstSubresource.layerCount     = mLayerCount;
5587 
5588             commandBuffer->blitImage(mImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, mImage,
5589                                      VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, filter);
5590         }
5591         mipWidth  = nextMipWidth;
5592         mipHeight = nextMipHeight;
5593         mipDepth  = nextMipDepth;
5594     }
5595 
5596     // Transition all mip levels to the same layout so we can describe the whole image with a
5597     // single ImageLayout.  FragmentShaderReadOnly is picked here since it is the most reasonable
5598     // usage after a glGenerateMipmap call.
5599     barrier.oldLayout     = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
5600     barrier.newLayout     = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
5601     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
5602     if (baseLevel.get() > 0)
5603     {
5604         // [0:baseLevel-1] from TRANSFER_DST to SHADER_READ
5605         barrier.subresourceRange.baseMipLevel = 0;
5606         barrier.subresourceRange.levelCount   = baseLevel.get();
5607         commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
5608                                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier);
5609     }
5610     // [maxLevel:mLevelCount-1] from TRANSFER_DST to SHADER_READ
5611     ASSERT(mLevelCount > maxLevel.get());
5612     barrier.subresourceRange.baseMipLevel = maxLevel.get();
5613     barrier.subresourceRange.levelCount   = mLevelCount - maxLevel.get();
5614     commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
5615                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier);
5616     // [baseLevel:maxLevel-1] from TRANSFER_SRC to SHADER_READ
5617     barrier.oldLayout                     = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
5618     barrier.subresourceRange.baseMipLevel = baseLevel.get();
5619     barrier.subresourceRange.levelCount   = maxLevel.get() - baseLevel.get();
5620     commandBuffer->imageBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
5621                                 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, barrier);
5622 
5623     // This is just changing the internal state of the image helper so that the next call
5624     // to changeLayout will use this layout as the "oldLayout" argument.
5625     // mLastNonShaderReadOnlyLayout is used to ensure previous writes are made visible to reads;
5626     // since the only write here is the transfer, mLastNonShaderReadOnlyLayout is set to
5627     // ImageLayout::TransferDst.
5628     mLastNonShaderReadOnlyLayout = ImageLayout::TransferDst;
5629     mCurrentShaderReadStageMask  = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
5630     mCurrentLayout               = ImageLayout::FragmentShaderReadOnly;
5631 
5632     return angle::Result::Continue;
5633 }
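// Example: with mLevelCount == 8, baseLevel == 2 and maxLevel == 6, the final barriers above
// move levels [0,1] and [6,7] from TRANSFER_DST and levels [2,5] from TRANSFER_SRC, leaving
// the entire mip chain in SHADER_READ_ONLY_OPTIMAL.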
5634 
5635 void ImageHelper::resolve(ImageHelper *dst,
5636                           const VkImageResolve &region,
5637                           CommandBuffer *commandBuffer)
5638 {
5639     ASSERT(mCurrentLayout == ImageLayout::TransferSrc ||
5640            mCurrentLayout == ImageLayout::SharedPresent);
5641     commandBuffer->resolveImage(getImage(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dst->getImage(),
5642                                 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
5643 }
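// Example usage (sketch): resolving the full extent of a multisampled image into a
// single-sampled destination.  Both images are assumed to already be in the
// TransferSrc/TransferDst layouts, and |multisampledImage|, |dstImage| and |commandBuffer|
// are assumed to be provided by the caller.
//
//     VkImageResolve region            = {};
//     region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
//     region.srcSubresource.layerCount = 1;
//     region.dstSubresource            = region.srcSubresource;
//     region.extent                    = multisampledImage->getExtents();
//     multisampledImage->resolve(dstImage, region, commandBuffer);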
5644 
5645 void ImageHelper::removeSingleSubresourceStagedUpdates(ContextVk *contextVk,
5646                                                        gl::LevelIndex levelIndexGL,
5647                                                        uint32_t layerIndex,
5648                                                        uint32_t layerCount)
5649 {
5650     mCurrentSingleClearValue.reset();
5651 
5652     // Find any staged updates for this index and remove them from the pending list.
5653     std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelIndexGL);
5654     if (levelUpdates == nullptr)
5655     {
5656         return;
5657     }
5658 
5659     for (size_t index = 0; index < levelUpdates->size();)
5660     {
5661         auto update = levelUpdates->begin() + index;
5662         if (update->isUpdateToLayers(layerIndex, layerCount))
5663         {
5664             update->release(contextVk->getRenderer());
5665             levelUpdates->erase(update);
5666         }
5667         else
5668         {
5669             index++;
5670         }
5671     }
5672 }
5673 
5674 void ImageHelper::removeStagedUpdates(Context *context,
5675                                       gl::LevelIndex levelGLStart,
5676                                       gl::LevelIndex levelGLEnd)
5677 {
5678     ASSERT(validateSubresourceUpdateImageRefsConsistent());
5679 
5680     // Remove all updates to levels [start, end].
5681     for (gl::LevelIndex level = levelGLStart; level <= levelGLEnd; ++level)
5682     {
5683         std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(level);
5684         if (levelUpdates == nullptr)
5685         {
5686             ASSERT(static_cast<size_t>(level.get()) >= mSubresourceUpdates.size());
5687             return;
5688         }
5689 
5690         for (SubresourceUpdate &update : *levelUpdates)
5691         {
5692             update.release(context->getRenderer());
5693         }
5694 
5695         levelUpdates->clear();
5696     }
5697 
5698     ASSERT(validateSubresourceUpdateImageRefsConsistent());
5699 }
5700 
5701 angle::Result ImageHelper::stageSubresourceUpdateImpl(ContextVk *contextVk,
5702                                                       const gl::ImageIndex &index,
5703                                                       const gl::Extents &glExtents,
5704                                                       const gl::Offset &offset,
5705                                                       const gl::InternalFormat &formatInfo,
5706                                                       const gl::PixelUnpackState &unpack,
5707                                                       DynamicBuffer *stagingBufferOverride,
5708                                                       GLenum type,
5709                                                       const uint8_t *pixels,
5710                                                       const Format &vkFormat,
5711                                                       ImageAccess access,
5712                                                       const GLuint inputRowPitch,
5713                                                       const GLuint inputDepthPitch,
5714                                                       const GLuint inputSkipBytes)
5715 {
5716     const angle::Format &storageFormat = vkFormat.getActualImageFormat(access);
5717 
5718     size_t outputRowPitch;
5719     size_t outputDepthPitch;
5720     size_t stencilAllocationSize = 0;
5721     uint32_t bufferRowLength;
5722     uint32_t bufferImageHeight;
5723     size_t allocationSize;
5724 
5725     LoadImageFunctionInfo loadFunctionInfo = vkFormat.getTextureLoadFunction(access, type);
5726     LoadImageFunction stencilLoadFunction  = nullptr;
5727 
5728     if (storageFormat.isBlock)
5729     {
5730         const gl::InternalFormat &storageFormatInfo = vkFormat.getInternalFormatInfo(type);
5731         GLuint rowPitch;
5732         GLuint depthPitch;
5733         GLuint totalSize;
5734 
5735         ANGLE_VK_CHECK_MATH(contextVk, storageFormatInfo.computeCompressedImageSize(
5736                                            gl::Extents(glExtents.width, 1, 1), &rowPitch));
5737         ANGLE_VK_CHECK_MATH(contextVk,
5738                             storageFormatInfo.computeCompressedImageSize(
5739                                 gl::Extents(glExtents.width, glExtents.height, 1), &depthPitch));
5740 
5741         ANGLE_VK_CHECK_MATH(contextVk,
5742                             storageFormatInfo.computeCompressedImageSize(glExtents, &totalSize));
5743 
5744         outputRowPitch   = rowPitch;
5745         outputDepthPitch = depthPitch;
5746         allocationSize   = totalSize;
5747 
5748         ANGLE_VK_CHECK_MATH(
5749             contextVk, storageFormatInfo.computeBufferRowLength(glExtents.width, &bufferRowLength));
5750         ANGLE_VK_CHECK_MATH(contextVk, storageFormatInfo.computeBufferImageHeight(
5751                                            glExtents.height, &bufferImageHeight));
5752     }
5753     else
5754     {
5755         ASSERT(storageFormat.pixelBytes != 0);
5756 
5757         if (storageFormat.id == angle::FormatID::D24_UNORM_S8_UINT)
5758         {
5759             stencilLoadFunction = angle::LoadX24S8ToS8;
5760         }
5761         if (storageFormat.id == angle::FormatID::D32_FLOAT_S8X24_UINT)
5762         {
5763             // If depth is D32FLOAT_S8, we must pack D32F tightly (no stencil) for CopyBufferToImage
5764             outputRowPitch = sizeof(float) * glExtents.width;
5765 
5766             // The generic load functions don't handle tightly packing D32FS8 to D32F & S8 so call
5767             // special case load functions.
5768             switch (type)
5769             {
5770                 case GL_UNSIGNED_INT:
5771                     loadFunctionInfo.loadFunction = angle::LoadD32ToD32F;
5772                     stencilLoadFunction           = nullptr;
5773                     break;
5774                 case GL_DEPTH32F_STENCIL8:
5775                 case GL_FLOAT_32_UNSIGNED_INT_24_8_REV:
5776                     loadFunctionInfo.loadFunction = angle::LoadD32FS8X24ToD32F;
5777                     stencilLoadFunction           = angle::LoadX32S8ToS8;
5778                     break;
5779                 case GL_UNSIGNED_INT_24_8_OES:
5780                     loadFunctionInfo.loadFunction = angle::LoadD24S8ToD32F;
5781                     stencilLoadFunction           = angle::LoadX24S8ToS8;
5782                     break;
5783                 default:
5784                     UNREACHABLE();
5785             }
5786         }
5787         else
5788         {
5789             outputRowPitch = storageFormat.pixelBytes * glExtents.width;
5790         }
5791         outputDepthPitch = outputRowPitch * glExtents.height;
5792 
5793         bufferRowLength   = glExtents.width;
5794         bufferImageHeight = glExtents.height;
5795 
5796         allocationSize = outputDepthPitch * glExtents.depth;
5797 
5798         // Note: because the LoadImageFunctionInfo functions are limited to copying a single
5799         // component, we have to special case packed depth/stencil use and send the stencil as a
5800         // separate chunk.
5801         if (storageFormat.depthBits > 0 && storageFormat.stencilBits > 0 &&
5802             formatInfo.depthBits > 0 && formatInfo.stencilBits > 0)
5803         {
5804             // Note: Stencil is always one byte
5805             stencilAllocationSize = glExtents.width * glExtents.height * glExtents.depth;
5806             allocationSize += stencilAllocationSize;
5807         }
5808     }
5809 
5810     VkBuffer bufferHandle = VK_NULL_HANDLE;
5811 
5812     uint8_t *stagingPointer    = nullptr;
5813     VkDeviceSize stagingOffset = 0;
5814     // If caller has provided a staging buffer, use it.
5815     DynamicBuffer *stagingBuffer = stagingBufferOverride ? stagingBufferOverride : &mStagingBuffer;
5816     size_t alignment             = mStagingBuffer.getAlignment();
5817     ANGLE_TRY(stagingBuffer->allocateWithAlignment(contextVk, allocationSize, alignment,
5818                                                    &stagingPointer, &bufferHandle, &stagingOffset,
5819                                                    nullptr));
5820     BufferHelper *currentBuffer = stagingBuffer->getCurrentBuffer();
5821 
5822     const uint8_t *source = pixels + static_cast<ptrdiff_t>(inputSkipBytes);
5823 
5824     loadFunctionInfo.loadFunction(glExtents.width, glExtents.height, glExtents.depth, source,
5825                                   inputRowPitch, inputDepthPitch, stagingPointer, outputRowPitch,
5826                                   outputDepthPitch);
5827 
5828     // YUV formats need special handling.
5829     if (storageFormat.isYUV)
5830     {
5831         gl::YuvFormatInfo yuvInfo(formatInfo.internalFormat, glExtents);
5832 
5833         constexpr VkImageAspectFlagBits kPlaneAspectFlags[3] = {
5834             VK_IMAGE_ASPECT_PLANE_0_BIT, VK_IMAGE_ASPECT_PLANE_1_BIT, VK_IMAGE_ASPECT_PLANE_2_BIT};
5835 
5836         // We only support mip level 0 and layerCount of 1 for YUV formats.
5837         ASSERT(index.getLevelIndex() == 0);
5838         ASSERT(index.getLayerCount() == 1);
5839 
5840         for (uint32_t plane = 0; plane < yuvInfo.planeCount; plane++)
5841         {
5842             VkBufferImageCopy copy           = {};
5843             copy.bufferOffset                = stagingOffset + yuvInfo.planeOffset[plane];
5844             copy.bufferRowLength             = 0;
5845             copy.bufferImageHeight           = 0;
5846             copy.imageSubresource.mipLevel   = 0;
5847             copy.imageSubresource.layerCount = 1;
5848             gl_vk::GetOffset(offset, &copy.imageOffset);
5849             gl_vk::GetExtent(yuvInfo.planeExtent[plane], &copy.imageExtent);
5850             copy.imageSubresource.baseArrayLayer = 0;
5851             copy.imageSubresource.aspectMask     = kPlaneAspectFlags[plane];
5852             appendSubresourceUpdate(gl::LevelIndex(0),
5853                                     SubresourceUpdate(currentBuffer, copy, storageFormat.id));
5854         }
5855 
5856         return angle::Result::Continue;
5857     }
5858 
5859     VkBufferImageCopy copy         = {};
5860     VkImageAspectFlags aspectFlags = GetFormatAspectFlags(storageFormat);
5861 
5862     copy.bufferOffset      = stagingOffset;
5863     copy.bufferRowLength   = bufferRowLength;
5864     copy.bufferImageHeight = bufferImageHeight;
5865 
5866     gl::LevelIndex updateLevelGL(index.getLevelIndex());
5867     copy.imageSubresource.mipLevel   = updateLevelGL.get();
5868     copy.imageSubresource.layerCount = index.getLayerCount();
5869 
5870     gl_vk::GetOffset(offset, &copy.imageOffset);
5871     gl_vk::GetExtent(glExtents, &copy.imageExtent);
5872 
5873     if (gl::IsArrayTextureType(index.getType()))
5874     {
5875         copy.imageSubresource.baseArrayLayer = offset.z;
5876         copy.imageOffset.z                   = 0;
5877         copy.imageExtent.depth               = 1;
5878     }
5879     else
5880     {
5881         copy.imageSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
5882     }
5883 
5884     if (stencilAllocationSize > 0)
5885     {
5886         // Note: Stencil is always one byte
5887         ASSERT((aspectFlags & VK_IMAGE_ASPECT_STENCIL_BIT) != 0);
5888 
5889         // Skip over depth data.
5890         stagingPointer += outputDepthPitch * glExtents.depth;
5891         stagingOffset += outputDepthPitch * glExtents.depth;
5892 
5893         // recompute pitch for stencil data
5894         outputRowPitch   = glExtents.width;
5895         outputDepthPitch = outputRowPitch * glExtents.height;
5896 
5897         ASSERT(stencilLoadFunction != nullptr);
5898         stencilLoadFunction(glExtents.width, glExtents.height, glExtents.depth, source,
5899                             inputRowPitch, inputDepthPitch, stagingPointer, outputRowPitch,
5900                             outputDepthPitch);
5901 
5902         VkBufferImageCopy stencilCopy = {};
5903 
5904         stencilCopy.bufferOffset                    = stagingOffset;
5905         stencilCopy.bufferRowLength                 = bufferRowLength;
5906         stencilCopy.bufferImageHeight               = bufferImageHeight;
5907         stencilCopy.imageSubresource.mipLevel       = copy.imageSubresource.mipLevel;
5908         stencilCopy.imageSubresource.baseArrayLayer = copy.imageSubresource.baseArrayLayer;
5909         stencilCopy.imageSubresource.layerCount     = copy.imageSubresource.layerCount;
5910         stencilCopy.imageOffset                     = copy.imageOffset;
5911         stencilCopy.imageExtent                     = copy.imageExtent;
5912         stencilCopy.imageSubresource.aspectMask     = VK_IMAGE_ASPECT_STENCIL_BIT;
5913         appendSubresourceUpdate(updateLevelGL,
5914                                 SubresourceUpdate(currentBuffer, stencilCopy, storageFormat.id));
5915 
5916         aspectFlags &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
5917     }
5918 
5919     if (HasBothDepthAndStencilAspects(aspectFlags))
5920     {
5921         // We still have both depth and stencil aspect bits set. That means we have a destination
5922         // buffer that is packed depth stencil and that the application is only loading one aspect.
5923         // Figure out which aspect the user is touching and remove the unused aspect bit.
5924         if (formatInfo.stencilBits > 0)
5925         {
5926             aspectFlags &= ~VK_IMAGE_ASPECT_DEPTH_BIT;
5927         }
5928         else
5929         {
5930             aspectFlags &= ~VK_IMAGE_ASPECT_STENCIL_BIT;
5931         }
5932     }
5933 
5934     if (aspectFlags)
5935     {
5936         copy.imageSubresource.aspectMask = aspectFlags;
5937         appendSubresourceUpdate(updateLevelGL,
5938                                 SubresourceUpdate(currentBuffer, copy, storageFormat.id));
5939     }
5940 
5941     return angle::Result::Continue;
5942 }
5943 
5944 angle::Result ImageHelper::reformatStagedBufferUpdates(ContextVk *contextVk,
5945                                                        angle::FormatID srcFormatID,
5946                                                        angle::FormatID dstFormatID)
5947 {
5948     const angle::Format &srcFormat = angle::Format::Get(srcFormatID);
5949     const angle::Format &dstFormat = angle::Format::Get(dstFormatID);
5950     const gl::InternalFormat &dstFormatInfo =
5951         gl::GetSizedInternalFormatInfo(dstFormat.glInternalFormat);
5952 
5953     for (std::vector<SubresourceUpdate> &levelUpdates : mSubresourceUpdates)
5954     {
5955         for (SubresourceUpdate &update : levelUpdates)
5956         {
5957             // Right now, whenever we stage an update from a source image, the formats always match.
5958             ASSERT(valid() || update.updateSource != UpdateSource::Image ||
5959                    update.data.image.formatID == srcFormatID);
5960 
5961             if (update.updateSource == UpdateSource::Buffer &&
5962                 update.data.buffer.formatID == srcFormatID)
5963             {
5964                 const VkBufferImageCopy &copy = update.data.buffer.copyRegion;
5965 
5966                 // Source and dst data are tightly packed
5967                 GLuint srcDataRowPitch = copy.imageExtent.width * srcFormat.pixelBytes;
5968                 GLuint dstDataRowPitch = copy.imageExtent.width * dstFormat.pixelBytes;
5969 
5970                 GLuint srcDataDepthPitch = srcDataRowPitch * copy.imageExtent.height;
5971                 GLuint dstDataDepthPitch = dstDataRowPitch * copy.imageExtent.height;
5972 
5973                 // Retrieve source buffer
5974                 vk::BufferHelper *srcBuffer = update.data.buffer.bufferHelper;
5975                 uint8_t *srcData            = srcBuffer->getMappedMemory() + copy.bufferOffset;
5976 
5977                 // Allocate memory with dstFormat
5978                 uint8_t *dstData             = nullptr;
5979                 VkBuffer dstBufferHandle     = VK_NULL_HANDLE;
5980                 VkDeviceSize dstBufferOffset = 0;
5981                 GLuint dstBufferSize         = dstDataDepthPitch * copy.imageExtent.depth;
5982                 ANGLE_TRY(mStagingBuffer.allocate(contextVk, dstBufferSize, &dstData,
5983                                                   &dstBufferHandle, &dstBufferOffset, nullptr));
5984                 BufferHelper *dstBuffer = mStagingBuffer.getCurrentBuffer();
5985 
5986                 rx::PixelReadFunction pixelReadFunction   = srcFormat.pixelReadFunction;
5987                 rx::PixelWriteFunction pixelWriteFunction = dstFormat.pixelWriteFunction;
5988 
5989                 CopyImageCHROMIUM(srcData, srcDataRowPitch, srcFormat.pixelBytes, srcDataDepthPitch,
5990                                   pixelReadFunction, dstData, dstDataRowPitch, dstFormat.pixelBytes,
5991                                   dstDataDepthPitch, pixelWriteFunction, dstFormatInfo.format,
5992                                   dstFormatInfo.componentType, copy.imageExtent.width,
5993                                   copy.imageExtent.height, copy.imageExtent.depth, false, false,
5994                                   false);
5995 
5996                 // Replace srcBuffer with dstBuffer
5997                 update.data.buffer.bufferHelper            = dstBuffer;
5998                 update.data.buffer.formatID                = dstFormatID;
5999                 update.data.buffer.copyRegion.bufferOffset = dstBufferOffset;
6000             }
6001         }
6002     }
6003 
6004     return angle::Result::Continue;
6005 }
6006 
6007 angle::Result ImageHelper::CalculateBufferInfo(ContextVk *contextVk,
6008                                                const gl::Extents &glExtents,
6009                                                const gl::InternalFormat &formatInfo,
6010                                                const gl::PixelUnpackState &unpack,
6011                                                GLenum type,
6012                                                bool is3D,
6013                                                GLuint *inputRowPitch,
6014                                                GLuint *inputDepthPitch,
6015                                                GLuint *inputSkipBytes)
6016 {
6017     // YUV formats need special handling.
6018     if (gl::IsYuvFormat(formatInfo.internalFormat))
6019     {
6020         gl::YuvFormatInfo yuvInfo(formatInfo.internalFormat, glExtents);
6021 
6022         // row pitch = Y plane row pitch
6023         *inputRowPitch = yuvInfo.planePitch[0];
6024         // depth pitch = Y plane size + chroma plane size
6025         *inputDepthPitch = yuvInfo.planeSize[0] + yuvInfo.planeSize[1] + yuvInfo.planeSize[2];
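        // For illustration only (assuming a two-plane 4:2:0 format such as NV12, and ignoring any
        // plane alignment that YuvFormatInfo may apply): a 4x4 image would have a 4-byte Y row
        // pitch, a 16-byte Y plane, and an 8-byte interleaved chroma plane, so the depth pitch
        // above would be 16 + 8 + 0 = 24 bytes covering all packed planes.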
6026         *inputSkipBytes  = 0;
6027 
6028         return angle::Result::Continue;
6029     }
6030 
6031     ANGLE_VK_CHECK_MATH(contextVk,
6032                         formatInfo.computeRowPitch(type, glExtents.width, unpack.alignment,
6033                                                    unpack.rowLength, inputRowPitch));
6034 
6035     ANGLE_VK_CHECK_MATH(contextVk,
6036                         formatInfo.computeDepthPitch(glExtents.height, unpack.imageHeight,
6037                                                      *inputRowPitch, inputDepthPitch));
6038 
6039     ANGLE_VK_CHECK_MATH(
6040         contextVk, formatInfo.computeSkipBytes(type, *inputRowPitch, *inputDepthPitch, unpack, is3D,
6041                                                inputSkipBytes));
6042 
6043     return angle::Result::Continue;
6044 }
6045 
6046 void ImageHelper::onWrite(gl::LevelIndex levelStart,
6047                           uint32_t levelCount,
6048                           uint32_t layerStart,
6049                           uint32_t layerCount,
6050                           VkImageAspectFlags aspectFlags)
6051 {
6052     mCurrentSingleClearValue.reset();
6053 
6054     // Mark contents of the given subresource as defined.
6055     setContentDefined(toVkLevel(levelStart), levelCount, layerStart, layerCount, aspectFlags);
6056 }
6057 
6058 bool ImageHelper::hasSubresourceDefinedContent(gl::LevelIndex level,
6059                                                uint32_t layerIndex,
6060                                                uint32_t layerCount) const
6061 {
6062     if (layerIndex >= kMaxContentDefinedLayerCount)
6063     {
6064         return true;
6065     }
6066 
6067     uint8_t layerRangeBits =
6068         GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
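    // For illustration only (assuming GetContentDefinedLayerRangeBits sets one bit per layer in
    // [layerIndex, layerIndex + layerCount), clamped to kMaxContentDefinedLayerCount): layerIndex 2
    // with layerCount 3 would produce 0b00011100, and the query below returns true if any of those
    // bits are set for the level.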
6069     return (getLevelContentDefined(toVkLevel(level)) & LevelContentDefinedMask(layerRangeBits))
6070         .any();
6071 }
6072 
6073 bool ImageHelper::hasSubresourceDefinedStencilContent(gl::LevelIndex level,
6074                                                       uint32_t layerIndex,
6075                                                       uint32_t layerCount) const
6076 {
6077     if (layerIndex >= kMaxContentDefinedLayerCount)
6078     {
6079         return true;
6080     }
6081 
6082     uint8_t layerRangeBits =
6083         GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
6084     return (getLevelStencilContentDefined(toVkLevel(level)) &
6085             LevelContentDefinedMask(layerRangeBits))
6086         .any();
6087 }
6088 
6089 void ImageHelper::invalidateSubresourceContent(ContextVk *contextVk,
6090                                                gl::LevelIndex level,
6091                                                uint32_t layerIndex,
6092                                                uint32_t layerCount)
6093 {
6094     if (layerIndex < kMaxContentDefinedLayerCount)
6095     {
6096         uint8_t layerRangeBits =
6097             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
6098         getLevelContentDefined(toVkLevel(level)) &= static_cast<uint8_t>(~layerRangeBits);
6099     }
6100     else
6101     {
6102         ANGLE_VK_PERF_WARNING(
6103             contextVk, GL_DEBUG_SEVERITY_LOW,
6104             "glInvalidateFramebuffer (%s) ineffective on attachments with layer >= 8",
6105             (getAspectFlags() & VK_IMAGE_ASPECT_COLOR_BIT) != 0 ? "color" : "depth");
6106     }
6107 }
6108 
6109 void ImageHelper::invalidateSubresourceStencilContent(ContextVk *contextVk,
6110                                                       gl::LevelIndex level,
6111                                                       uint32_t layerIndex,
6112                                                       uint32_t layerCount)
6113 {
6114     if (layerIndex < kMaxContentDefinedLayerCount)
6115     {
6116         uint8_t layerRangeBits =
6117             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
6118         getLevelStencilContentDefined(toVkLevel(level)) &= static_cast<uint8_t>(~layerRangeBits);
6119     }
6120     else
6121     {
6122         ANGLE_VK_PERF_WARNING(
6123             contextVk, GL_DEBUG_SEVERITY_LOW,
6124             "glInvalidateFramebuffer (stencil) ineffective on attachments with layer >= 8");
6125     }
6126 }
6127 
6128 void ImageHelper::restoreSubresourceContent(gl::LevelIndex level,
6129                                             uint32_t layerIndex,
6130                                             uint32_t layerCount)
6131 {
6132     if (layerIndex < kMaxContentDefinedLayerCount)
6133     {
6134         uint8_t layerRangeBits =
6135             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
6136         getLevelContentDefined(toVkLevel(level)) |= layerRangeBits;
6137     }
6138 }
6139 
6140 void ImageHelper::restoreSubresourceStencilContent(gl::LevelIndex level,
6141                                                    uint32_t layerIndex,
6142                                                    uint32_t layerCount)
6143 {
6144     if (layerIndex < kMaxContentDefinedLayerCount)
6145     {
6146         uint8_t layerRangeBits =
6147             GetContentDefinedLayerRangeBits(layerIndex, layerCount, kMaxContentDefinedLayerCount);
6148         getLevelStencilContentDefined(toVkLevel(level)) |= layerRangeBits;
6149     }
6150 }
6151 
6152 angle::Result ImageHelper::stageSubresourceUpdate(ContextVk *contextVk,
6153                                                   const gl::ImageIndex &index,
6154                                                   const gl::Extents &glExtents,
6155                                                   const gl::Offset &offset,
6156                                                   const gl::InternalFormat &formatInfo,
6157                                                   const gl::PixelUnpackState &unpack,
6158                                                   DynamicBuffer *stagingBufferOverride,
6159                                                   GLenum type,
6160                                                   const uint8_t *pixels,
6161                                                   const Format &vkFormat,
6162                                                   ImageAccess access)
6163 {
6164     GLuint inputRowPitch   = 0;
6165     GLuint inputDepthPitch = 0;
6166     GLuint inputSkipBytes  = 0;
6167     ANGLE_TRY(CalculateBufferInfo(contextVk, glExtents, formatInfo, unpack, type, index.usesTex3D(),
6168                                   &inputRowPitch, &inputDepthPitch, &inputSkipBytes));
6169 
6170     ANGLE_TRY(stageSubresourceUpdateImpl(contextVk, index, glExtents, offset, formatInfo, unpack,
6171                                          stagingBufferOverride, type, pixels, vkFormat, access,
6172                                          inputRowPitch, inputDepthPitch, inputSkipBytes));
6173 
6174     return angle::Result::Continue;
6175 }
6176 
6177 angle::Result ImageHelper::stageSubresourceUpdateAndGetData(ContextVk *contextVk,
6178                                                             size_t allocationSize,
6179                                                             const gl::ImageIndex &imageIndex,
6180                                                             const gl::Extents &glExtents,
6181                                                             const gl::Offset &offset,
6182                                                             uint8_t **destData,
6183                                                             DynamicBuffer *stagingBufferOverride,
6184                                                             angle::FormatID formatID)
6185 {
6186     VkBuffer bufferHandle;
6187     VkDeviceSize stagingOffset = 0;
6188 
6189     DynamicBuffer *stagingBuffer = stagingBufferOverride ? stagingBufferOverride : &mStagingBuffer;
6190     size_t alignment             = mStagingBuffer.getAlignment();
6191     ANGLE_TRY(stagingBuffer->allocateWithAlignment(contextVk, allocationSize, alignment, destData,
6192                                                    &bufferHandle, &stagingOffset, nullptr));
6193 
6194     gl::LevelIndex updateLevelGL(imageIndex.getLevelIndex());
6195 
6196     VkBufferImageCopy copy               = {};
6197     copy.bufferOffset                    = stagingOffset;
6198     copy.bufferRowLength                 = glExtents.width;
6199     copy.bufferImageHeight               = glExtents.height;
6200     copy.imageSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
6201     copy.imageSubresource.mipLevel       = updateLevelGL.get();
6202     copy.imageSubresource.baseArrayLayer = imageIndex.hasLayer() ? imageIndex.getLayerIndex() : 0;
6203     copy.imageSubresource.layerCount     = imageIndex.getLayerCount();
6204 
6205     // Note: Only the color aspect is supported for now.
6206     ASSERT((mActualFormatID == angle::FormatID::NONE) ||
6207            (getAspectFlags() == VK_IMAGE_ASPECT_COLOR_BIT));
6208 
6209     gl_vk::GetOffset(offset, &copy.imageOffset);
6210     gl_vk::GetExtent(glExtents, &copy.imageExtent);
6211 
6212     appendSubresourceUpdate(updateLevelGL,
6213                             SubresourceUpdate(stagingBuffer->getCurrentBuffer(), copy, formatID));
6214 
6215     return angle::Result::Continue;
6216 }
6217 
6218 angle::Result ImageHelper::stageSubresourceUpdateFromFramebuffer(
6219     const gl::Context *context,
6220     const gl::ImageIndex &index,
6221     const gl::Rectangle &sourceArea,
6222     const gl::Offset &dstOffset,
6223     const gl::Extents &dstExtent,
6224     const gl::InternalFormat &formatInfo,
6225     ImageAccess access,
6226     FramebufferVk *framebufferVk,
6227     DynamicBuffer *stagingBufferOverride)
6228 {
6229     ContextVk *contextVk = GetImpl(context);
6230 
6231     // If the extents and offset are outside the source image, we need to clip.
6232     gl::Rectangle clippedRectangle;
6233     const gl::Extents readExtents = framebufferVk->getReadImageExtents();
6234     if (!ClipRectangle(sourceArea, gl::Rectangle(0, 0, readExtents.width, readExtents.height),
6235                        &clippedRectangle))
6236     {
6237         // Empty source area, nothing to do.
6238         return angle::Result::Continue;
6239     }
6240 
6241     bool isViewportFlipEnabled = contextVk->isViewportFlipEnabledForDrawFBO();
6242     if (isViewportFlipEnabled)
6243     {
6244         clippedRectangle.y = readExtents.height - clippedRectangle.y - clippedRectangle.height;
6245     }
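    // For illustration only (hypothetical numbers): with readExtents.height = 100 and a clipped
    // rectangle at y = 10 with height = 20, the flip above remaps it to y = 100 - 10 - 20 = 70,
    // selecting the same rows measured from the opposite edge of the read image.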
6246 
6247     // 1- obtain a buffer handle to copy to
6248     RendererVk *renderer = contextVk->getRenderer();
6249 
6250     const Format &vkFormat             = renderer->getFormat(formatInfo.sizedInternalFormat);
6251     const angle::Format &storageFormat = vkFormat.getActualImageFormat(access);
6252     LoadImageFunctionInfo loadFunction = vkFormat.getTextureLoadFunction(access, formatInfo.type);
6253 
6254     size_t outputRowPitch   = storageFormat.pixelBytes * clippedRectangle.width;
6255     size_t outputDepthPitch = outputRowPitch * clippedRectangle.height;
6256 
6257     VkBuffer bufferHandle = VK_NULL_HANDLE;
6258 
6259     uint8_t *stagingPointer    = nullptr;
6260     VkDeviceSize stagingOffset = 0;
6261 
6262     // The destination is only one layer deep.
6263     size_t allocationSize        = outputDepthPitch;
6264     DynamicBuffer *stagingBuffer = stagingBufferOverride ? stagingBufferOverride : &mStagingBuffer;
6265     size_t alignment             = mStagingBuffer.getAlignment();
6266     ANGLE_TRY(stagingBuffer->allocateWithAlignment(contextVk, allocationSize, alignment,
6267                                                    &stagingPointer, &bufferHandle, &stagingOffset,
6268                                                    nullptr));
6269     BufferHelper *currentBuffer = stagingBuffer->getCurrentBuffer();
6270 
6271     const angle::Format &copyFormat =
6272         GetFormatFromFormatType(formatInfo.internalFormat, formatInfo.type);
6273     PackPixelsParams params(clippedRectangle, copyFormat, static_cast<GLuint>(outputRowPitch),
6274                             isViewportFlipEnabled, nullptr, 0);
6275 
6276     RenderTargetVk *readRenderTarget = framebufferVk->getColorReadRenderTarget();
6277 
6278     // 2- copy the source image region to the pixel buffer using a cpu readback
6279     if (loadFunction.requiresConversion)
6280     {
6281         // When a conversion is required, we need to use the loadFunction to read from a temporary
6282         // buffer instead, so it's an even slower path.
6283         size_t bufferSize =
6284             storageFormat.pixelBytes * clippedRectangle.width * clippedRectangle.height;
6285         angle::MemoryBuffer *memoryBuffer = nullptr;
6286         ANGLE_VK_CHECK_ALLOC(contextVk, context->getScratchBuffer(bufferSize, &memoryBuffer));
6287 
6288         // Read into the scratch buffer
6289         ANGLE_TRY(framebufferVk->readPixelsImpl(contextVk, clippedRectangle, params,
6290                                                 VK_IMAGE_ASPECT_COLOR_BIT, readRenderTarget,
6291                                                 memoryBuffer->data()));
6292 
6293         // Load from scratch buffer to our pixel buffer
6294         loadFunction.loadFunction(clippedRectangle.width, clippedRectangle.height, 1,
6295                                   memoryBuffer->data(), outputRowPitch, 0, stagingPointer,
6296                                   outputRowPitch, 0);
6297     }
6298     else
6299     {
6300         // We read directly from the framebuffer into our pixel buffer.
6301         ANGLE_TRY(framebufferVk->readPixelsImpl(contextVk, clippedRectangle, params,
6302                                                 VK_IMAGE_ASPECT_COLOR_BIT, readRenderTarget,
6303                                                 stagingPointer));
6304     }
6305 
6306     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6307 
6308     // 3- enqueue the destination image subresource update
6309     VkBufferImageCopy copyToImage               = {};
6310     copyToImage.bufferOffset                    = static_cast<VkDeviceSize>(stagingOffset);
6311     copyToImage.bufferRowLength                 = 0;  // Tightly packed data can be specified as 0.
6312     copyToImage.bufferImageHeight               = clippedRectangle.height;
6313     copyToImage.imageSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
6314     copyToImage.imageSubresource.mipLevel       = updateLevelGL.get();
6315     copyToImage.imageSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
6316     copyToImage.imageSubresource.layerCount     = index.getLayerCount();
6317     gl_vk::GetOffset(dstOffset, &copyToImage.imageOffset);
6318     gl_vk::GetExtent(dstExtent, &copyToImage.imageExtent);
6319 
6320     // Stage the copy as a subresource update for the destination image.
6321     appendSubresourceUpdate(updateLevelGL,
6322                             SubresourceUpdate(currentBuffer, copyToImage, storageFormat.id));
6323     return angle::Result::Continue;
6324 }
6325 
6326 void ImageHelper::stageSubresourceUpdateFromImage(RefCounted<ImageHelper> *image,
6327                                                   const gl::ImageIndex &index,
6328                                                   LevelIndex srcMipLevel,
6329                                                   const gl::Offset &destOffset,
6330                                                   const gl::Extents &glExtents,
6331                                                   const VkImageType imageType)
6332 {
6333     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6334 
6335     VkImageCopy copyToImage               = {};
6336     copyToImage.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
6337     copyToImage.srcSubresource.mipLevel   = srcMipLevel.get();
6338     copyToImage.srcSubresource.layerCount = index.getLayerCount();
6339     copyToImage.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
6340     copyToImage.dstSubresource.mipLevel   = updateLevelGL.get();
6341 
6342     if (imageType == VK_IMAGE_TYPE_3D)
6343     {
6344         // These values must be set explicitly to follow the Vulkan spec:
6345         // https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/VkImageCopy.html
6346         // If either of the calling command's srcImage or dstImage parameters are of VkImageType
6347         // VK_IMAGE_TYPE_3D, the baseArrayLayer and layerCount members of the corresponding
6348         // subresource must be 0 and 1, respectively
6349         copyToImage.dstSubresource.baseArrayLayer = 0;
6350         copyToImage.dstSubresource.layerCount     = 1;
6351         // Preserve the assumption that destOffset.z == "dstSubresource.baseArrayLayer"
6352         ASSERT(destOffset.z == (index.hasLayer() ? index.getLayerIndex() : 0));
6353     }
6354     else
6355     {
6356         copyToImage.dstSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
6357         copyToImage.dstSubresource.layerCount     = index.getLayerCount();
6358     }
6359 
6360     gl_vk::GetOffset(destOffset, &copyToImage.dstOffset);
6361     gl_vk::GetExtent(glExtents, &copyToImage.extent);
6362 
6363     appendSubresourceUpdate(
6364         updateLevelGL, SubresourceUpdate(image, copyToImage, image->get().getActualFormatID()));
6365 }
6366 
6367 void ImageHelper::stageSubresourceUpdatesFromAllImageLevels(RefCounted<ImageHelper> *image,
6368                                                             gl::LevelIndex baseLevel)
6369 {
6370     for (LevelIndex levelVk(0); levelVk < LevelIndex(image->get().getLevelCount()); ++levelVk)
6371     {
6372         const gl::LevelIndex levelGL = vk_gl::GetLevelIndex(levelVk, baseLevel);
6373         const gl::ImageIndex index =
6374             gl::ImageIndex::Make2DArrayRange(levelGL.get(), 0, image->get().getLayerCount());
6375 
6376         stageSubresourceUpdateFromImage(image, index, levelVk, gl::kOffsetZero,
6377                                         image->get().getLevelExtents(levelVk),
6378                                         image->get().getType());
6379     }
6380 }
6381 
6382 void ImageHelper::stageClear(const gl::ImageIndex &index,
6383                              VkImageAspectFlags aspectFlags,
6384                              const VkClearValue &clearValue)
6385 {
6386     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6387     appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(aspectFlags, clearValue, index));
6388 }
6389 
6390 void ImageHelper::stageRobustResourceClear(const gl::ImageIndex &index)
6391 {
6392     const VkImageAspectFlags aspectFlags = getAspectFlags();
6393 
6394     ASSERT(mActualFormatID != angle::FormatID::NONE);
6395     VkClearValue clearValue = GetRobustResourceClearValue(getIntendedFormat(), getActualFormat());
6396 
6397     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6398     appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(aspectFlags, clearValue, index));
6399 }
6400 
6401 angle::Result ImageHelper::stageRobustResourceClearWithFormat(ContextVk *contextVk,
6402                                                               const gl::ImageIndex &index,
6403                                                               const gl::Extents &glExtents,
6404                                                               const angle::Format &intendedFormat,
6405                                                               const angle::Format &imageFormat)
6406 {
6407     const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(imageFormat);
6408 
6409     // Robust clears must only be staged if we do not have any prior data for this subresource.
6410     ASSERT(!hasStagedUpdatesForSubresource(gl::LevelIndex(index.getLevelIndex()),
6411                                            index.getLayerIndex(), index.getLayerCount()));
6412 
6413     VkClearValue clearValue = GetRobustResourceClearValue(intendedFormat, imageFormat);
6414 
6415     gl::LevelIndex updateLevelGL(index.getLevelIndex());
6416 
6417     if (imageFormat.isBlock)
6418     {
6419         // This only supports doing an initial clear to 0, not clearing to a specific encoded RGBA
6420         // value
6421         ASSERT((clearValue.color.int32[0] == 0) && (clearValue.color.int32[1] == 0) &&
6422                (clearValue.color.int32[2] == 0) && (clearValue.color.int32[3] == 0));
6423 
6424         const gl::InternalFormat &formatInfo =
6425             gl::GetSizedInternalFormatInfo(imageFormat.glInternalFormat);
6426         GLuint totalSize;
6427         ANGLE_VK_CHECK_MATH(contextVk,
6428                             formatInfo.computeCompressedImageSize(glExtents, &totalSize));
6429 
6430         VkBuffer bufferHandle      = VK_NULL_HANDLE;
6431         uint8_t *stagingPointer    = nullptr;
6432         VkDeviceSize stagingOffset = 0;
6433         ANGLE_TRY(mStagingBuffer.allocate(contextVk, totalSize, &stagingPointer, &bufferHandle,
6434                                           &stagingOffset, nullptr));
6435         memset(stagingPointer, 0, totalSize);
6436 
6437         VkBufferImageCopy copyRegion               = {};
6438         copyRegion.imageExtent.width               = glExtents.width;
6439         copyRegion.imageExtent.height              = glExtents.height;
6440         copyRegion.imageExtent.depth               = glExtents.depth;
6441         copyRegion.imageSubresource.mipLevel       = updateLevelGL.get();
6442         copyRegion.imageSubresource.aspectMask     = aspectFlags;
6443         copyRegion.imageSubresource.baseArrayLayer = index.hasLayer() ? index.getLayerIndex() : 0;
6444         copyRegion.imageSubresource.layerCount     = index.getLayerCount();
6445 
6446         appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(mStagingBuffer.getCurrentBuffer(),
6447                                                                  copyRegion, imageFormat.id));
6448     }
6449     else
6450     {
6451         appendSubresourceUpdate(updateLevelGL, SubresourceUpdate(aspectFlags, clearValue, index));
6452     }
6453 
6454     return angle::Result::Continue;
6455 }
6456 
6457 void ImageHelper::stageClearIfEmulatedFormat(bool isRobustResourceInitEnabled, bool isExternalImage)
6458 {
6459     // Skip staging extra clears if robust resource init is enabled.
6460     if (!hasEmulatedImageChannels() || isRobustResourceInitEnabled)
6461     {
6462         return;
6463     }
6464 
6465     VkClearValue clearValue = {};
6466     if (getIntendedFormat().hasDepthOrStencilBits())
6467     {
6468         clearValue.depthStencil = kRobustInitDepthStencilValue;
6469     }
6470     else
6471     {
6472         clearValue.color = kEmulatedInitColorValue;
6473     }
6474 
6475     const VkImageAspectFlags aspectFlags = getAspectFlags();
6476 
6477     // If the image has an emulated channel and robust resource init is not enabled, always clear
6478     // it. These channels will be masked out in future writes, and shouldn't contain uninitialized
6479     // values.
6480     //
6481     // For external images, we cannot clear the image entirely, as it may contain data in the
6482     // non-emulated channels.  For depth/stencil images, clear is already per aspect, but for color
6483     // images we would need to take a special path where we only clear the emulated channels.
6484     const bool clearOnlyEmulatedChannels =
6485         isExternalImage && !getIntendedFormat().hasDepthOrStencilBits();
6486     const VkColorComponentFlags colorMaskFlags =
6487         clearOnlyEmulatedChannels ? getEmulatedChannelsMask() : 0;
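    // For illustration only (assuming getEmulatedChannelsMask reports the channels that exist only
    // in the actual Vulkan format): an external GL_RGB8 image emulated as RGBA8 would clear only
    // the emulated alpha channel through the masked path below, leaving the application-provided
    // RGB data untouched.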
6488 
6489     for (LevelIndex level(0); level < LevelIndex(mLevelCount); ++level)
6490     {
6491         gl::LevelIndex updateLevelGL = toGLLevel(level);
6492         gl::ImageIndex index =
6493             gl::ImageIndex::Make2DArrayRange(updateLevelGL.get(), 0, mLayerCount);
6494 
6495         if (clearOnlyEmulatedChannels)
6496         {
6497             prependSubresourceUpdate(updateLevelGL,
6498                                      SubresourceUpdate(colorMaskFlags, clearValue.color, index));
6499         }
6500         else
6501         {
6502             prependSubresourceUpdate(updateLevelGL,
6503                                      SubresourceUpdate(aspectFlags, clearValue, index));
6504         }
6505     }
6506 }
6507 
6508 bool ImageHelper::verifyEmulatedClearsAreBeforeOtherUpdates(
6509     const std::vector<SubresourceUpdate> &updates)
6510 {
6511     bool isIteratingEmulatedClears = true;
6512 
6513     for (const SubresourceUpdate &update : updates)
6514     {
6515         // If anything other than ClearEmulatedChannelsOnly is visited, there cannot be any
6516         // ClearEmulatedChannelsOnly updates after that.
6517         if (update.updateSource != UpdateSource::ClearEmulatedChannelsOnly)
6518         {
6519             isIteratingEmulatedClears = false;
6520         }
6521         else if (!isIteratingEmulatedClears)
6522         {
6523             // If ClearEmulatedChannelsOnly is visited after another update, that's an error.
6524             return false;
6525         }
6526     }
6527 
6528     // Additionally, verify that emulated clear is not applied multiple times.
6529     if (updates.size() >= 2 && updates[1].updateSource == UpdateSource::ClearEmulatedChannelsOnly)
6530     {
6531         return false;
6532     }
6533 
6534     return true;
6535 }
6536 
6537 void ImageHelper::stageSelfAsSubresourceUpdates(ContextVk *contextVk,
6538                                                 uint32_t levelCount,
6539                                                 gl::TexLevelMask skipLevelsMask)
6540 
6541 {
6542     // Nothing to do if every level must be skipped
6543     gl::TexLevelMask levelsMask(angle::BitMask<uint32_t>(levelCount) << mFirstAllocatedLevel.get());
6544     if ((~skipLevelsMask & levelsMask).none())
6545     {
6546         return;
6547     }
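    // For illustration only (hypothetical values): with levelCount = 3 and mFirstAllocatedLevel = 2,
    // levelsMask above is 0b11100; if skipLevelsMask already covers those three levels, the early
    // return above is taken and nothing is staged.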
6548 
6549     // Because we are cloning this object to another object, we must finalize the layout if it is
6550     // being used by the current render pass as an attachment. Otherwise we would copy an incorrect
6551     // layout, since the final layout is determined at endRenderPass time.
6552     contextVk->finalizeImageLayout(this);
6553 
6554     std::unique_ptr<RefCounted<ImageHelper>> prevImage =
6555         std::make_unique<RefCounted<ImageHelper>>();
6556 
6557     // Move the necessary information for the staged update to work, and keep the rest as part of this
6558     // object.
6559 
6560     // Usage info
6561     prevImage->get().Resource::operator=(std::move(*this));
6562 
6563     // Vulkan objects
6564     prevImage->get().mImage        = std::move(mImage);
6565     prevImage->get().mDeviceMemory = std::move(mDeviceMemory);
6566 
6567     // Barrier information.  Note: mLevelCount is set to levelCount so that only the necessary
6568     // levels are transitioned when flushing the update.
6569     prevImage->get().mIntendedFormatID            = mIntendedFormatID;
6570     prevImage->get().mActualFormatID              = mActualFormatID;
6571     prevImage->get().mCurrentLayout               = mCurrentLayout;
6572     prevImage->get().mCurrentQueueFamilyIndex     = mCurrentQueueFamilyIndex;
6573     prevImage->get().mLastNonShaderReadOnlyLayout = mLastNonShaderReadOnlyLayout;
6574     prevImage->get().mCurrentShaderReadStageMask  = mCurrentShaderReadStageMask;
6575     prevImage->get().mLevelCount                  = levelCount;
6576     prevImage->get().mLayerCount                  = mLayerCount;
6577     prevImage->get().mImageSerial                 = mImageSerial;
6578 
6579     // Reset information for current (invalid) image.
6580     mCurrentLayout               = ImageLayout::Undefined;
6581     mCurrentQueueFamilyIndex     = std::numeric_limits<uint32_t>::max();
6582     mLastNonShaderReadOnlyLayout = ImageLayout::Undefined;
6583     mCurrentShaderReadStageMask  = 0;
6584     mImageSerial                 = kInvalidImageSerial;
6585 
6586     setEntireContentUndefined();
6587 
6588     // Stage updates from the previous image.
6589     for (LevelIndex levelVk(0); levelVk < LevelIndex(levelCount); ++levelVk)
6590     {
6591         gl::LevelIndex levelGL = toGLLevel(levelVk);
6592         if (skipLevelsMask.test(levelGL.get()))
6593         {
6594             continue;
6595         }
6596 
6597         const gl::ImageIndex index =
6598             gl::ImageIndex::Make2DArrayRange(levelGL.get(), 0, mLayerCount);
6599 
6600         stageSubresourceUpdateFromImage(prevImage.get(), index, levelVk, gl::kOffsetZero,
6601                                         getLevelExtents(levelVk), mImageType);
6602     }
6603 
6604     ASSERT(levelCount > 0);
6605     prevImage.release();
6606 }
6607 
6608 angle::Result ImageHelper::flushSingleSubresourceStagedUpdates(ContextVk *contextVk,
6609                                                                gl::LevelIndex levelGL,
6610                                                                uint32_t layer,
6611                                                                uint32_t layerCount,
6612                                                                ClearValuesArray *deferredClears,
6613                                                                uint32_t deferredClearIndex)
6614 {
6615     std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelGL);
6616     if (levelUpdates == nullptr || levelUpdates->empty())
6617     {
6618         return angle::Result::Continue;
6619     }
6620 
6621     LevelIndex levelVk = toVkLevel(levelGL);
6622 
6623     // Handle deferred clears. Search the updates list for a matching clear index.
6624     if (deferredClears)
6625     {
6626         Optional<size_t> foundClear;
6627 
6628         for (size_t updateIndex = 0; updateIndex < levelUpdates->size(); ++updateIndex)
6629         {
6630             SubresourceUpdate &update = (*levelUpdates)[updateIndex];
6631 
6632             if (update.isUpdateToLayers(layer, layerCount))
6633             {
6634                 // On any data update, exit out. We'll need to do a full upload.
6635                 const bool isClear              = update.updateSource == UpdateSource::Clear;
6636                 const uint32_t updateLayerCount = isClear ? update.data.clear.layerCount : 0;
6637                 const uint32_t imageLayerCount =
6638                     mImageType == VK_IMAGE_TYPE_3D ? getLevelExtents(levelVk).depth : mLayerCount;
6639 
6640                 if (!isClear || (updateLayerCount != layerCount &&
6641                                  !(update.data.clear.layerCount == VK_REMAINING_ARRAY_LAYERS &&
6642                                    imageLayerCount == layerCount)))
6643                 {
6644                     foundClear.reset();
6645                     break;
6646                 }
6647 
6648                 // Otherwise track the latest clear update index.
6649                 foundClear = updateIndex;
6650             }
6651         }
6652 
6653         // If we have a valid index we defer the clear using the clear reference.
6654         if (foundClear.valid())
6655         {
6656             size_t foundIndex         = foundClear.value();
6657             const ClearUpdate &update = (*levelUpdates)[foundIndex].data.clear;
6658 
6659             // Note that this set command handles combined or separate depth/stencil clears.
6660             deferredClears->store(deferredClearIndex, update.aspectFlags, update.value);
6661 
6662             // Do not call onWrite as it removes mCurrentSingleClearValue, but instead call
6663             // setContentDefined directly.
6664             setContentDefined(toVkLevel(levelGL), 1, layer, layerCount, update.aspectFlags);
6665 
6666             // We process the updates again to erase any clears for this level.
6667             removeSingleSubresourceStagedUpdates(contextVk, levelGL, layer, layerCount);
6668             return angle::Result::Continue;
6669         }
6670 
6671         // Otherwise we proceed with a normal update.
6672     }
6673 
6674     return flushStagedUpdates(contextVk, levelGL, levelGL + 1, layer, layer + layerCount, {});
6675 }
6676 
6677 angle::Result ImageHelper::flushStagedUpdates(ContextVk *contextVk,
6678                                               gl::LevelIndex levelGLStart,
6679                                               gl::LevelIndex levelGLEnd,
6680                                               uint32_t layerStart,
6681                                               uint32_t layerEnd,
6682                                               gl::TexLevelMask skipLevelsMask)
6683 {
6684     if (!hasStagedUpdatesInLevels(levelGLStart, levelGLEnd))
6685     {
6686         return angle::Result::Continue;
6687     }
6688 
6689     removeSupersededUpdates(contextVk, skipLevelsMask);
6690 
6691     // If a clear is requested and we know it was previously cleared with the same value, we drop
6692     // the clear.
6693     if (mCurrentSingleClearValue.valid())
6694     {
6695         std::vector<SubresourceUpdate> *levelUpdates =
6696             getLevelUpdates(gl::LevelIndex(mCurrentSingleClearValue.value().levelIndex));
6697         if (levelUpdates && levelUpdates->size() == 1)
6698         {
6699             SubresourceUpdate &update = (*levelUpdates)[0];
6700             if (update.updateSource == UpdateSource::Clear &&
6701                 mCurrentSingleClearValue.value() == update.data.clear)
6702             {
6703                 ANGLE_VK_PERF_WARNING(contextVk, GL_DEBUG_SEVERITY_LOW,
6704                                       "Repeated Clear on framebuffer attachment dropped");
6705                 update.release(contextVk->getRenderer());
6706                 levelUpdates->clear();
6707                 return angle::Result::Continue;
6708             }
6709         }
6710     }
6711 
6712     ASSERT(validateSubresourceUpdateImageRefsConsistent());
6713 
6714     ANGLE_TRY(mStagingBuffer.flush(contextVk));
6715 
6716     const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(getActualFormat());
6717 
6718     // For each level, upload layers that don't conflict in parallel.  The layer is hashed to
6719     // `layer % 64` and used to track whether that subresource is currently in transfer.  If so, a
6720     // barrier is inserted.  If mLayerCount > 64, there will be a few unnecessary barriers.
6721     //
6722     // Note: when a barrier is necessary while uploading updates to a level, we could instead move to
6723     // the next level and continue uploads in parallel.  Once all levels need a barrier, a single
6724     // barrier can be issued and we could continue with the rest of the updates from the first
6725     // level.
6726     constexpr uint32_t kMaxParallelSubresourceUpload = 64;
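    // For illustration only (hypothetical update): an update covering layers 2..4
    // (updateBaseLayer = 2, updateLayerCount = 3) is tracked as BitMask<uint64_t>(3) = 0b111
    // rotated left by 2 % kMaxParallelSubresourceUpload, i.e. bits 2..4 of the 64-bit in-progress
    // mask; if any of those bits were already set by an earlier update in the same level, a write
    // barrier is recorded before the copy.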
6727 
6728     // Start in TransferDst.  Don't yet mark any subresource as having defined contents; that is
6729     // done with fine granularity as updates are applied.  This is achieved by specifying a layer
6730     // that is outside the tracking range.
6731     CommandBufferAccess access;
6732     access.onImageTransferWrite(levelGLStart, 1, kMaxContentDefinedLayerCount, 0, aspectFlags,
6733                                 this);
6734 
6735     CommandBuffer *commandBuffer;
6736     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
6737 
6738     for (gl::LevelIndex updateMipLevelGL = levelGLStart; updateMipLevelGL < levelGLEnd;
6739          ++updateMipLevelGL)
6740     {
6741         std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(updateMipLevelGL);
6742         if (levelUpdates == nullptr)
6743         {
6744             ASSERT(static_cast<size_t>(updateMipLevelGL.get()) >= mSubresourceUpdates.size());
6745             break;
6746         }
6747 
6748         std::vector<SubresourceUpdate> updatesToKeep;
6749 
6750         // Hash map of uploads in progress.  See comment on kMaxParallelSubresourceUpload.
6751         uint64_t subresourceUploadsInProgress = 0;
6752 
6753         for (SubresourceUpdate &update : *levelUpdates)
6754         {
6755             ASSERT(update.updateSource == UpdateSource::Clear ||
6756                    update.updateSource == UpdateSource::ClearEmulatedChannelsOnly ||
6757                    (update.updateSource == UpdateSource::Buffer &&
6758                     update.data.buffer.bufferHelper != nullptr) ||
6759                    (update.updateSource == UpdateSource::Image && update.image != nullptr &&
6760                     update.image->isReferenced() && update.image->get().valid()));
6761 
6762             uint32_t updateBaseLayer, updateLayerCount;
6763             update.getDestSubresource(mLayerCount, &updateBaseLayer, &updateLayerCount);
6764 
6765             // If the update layers don't intersect the requested layers, skip the update.
6766             const bool areUpdateLayersOutsideRange =
6767                 updateBaseLayer + updateLayerCount <= layerStart || updateBaseLayer >= layerEnd;
6768 
6769             const LevelIndex updateMipLevelVk = toVkLevel(updateMipLevelGL);
6770 
6771             // Additionally, if updates to this level are specifically asked to be skipped, skip
6772             // them. This can happen when recreating an image that has been partially incompatibly
6773             // redefined, in which case only updates to the levels that haven't been redefined
6774             // should be flushed.
6775             if (areUpdateLayersOutsideRange || skipLevelsMask.test(updateMipLevelGL.get()))
6776             {
6777                 updatesToKeep.emplace_back(std::move(update));
6778                 continue;
6779             }
6780 
6781             // The updates were holding gl::LevelIndex values so that they would not need
6782             // modification when the base level of the texture changes.  Now that the update is
6783             // about to take effect, we need to convert the mip level to a Vulkan LevelIndex.
6784             if (update.updateSource == UpdateSource::Clear ||
6785                 update.updateSource == UpdateSource::ClearEmulatedChannelsOnly)
6786             {
6787                 update.data.clear.levelIndex = updateMipLevelVk.get();
6788             }
6789             else if (update.updateSource == UpdateSource::Buffer)
6790             {
6791                 if (update.data.buffer.formatID != mActualFormatID)
6792                 {
6793                     // TODO: http://anglebug.com/6368, we should handle this in higher-level code.
6794                     // If we have incompatible updates, skip but keep it.
6795                     updatesToKeep.emplace_back(std::move(update));
6796                     continue;
6797                 }
6798                 update.data.buffer.copyRegion.imageSubresource.mipLevel = updateMipLevelVk.get();
6799             }
6800             else if (update.updateSource == UpdateSource::Image)
6801             {
6802                 if (update.data.image.formatID != mActualFormatID)
6803                 {
6804                     // If we have incompatible updates, skip but keep it.
6805                     updatesToKeep.emplace_back(std::move(update));
6806                     continue;
6807                 }
6808                 update.data.image.copyRegion.dstSubresource.mipLevel = updateMipLevelVk.get();
6809             }
6810 
6811             if (updateLayerCount >= kMaxParallelSubresourceUpload)
6812             {
6813                 // If there are more subresources than bits we can track, always insert a barrier.
6814                 recordWriteBarrier(contextVk, aspectFlags, ImageLayout::TransferDst, commandBuffer);
6815                 subresourceUploadsInProgress = std::numeric_limits<uint64_t>::max();
6816             }
6817             else
6818             {
6819                 const uint64_t subresourceHashRange = angle::BitMask<uint64_t>(updateLayerCount);
6820                 const uint32_t subresourceHashOffset =
6821                     updateBaseLayer % kMaxParallelSubresourceUpload;
6822                 const uint64_t subresourceHash =
6823                     ANGLE_ROTL64(subresourceHashRange, subresourceHashOffset);
6824 
6825                 if ((subresourceUploadsInProgress & subresourceHash) != 0)
6826                 {
6827                     // If there's overlap in subresource upload, issue a barrier.
6828                     recordWriteBarrier(contextVk, aspectFlags, ImageLayout::TransferDst,
6829                                        commandBuffer);
6830                     subresourceUploadsInProgress = 0;
6831                 }
6832                 subresourceUploadsInProgress |= subresourceHash;
6833             }
6834 
6835             if (update.updateSource == UpdateSource::Clear)
6836             {
6837                 clear(update.data.clear.aspectFlags, update.data.clear.value, updateMipLevelVk,
6838                       updateBaseLayer, updateLayerCount, commandBuffer);
6839                 // Remember the latest operation is a clear call
6840                 mCurrentSingleClearValue = update.data.clear;
6841 
6842                 // Do not call onWrite as it removes mCurrentSingleClearValue, but instead call
6843                 // setContentDefined directly.
6844                 setContentDefined(updateMipLevelVk, 1, updateBaseLayer, updateLayerCount,
6845                                   update.data.clear.aspectFlags);
6846             }
6847             else if (update.updateSource == UpdateSource::ClearEmulatedChannelsOnly)
6848             {
6849                 ANGLE_TRY(clearEmulatedChannels(contextVk, update.data.clear.colorMaskFlags,
6850                                                 update.data.clear.value, updateMipLevelVk,
6851                                                 updateBaseLayer, updateLayerCount));
6852 
6853                 // Do not call onWrite.  Even though some channels of the image are cleared, don't
6854                 // consider the contents defined.  Also, since clearing emulated channels is a
6855                 // one-time thing that's superseded by Clears, |mCurrentSingleClearValue| is
6856                 // irrelevant and can't have a value.
6857                 ASSERT(!mCurrentSingleClearValue.valid());
6858 
6859                 // Refresh the command buffer because clearEmulatedChannels may have flushed it.
6860                 // This also transitions the image back to TransferDst, in case it's no longer in
6861                 // that layout.
6862                 ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
6863             }
6864             else if (update.updateSource == UpdateSource::Buffer)
6865             {
6866                 BufferUpdate &bufferUpdate = update.data.buffer;
6867 
6868                 BufferHelper *currentBuffer = bufferUpdate.bufferHelper;
6869                 ASSERT(currentBuffer && currentBuffer->valid());
6870 
6871                 CommandBufferAccess bufferAccess;
6872                 bufferAccess.onBufferTransferRead(currentBuffer);
6873                 ANGLE_TRY(
6874                     contextVk->getOutsideRenderPassCommandBuffer(bufferAccess, &commandBuffer));
6875 
6876                 commandBuffer->copyBufferToImage(currentBuffer->getBuffer().getHandle(), mImage,
6877                                                  getCurrentLayout(), 1,
6878                                                  &update.data.buffer.copyRegion);
6879                 onWrite(updateMipLevelGL, 1, updateBaseLayer, updateLayerCount,
6880                         update.data.buffer.copyRegion.imageSubresource.aspectMask);
6881             }
6882             else
6883             {
6884                 CommandBufferAccess imageAccess;
6885                 imageAccess.onImageTransferRead(aspectFlags, &update.image->get());
6886                 ANGLE_TRY(
6887                     contextVk->getOutsideRenderPassCommandBuffer(imageAccess, &commandBuffer));
6888 
6889                 commandBuffer->copyImage(update.image->get().getImage(),
6890                                          update.image->get().getCurrentLayout(), mImage,
6891                                          getCurrentLayout(), 1, &update.data.image.copyRegion);
6892                 onWrite(updateMipLevelGL, 1, updateBaseLayer, updateLayerCount,
6893                         update.data.image.copyRegion.dstSubresource.aspectMask);
6894             }
6895 
6896             update.release(contextVk->getRenderer());
6897         }
6898 
6899         // Only remove the updates that were actually applied to the image.
6900         *levelUpdates = std::move(updatesToKeep);
6901     }
6902 
6903     // Compact mSubresourceUpdates, then check if there are any updates left.
6904     size_t compactSize;
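    // Only trailing empty per-level vectors are dropped; earlier levels keep their slots so that
    // indexing by GL level remains valid.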
6905     for (compactSize = mSubresourceUpdates.size(); compactSize > 0; --compactSize)
6906     {
6907         if (!mSubresourceUpdates[compactSize - 1].empty())
6908         {
6909             break;
6910         }
6911     }
6912     mSubresourceUpdates.resize(compactSize);
6913 
6914     ASSERT(validateSubresourceUpdateImageRefsConsistent());
6915 
6916     // If no updates left, release the staging buffers to save memory.
6917     if (mSubresourceUpdates.empty())
6918     {
6919         mStagingBuffer.releaseInFlightBuffers(contextVk);
6920         mStagingBuffer.release(contextVk->getRenderer());
6921         onStateChange(angle::SubjectMessage::InitializationComplete);
6922     }
6923 
6924     return angle::Result::Continue;
6925 }
6926 
6927 angle::Result ImageHelper::flushAllStagedUpdates(ContextVk *contextVk)
6928 {
6929     return flushStagedUpdates(contextVk, mFirstAllocatedLevel, mFirstAllocatedLevel + mLevelCount,
6930                               0, mLayerCount, {});
6931 }
6932 
6933 bool ImageHelper::hasStagedUpdatesForSubresource(gl::LevelIndex levelGL,
6934                                                  uint32_t layer,
6935                                                  uint32_t layerCount) const
6936 {
6937     // Check to see if any updates are staged for the given level and layer
6938 
6939     const std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelGL);
6940     if (levelUpdates == nullptr || levelUpdates->empty())
6941     {
6942         return false;
6943     }
6944 
6945     for (const SubresourceUpdate &update : *levelUpdates)
6946     {
6947         uint32_t updateBaseLayer, updateLayerCount;
6948         update.getDestSubresource(mLayerCount, &updateBaseLayer, &updateLayerCount);
6949 
6950         const uint32_t updateLayerEnd = updateBaseLayer + updateLayerCount;
6951         const uint32_t layerEnd       = layer + layerCount;
6952 
6953         if ((layer >= updateBaseLayer && layer < updateLayerEnd) ||
6954             (layerEnd > updateBaseLayer && layerEnd <= updateLayerEnd))
6955         {
6956             // The layers intersect with the update range
6957             return true;
6958         }
6959     }
6960 
6961     return false;
6962 }
6963 
6964 gl::LevelIndex ImageHelper::getLastAllocatedLevel() const
6965 {
6966     return mFirstAllocatedLevel + mLevelCount - 1;
6967 }
6968 
6969 bool ImageHelper::hasStagedUpdatesInAllocatedLevels() const
6970 {
6971     return hasStagedUpdatesInLevels(mFirstAllocatedLevel, getLastAllocatedLevel() + 1);
6972 }
6973 
6974 bool ImageHelper::hasStagedUpdatesInLevels(gl::LevelIndex levelStart, gl::LevelIndex levelEnd) const
6975 {
6976     for (gl::LevelIndex level = levelStart; level < levelEnd; ++level)
6977     {
6978         const std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(level);
6979         if (levelUpdates == nullptr)
6980         {
6981             ASSERT(static_cast<size_t>(level.get()) >= mSubresourceUpdates.size());
6982             return false;
6983         }
6984 
6985         if (!levelUpdates->empty())
6986         {
6987             return true;
6988         }
6989     }
6990     return false;
6991 }
6992 
6993 bool ImageHelper::hasStagedImageUpdatesWithMismatchedFormat(gl::LevelIndex levelStart,
6994                                                             gl::LevelIndex levelEnd,
6995                                                             angle::FormatID formatID) const
6996 {
6997     for (gl::LevelIndex level = levelStart; level < levelEnd; ++level)
6998     {
6999         const std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(level);
7000         if (levelUpdates == nullptr)
7001         {
7002             continue;
7003         }
7004 
7005         for (const SubresourceUpdate &update : *levelUpdates)
7006         {
7007             if (update.updateSource == UpdateSource::Image &&
7008                 update.data.image.formatID != formatID)
7009             {
7010                 return true;
7011             }
7012         }
7013     }
7014     return false;
7015 }
7016 
7017 bool ImageHelper::validateSubresourceUpdateImageRefConsistent(RefCounted<ImageHelper> *image) const
7018 {
7019     if (image == nullptr)
7020     {
7021         return true;
7022     }
7023 
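    // Count every staged update that still references |image|; its ref count must match exactly.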
7024     uint32_t refs = 0;
7025 
7026     for (const std::vector<SubresourceUpdate> &levelUpdates : mSubresourceUpdates)
7027     {
7028         for (const SubresourceUpdate &update : levelUpdates)
7029         {
7030             if (update.updateSource == UpdateSource::Image && update.image == image)
7031             {
7032                 ++refs;
7033             }
7034         }
7035     }
7036 
7037     return image->isRefCountAsExpected(refs);
7038 }
7039 
7040 bool ImageHelper::validateSubresourceUpdateImageRefsConsistent() const
7041 {
7042     for (const std::vector<SubresourceUpdate> &levelUpdates : mSubresourceUpdates)
7043     {
7044         for (const SubresourceUpdate &update : levelUpdates)
7045         {
7046             if (update.updateSource == UpdateSource::Image &&
7047                 !validateSubresourceUpdateImageRefConsistent(update.image))
7048             {
7049                 return false;
7050             }
7051         }
7052     }
7053 
7054     return true;
7055 }
7056 
7057 void ImageHelper::removeSupersededUpdates(ContextVk *contextVk, gl::TexLevelMask skipLevelsMask)
7058 {
7059     if (mLayerCount > 64)
7060     {
7061         // Not implemented for images with more than 64 layers.  A 64-bit mask is used for
7062         // efficiency, hence the limit.
7063         return;
7064     }
7065 
7066     ASSERT(validateSubresourceUpdateImageRefsConsistent());
7067 
7068     RendererVk *renderer = contextVk->getRenderer();
7069 
7070     // Go over updates in reverse order, and mark the layers they completely overwrite.  If an
7071     // update is encountered whose layers are all already marked, that update is superseded by
7072     // future updates, so it can be dropped.  This tracking is done per level.  If the aspect being
7073     // written to is color/depth or stencil, index 0 or 1 is used respectively.  This is so
7074     // that if a depth write for example covers the whole subresource, a stencil write to that same
7075     // subresource is not dropped.
7076     constexpr size_t kIndexColorOrDepth = 0;
7077     constexpr size_t kIndexStencil      = 1;
7078     uint64_t supersededLayers[2]        = {};
7079 
7080     gl::Extents levelExtents = {};
7081 
7082     // Note: this lambda only needs |this|, but = is specified because clang warns about kIndex* not
7083     // needing capture, while MSVC fails to compile without capturing them.
7084     auto markLayersAndDropSuperseded = [=, &supersededLayers,
7085                                         &levelExtents](SubresourceUpdate &update) {
7086         uint32_t updateBaseLayer, updateLayerCount;
7087         update.getDestSubresource(mLayerCount, &updateBaseLayer, &updateLayerCount);
7088 
7089         const VkImageAspectFlags aspectMask = update.getDestAspectFlags();
7090         const bool hasColorOrDepth =
7091             (aspectMask & (VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT |
7092                            VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT |
7093                            VK_IMAGE_ASPECT_DEPTH_BIT)) != 0;
7094         const bool hasStencil = (aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0;
7095 
7096         // Test if the update is to layers that are all superseded.  In that case, drop the update.
7097         ASSERT(updateLayerCount <= 64);
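        // The all-layers case is handled explicitly, presumably because asking BitMask for all 64
        // bits would require a shift by the full bit width.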
7098         uint64_t updateLayersMask = updateLayerCount >= 64
7099                                         ? ~static_cast<uint64_t>(0)
7100                                         : angle::BitMask<uint64_t>(updateLayerCount);
7101         updateLayersMask <<= updateBaseLayer;
7102 
7103         const bool isColorOrDepthSuperseded =
7104             !hasColorOrDepth ||
7105             (supersededLayers[kIndexColorOrDepth] & updateLayersMask) == updateLayersMask;
7106         const bool isStencilSuperseded =
7107             !hasStencil || (supersededLayers[kIndexStencil] & updateLayersMask) == updateLayersMask;
7108 
7109         if (isColorOrDepthSuperseded && isStencilSuperseded)
7110         {
7111             ANGLE_VK_PERF_WARNING(contextVk, GL_DEBUG_SEVERITY_LOW,
7112                                   "Dropped image update that is superseded by an overlapping one");
7113 
7114             update.release(renderer);
7115             return true;
7116         }
7117 
7118         // Get the area this update affects.  Note that clear updates always clear the whole
7119         // subresource.
7120         gl::Box updateBox(gl::kOffsetZero, levelExtents);
7121 
7122         if (update.updateSource == UpdateSource::Buffer)
7123         {
7124             updateBox = gl::Box(update.data.buffer.copyRegion.imageOffset,
7125                                 update.data.buffer.copyRegion.imageExtent);
7126         }
7127         else if (update.updateSource == UpdateSource::Image)
7128         {
7129             updateBox = gl::Box(update.data.image.copyRegion.dstOffset,
7130                                 update.data.image.copyRegion.extent);
7131         }
7132 
7133         // Only if the update is to the whole subresource, mark its layers.
7134         if (updateBox.coversSameExtent(levelExtents))
7135         {
7136             if (hasColorOrDepth)
7137             {
7138                 supersededLayers[kIndexColorOrDepth] |= updateLayersMask;
7139             }
7140             if (hasStencil)
7141             {
7142                 supersededLayers[kIndexStencil] |= updateLayersMask;
7143             }
7144         }
7145 
7146         return false;
7147     };
7148 
7149     for (LevelIndex levelVk(0); levelVk < LevelIndex(mLevelCount); ++levelVk)
7150     {
7151         gl::LevelIndex levelGL                       = toGLLevel(levelVk);
7152         std::vector<SubresourceUpdate> *levelUpdates = getLevelUpdates(levelGL);
7153         if (levelUpdates == nullptr)
7154         {
7155             ASSERT(static_cast<size_t>(levelGL.get()) >= mSubresourceUpdates.size());
7156             break;
7157         }
7158 
7159         // If the level is skipped (because it was incompatibly redefined), don't remove its updates.
7160         if (skipLevelsMask.test(levelGL.get()))
7161         {
7162             continue;
7163         }
7164 
7165         // ClearEmulatedChannelsOnly updates can only be in the beginning of the list of updates.
7166         // They don't entirely clear the image, so they cannot supersede any update.
7167         ASSERT(verifyEmulatedClearsAreBeforeOtherUpdates(*levelUpdates));
7168 
7169         levelExtents                         = getLevelExtents(levelVk);
7170         supersededLayers[kIndexColorOrDepth] = 0;
7171         supersededLayers[kIndexStencil]      = 0;
7172 
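        // Iterate the updates in reverse so that newer updates mark their layers before older ones
        // are tested; the reverse-iterator erase below then drops the superseded entries while
        // keeping the surviving updates in their original order.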
7173         levelUpdates->erase(levelUpdates->rend().base(),
7174                             std::remove_if(levelUpdates->rbegin(), levelUpdates->rend(),
7175                                            markLayersAndDropSuperseded)
7176                                 .base());
7177     }
7178 
7179     ASSERT(validateSubresourceUpdateImageRefsConsistent());
7180 }
7181 
7182 angle::Result ImageHelper::copyImageDataToBuffer(ContextVk *contextVk,
7183                                                  gl::LevelIndex sourceLevelGL,
7184                                                  uint32_t layerCount,
7185                                                  uint32_t baseLayer,
7186                                                  const gl::Box &sourceArea,
7187                                                  BufferHelper **bufferOut,
7188                                                  size_t *bufferSize,
7189                                                  StagingBufferOffsetArray *bufferOffsetsOut,
7190                                                  uint8_t **outDataPtr)
7191 {
7192     ANGLE_TRACE_EVENT0("gpu.angle", "ImageHelper::copyImageDataToBuffer");
7193 
7194     const angle::Format &imageFormat = getActualFormat();
7195 
7196     // Two VK formats (one depth-only, one combined depth/stencil) use an extra byte for depth.
7197     // From https://www.khronos.org/registry/vulkan/specs/1.1/html/vkspec.html#VkBufferImageCopy:
7198     //  data copied to or from the depth aspect of a VK_FORMAT_X8_D24_UNORM_PACK32 or
7199     //  VK_FORMAT_D24_UNORM_S8_UINT format is packed with one 32-bit word per texel...
7200     // So if we hit the combined depth/stencil format, make sure we use 5 bytes per pixel (4 for
7201     //  depth data, 1 for stencil). NOTE that depth-only VK_FORMAT_X8_D24_UNORM_PACK32 already has
7202     //  4 bytes per pixel, which is sufficient to contain its depth aspect (no stencil aspect).
7203     uint32_t pixelBytes         = imageFormat.pixelBytes;
7204     uint32_t depthBytesPerPixel = imageFormat.depthBits >> 3;
7205     if (getActualVkFormat() == VK_FORMAT_D24_UNORM_S8_UINT)
7206     {
7207         pixelBytes         = 5;
7208         depthBytesPerPixel = 4;
7209     }
7210 
7211     *bufferSize = sourceArea.width * sourceArea.height * sourceArea.depth * pixelBytes * layerCount;
7212 
7213     const VkImageAspectFlags aspectFlags = getAspectFlags();
7214 
7215     // Allocate staging buffer data from context
7216     VkBuffer bufferHandle;
7217     ANGLE_TRY(mStagingBuffer.allocate(contextVk, *bufferSize, outDataPtr, &bufferHandle,
7218                                       &(*bufferOffsetsOut)[0], nullptr));
7219     *bufferOut = mStagingBuffer.getCurrentBuffer();
7220 
7221     LevelIndex sourceLevelVk = toVkLevel(sourceLevelGL);
7222 
7223     VkBufferImageCopy regions[2] = {};
7224     // Default to non-combined DS case
7225     regions[0].bufferOffset                    = (*bufferOffsetsOut)[0];
7226     regions[0].bufferRowLength                 = 0;
7227     regions[0].bufferImageHeight               = 0;
7228     regions[0].imageExtent.width               = sourceArea.width;
7229     regions[0].imageExtent.height              = sourceArea.height;
7230     regions[0].imageExtent.depth               = sourceArea.depth;
7231     regions[0].imageOffset.x                   = sourceArea.x;
7232     regions[0].imageOffset.y                   = sourceArea.y;
7233     regions[0].imageOffset.z                   = sourceArea.z;
7234     regions[0].imageSubresource.aspectMask     = aspectFlags;
7235     regions[0].imageSubresource.baseArrayLayer = baseLayer;
7236     regions[0].imageSubresource.layerCount     = layerCount;
7237     regions[0].imageSubresource.mipLevel       = sourceLevelVk.get();
7238 
7239     if (isCombinedDepthStencilFormat())
7240     {
7241         // For combined DS image we'll copy depth and stencil aspects separately
7242         // Depth aspect comes first in buffer and can use most settings from above
7243         regions[0].imageSubresource.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
7244 
7245         // Get depth data size since stencil data immediately follows depth data in buffer
7246         const VkDeviceSize depthSize = depthBytesPerPixel * sourceArea.width * sourceArea.height *
7247                                        sourceArea.depth * layerCount;
7248 
7249         // Double-check that we allocated enough buffer space (always 1 byte per stencil)
7250         ASSERT(*bufferSize >= (depthSize + (sourceArea.width * sourceArea.height *
7251                                             sourceArea.depth * layerCount)));
7252 
7253         // Copy stencil data into buffer immediately following the depth data
7254         const VkDeviceSize stencilOffset       = (*bufferOffsetsOut)[0] + depthSize;
7255         (*bufferOffsetsOut)[1]                 = stencilOffset;
7256         regions[1]                             = regions[0];
7257         regions[1].bufferOffset                = stencilOffset;
7258         regions[1].imageSubresource.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
7259     }
7260 
7261     CommandBufferAccess access;
7262     access.onBufferTransferWrite(*bufferOut);
7263     access.onImageTransferRead(aspectFlags, this);
7264 
7265     CommandBuffer *commandBuffer;
7266     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
7267 
7268     commandBuffer->copyImageToBuffer(mImage, getCurrentLayout(), bufferHandle, 1, regions);
7269 
7270     return angle::Result::Continue;
7271 }
7272 
7273 // static
7274 angle::Result ImageHelper::GetReadPixelsParams(ContextVk *contextVk,
7275                                                const gl::PixelPackState &packState,
7276                                                gl::Buffer *packBuffer,
7277                                                GLenum format,
7278                                                GLenum type,
7279                                                const gl::Rectangle &area,
7280                                                const gl::Rectangle &clippedArea,
7281                                                PackPixelsParams *paramsOut,
7282                                                GLuint *skipBytesOut)
7283 {
7284     const gl::InternalFormat &sizedFormatInfo = gl::GetInternalFormatInfo(format, type);
7285 
7286     GLuint outputPitch = 0;
7287     ANGLE_VK_CHECK_MATH(contextVk,
7288                         sizedFormatInfo.computeRowPitch(type, area.width, packState.alignment,
7289                                                         packState.rowLength, &outputPitch));
7290     ANGLE_VK_CHECK_MATH(contextVk, sizedFormatInfo.computeSkipBytes(type, outputPitch, 0, packState,
7291                                                                     false, skipBytesOut));
7292 
7293     *skipBytesOut += (clippedArea.x - area.x) * sizedFormatInfo.pixelBytes +
7294                      (clippedArea.y - area.y) * outputPitch;
7295 
7296     const angle::Format &angleFormat = GetFormatFromFormatType(format, type);
7297 
7298     *paramsOut = PackPixelsParams(clippedArea, angleFormat, outputPitch, packState.reverseRowOrder,
7299                                   packBuffer, 0);
7300     return angle::Result::Continue;
7301 }
7302 
7303 angle::Result ImageHelper::readPixelsForGetImage(ContextVk *contextVk,
7304                                                  const gl::PixelPackState &packState,
7305                                                  gl::Buffer *packBuffer,
7306                                                  gl::LevelIndex levelGL,
7307                                                  uint32_t layer,
7308                                                  uint32_t layerCount,
7309                                                  GLenum format,
7310                                                  GLenum type,
7311                                                  void *pixels)
7312 {
7313     const angle::Format &angleFormat = GetFormatFromFormatType(format, type);
7314 
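    // Pick the image aspect to read back based on which channels the requested format contains.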
7315     VkImageAspectFlagBits aspectFlags = {};
7316     if (angleFormat.redBits > 0 || angleFormat.blueBits > 0 || angleFormat.greenBits > 0 ||
7317         angleFormat.alphaBits > 0 || angleFormat.luminanceBits > 0)
7318     {
7319         aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
7320     }
7321     else
7322     {
7323         if (angleFormat.depthBits > 0)
7324         {
7325             if (angleFormat.stencilBits != 0)
7326             {
7327                 // TODO (anglebug.com/4688) Support combined depth stencil for GetTexImage
7328                 WARN() << "Unable to pull stencil from combined depth/stencil for GetTexImage";
7329             }
7330             aspectFlags = VK_IMAGE_ASPECT_DEPTH_BIT;
7331         }
7332         else if (angleFormat.stencilBits > 0)
7333         {
7334             aspectFlags = VK_IMAGE_ASPECT_STENCIL_BIT;
7335         }
7336     }
7337 
7338     ASSERT(aspectFlags != 0);
7339 
7340     PackPixelsParams params;
7341     GLuint outputSkipBytes = 0;
7342 
7343     const LevelIndex levelVk     = toVkLevel(levelGL);
7344     const gl::Extents mipExtents = getLevelExtents(levelVk);
7345     gl::Rectangle area(0, 0, mipExtents.width, mipExtents.height);
7346 
7347     ANGLE_TRY(GetReadPixelsParams(contextVk, packState, packBuffer, format, type, area, area,
7348                                   &params, &outputSkipBytes));
7349 
7350     // Use a temporary staging buffer. Could be optimized.
7351     RendererScoped<DynamicBuffer> stagingBuffer(contextVk->getRenderer());
7352     stagingBuffer.get().init(contextVk->getRenderer(), VK_BUFFER_USAGE_TRANSFER_DST_BIT, 1,
7353                              kStagingBufferSize, true, DynamicBufferPolicy::OneShotUse);
7354 
7355     if (mExtents.depth > 1 || layerCount > 1)
7356     {
7357         ASSERT(layer == 0);
7358         ASSERT(layerCount == 1 || mipExtents.depth == 1);
7359 
7360         uint32_t lastLayer = std::max(static_cast<uint32_t>(mipExtents.depth), layerCount);
7361 
7362         // Depth > 1 means this is a 3D texture and we need to copy all layers
7363         for (uint32_t mipLayer = 0; mipLayer < lastLayer; mipLayer++)
7364         {
7365             ANGLE_TRY(readPixels(contextVk, area, params, aspectFlags, levelGL, mipLayer,
7366                                  static_cast<uint8_t *>(pixels) + outputSkipBytes,
7367                                  &stagingBuffer.get()));
7368 
7369             outputSkipBytes += mipExtents.width * mipExtents.height *
7370                                gl::GetInternalFormatInfo(format, type).pixelBytes;
7371         }
7372     }
7373     else
7374     {
7375         ANGLE_TRY(readPixels(contextVk, area, params, aspectFlags, levelGL, layer,
7376                              static_cast<uint8_t *>(pixels) + outputSkipBytes,
7377                              &stagingBuffer.get()));
7378     }
7379 
7380     return angle::Result::Continue;
7381 }
7382 
7383 bool ImageHelper::canCopyWithTransformForReadPixels(const PackPixelsParams &packPixelsParams,
7384                                                     const angle::Format *readFormat)
7385 {
7386     ASSERT(mActualFormatID != angle::FormatID::NONE && mIntendedFormatID != angle::FormatID::NONE);
7387 
7388     // Only allow copies to PBOs with identical format.
7389     const bool isSameFormatCopy = *readFormat == *packPixelsParams.destFormat;
7390 
7391     // Disallow any transformation.
7392     const bool needsTransformation =
7393         packPixelsParams.rotation != SurfaceRotation::Identity || packPixelsParams.reverseRowOrder;
7394 
7395     // Disallow copies when the output pitch cannot be correctly specified in Vulkan.
7396     const bool isPitchMultipleOfTexelSize =
7397         packPixelsParams.outputPitch % readFormat->pixelBytes == 0;
7398 
7399     // Don't allow copies from emulated formats for simplicity.
7400     return !hasEmulatedImageFormat() && isSameFormatCopy && !needsTransformation &&
7401            isPitchMultipleOfTexelSize;
7402 }
7403 
7404 angle::Result ImageHelper::readPixels(ContextVk *contextVk,
7405                                       const gl::Rectangle &area,
7406                                       const PackPixelsParams &packPixelsParams,
7407                                       VkImageAspectFlagBits copyAspectFlags,
7408                                       gl::LevelIndex levelGL,
7409                                       uint32_t layer,
7410                                       void *pixels,
7411                                       DynamicBuffer *stagingBuffer)
7412 {
7413     ANGLE_TRACE_EVENT0("gpu.angle", "ImageHelper::readPixels");
7414 
7415     RendererVk *renderer = contextVk->getRenderer();
7416 
7417     // If the source image is multisampled, we need to resolve it into a temporary image before
7418     // performing a readback.
7419     bool isMultisampled = mSamples > 1;
7420     RendererScoped<ImageHelper> resolvedImage(contextVk->getRenderer());
7421 
7422     ImageHelper *src = this;
7423 
7424     ASSERT(!hasStagedUpdatesForSubresource(levelGL, layer, 1));
7425 
7426     if (isMultisampled)
7427     {
7428         ANGLE_TRY(resolvedImage.get().init2DStaging(
7429             contextVk, contextVk->hasProtectedContent(), renderer->getMemoryProperties(),
7430             gl::Extents(area.width, area.height, 1), mIntendedFormatID, mActualFormatID,
7431             VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 1));
7432         resolvedImage.get().retain(&contextVk->getResourceUseList());
7433     }
7434 
7435     VkImageAspectFlags layoutChangeAspectFlags = src->getAspectFlags();
7436 
7437     // Note that although we're reading from the image, we need to update the layout below.
7438     CommandBufferAccess access;
7439     access.onImageTransferRead(layoutChangeAspectFlags, this);
7440     if (isMultisampled)
7441     {
7442         access.onImageTransferWrite(gl::LevelIndex(0), 1, 0, 1, layoutChangeAspectFlags,
7443                                     &resolvedImage.get());
7444     }
7445 
7446     CommandBuffer *commandBuffer;
7447     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(access, &commandBuffer));
7448 
7449     const angle::Format *readFormat = &getActualFormat();
7450 
7451     if (copyAspectFlags != VK_IMAGE_ASPECT_COLOR_BIT)
7452     {
7453         readFormat = &GetDepthStencilImageToBufferFormat(*readFormat, copyAspectFlags);
7454     }
7455 
7456     VkOffset3D srcOffset = {area.x, area.y, 0};
7457 
7458     VkImageSubresourceLayers srcSubresource = {};
7459     srcSubresource.aspectMask               = copyAspectFlags;
7460     srcSubresource.mipLevel                 = toVkLevel(levelGL).get();
7461     srcSubresource.baseArrayLayer           = layer;
7462     srcSubresource.layerCount               = 1;
7463 
7464     VkExtent3D srcExtent = {static_cast<uint32_t>(area.width), static_cast<uint32_t>(area.height),
7465                             1};
7466 
7467     if (mExtents.depth > 1)
7468     {
7469         // Depth > 1 means this is a 3D texture and we need special handling
7470         srcOffset.z                   = layer;
7471         srcSubresource.baseArrayLayer = 0;
7472     }
7473 
7474     if (isMultisampled)
7475     {
7476         // Note: resolve only works on color images (not depth/stencil).
7477         ASSERT(copyAspectFlags == VK_IMAGE_ASPECT_COLOR_BIT);
7478 
7479         VkImageResolve resolveRegion                = {};
7480         resolveRegion.srcSubresource                = srcSubresource;
7481         resolveRegion.srcOffset                     = srcOffset;
7482         resolveRegion.dstSubresource.aspectMask     = copyAspectFlags;
7483         resolveRegion.dstSubresource.mipLevel       = 0;
7484         resolveRegion.dstSubresource.baseArrayLayer = 0;
7485         resolveRegion.dstSubresource.layerCount     = 1;
7486         resolveRegion.dstOffset                     = {};
7487         resolveRegion.extent                        = srcExtent;
7488 
7489         resolve(&resolvedImage.get(), resolveRegion, commandBuffer);
7490 
7491         CommandBufferAccess readAccess;
7492         readAccess.onImageTransferRead(layoutChangeAspectFlags, &resolvedImage.get());
7493         ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(readAccess, &commandBuffer));
7494 
7495         // Make the resolved image the target of buffer copy.
7496         src                           = &resolvedImage.get();
7497         srcOffset                     = {0, 0, 0};
7498         srcSubresource.baseArrayLayer = 0;
7499         srcSubresource.layerCount     = 1;
7500         srcSubresource.mipLevel       = 0;
7501     }
7502 
7503     // If a PBO is bound and the copy needs no transformation, copy directly on the GPU.
7504     if (packPixelsParams.packBuffer &&
7505         canCopyWithTransformForReadPixels(packPixelsParams, readFormat))
7506     {
7507         VkDeviceSize packBufferOffset = 0;
7508         BufferHelper &packBuffer =
7509             GetImpl(packPixelsParams.packBuffer)->getBufferAndOffset(&packBufferOffset);
7510 
7511         CommandBufferAccess copyAccess;
7512         copyAccess.onBufferTransferWrite(&packBuffer);
7513         copyAccess.onImageTransferRead(copyAspectFlags, src);
7514 
7515         CommandBuffer *copyCommandBuffer;
7516         ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(copyAccess, &copyCommandBuffer));
7517 
7518         ASSERT(packPixelsParams.outputPitch % readFormat->pixelBytes == 0);
7519 
7520         VkBufferImageCopy region = {};
7521         region.bufferImageHeight = srcExtent.height;
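        // With a pack buffer bound, |pixels| is an offset into that buffer rather than a host
        // pointer.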
7522         region.bufferOffset =
7523             packBufferOffset + packPixelsParams.offset + reinterpret_cast<ptrdiff_t>(pixels);
7524         region.bufferRowLength  = packPixelsParams.outputPitch / readFormat->pixelBytes;
7525         region.imageExtent      = srcExtent;
7526         region.imageOffset      = srcOffset;
7527         region.imageSubresource = srcSubresource;
7528 
7529         copyCommandBuffer->copyImageToBuffer(src->getImage(), src->getCurrentLayout(),
7530                                              packBuffer.getBuffer().getHandle(), 1, &region);
7531         return angle::Result::Continue;
7532     }
7533 
7534     VkBuffer bufferHandle      = VK_NULL_HANDLE;
7535     uint8_t *readPixelBuffer   = nullptr;
7536     VkDeviceSize stagingOffset = 0;
7537     size_t allocationSize      = readFormat->pixelBytes * area.width * area.height;
7538 
7539     ANGLE_TRY(stagingBuffer->allocate(contextVk, allocationSize, &readPixelBuffer, &bufferHandle,
7540                                       &stagingOffset, nullptr));
7541 
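    // The staging copy below is tightly packed (bufferRowLength == width); PackPixels then
    // repacks it into the caller's requested layout.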
7542     VkBufferImageCopy region = {};
7543     region.bufferImageHeight = srcExtent.height;
7544     region.bufferOffset      = stagingOffset;
7545     region.bufferRowLength   = srcExtent.width;
7546     region.imageExtent       = srcExtent;
7547     region.imageOffset       = srcOffset;
7548     region.imageSubresource  = srcSubresource;
7549 
7550     CommandBufferAccess readbackAccess;
7551     readbackAccess.onBufferTransferWrite(stagingBuffer->getCurrentBuffer());
7552 
7553     CommandBuffer *readbackCommandBuffer;
7554     ANGLE_TRY(contextVk->getOutsideRenderPassCommandBuffer(readbackAccess, &readbackCommandBuffer));
7555 
7556     readbackCommandBuffer->copyImageToBuffer(src->getImage(), src->getCurrentLayout(), bufferHandle,
7557                                              1, &region);
7558 
7559     ANGLE_VK_PERF_WARNING(contextVk, GL_DEBUG_SEVERITY_HIGH, "GPU stall due to ReadPixels");
7560 
7561     // Triggers a full finish.
7562     // TODO(jmadill): Don't block on asynchronous readback.
7563     ANGLE_TRY(contextVk->finishImpl(RenderPassClosureReason::GLReadPixels));
7564 
7565     // The buffer we copied to needs to be invalidated before we read from it because it has not
7566     // been created with the host-coherent bit.
7567     ANGLE_TRY(stagingBuffer->invalidate(contextVk));
7568 
7569     if (packPixelsParams.packBuffer)
7570     {
7571         // Must map the PBO in order to read its contents (and then unmap it later)
7572         BufferVk *packBufferVk = GetImpl(packPixelsParams.packBuffer);
7573         void *mapPtr           = nullptr;
7574         ANGLE_TRY(packBufferVk->mapImpl(contextVk, GL_MAP_WRITE_BIT, &mapPtr));
7575         uint8_t *dst = static_cast<uint8_t *>(mapPtr) + reinterpret_cast<ptrdiff_t>(pixels);
7576         PackPixels(packPixelsParams, *readFormat, area.width * readFormat->pixelBytes,
7577                    readPixelBuffer, static_cast<uint8_t *>(dst));
7578         ANGLE_TRY(packBufferVk->unmapImpl(contextVk));
7579     }
7580     else
7581     {
7582         PackPixels(packPixelsParams, *readFormat, area.width * readFormat->pixelBytes,
7583                    readPixelBuffer, static_cast<uint8_t *>(pixels));
7584     }
7585 
7586     return angle::Result::Continue;
7587 }
7588 
7589 // ImageHelper::SubresourceUpdate implementation
7590 ImageHelper::SubresourceUpdate::SubresourceUpdate()
7591     : updateSource(UpdateSource::Buffer), image(nullptr)
7592 {
7593     data.buffer.bufferHelper = nullptr;
7594 }
7595 
7596 ImageHelper::SubresourceUpdate::~SubresourceUpdate() {}
7597 
7598 ImageHelper::SubresourceUpdate::SubresourceUpdate(BufferHelper *bufferHelperIn,
7599                                                   const VkBufferImageCopy &copyRegionIn,
7600                                                   angle::FormatID formatID)
7601     : updateSource(UpdateSource::Buffer), image(nullptr)
7602 {
7603     data.buffer.bufferHelper = bufferHelperIn;
7604     data.buffer.copyRegion   = copyRegionIn;
7605     data.buffer.formatID     = formatID;
7606 }
7607 
7608 ImageHelper::SubresourceUpdate::SubresourceUpdate(RefCounted<ImageHelper> *imageIn,
7609                                                   const VkImageCopy &copyRegionIn,
7610                                                   angle::FormatID formatID)
7611     : updateSource(UpdateSource::Image), image(imageIn)
7612 {
7613     image->addRef();
7614     data.image.copyRegion = copyRegionIn;
7615     data.image.formatID   = formatID;
7616 }
7617 
7618 ImageHelper::SubresourceUpdate::SubresourceUpdate(VkImageAspectFlags aspectFlags,
7619                                                   const VkClearValue &clearValue,
7620                                                   const gl::ImageIndex &imageIndex)
7621     : updateSource(UpdateSource::Clear), image(nullptr)
7622 {
7623     data.clear.aspectFlags = aspectFlags;
7624     data.clear.value       = clearValue;
7625     data.clear.levelIndex  = imageIndex.getLevelIndex();
7626     data.clear.layerIndex  = imageIndex.hasLayer() ? imageIndex.getLayerIndex() : 0;
7627     data.clear.layerCount =
7628         imageIndex.hasLayer() ? imageIndex.getLayerCount() : VK_REMAINING_ARRAY_LAYERS;
7629     data.clear.colorMaskFlags = 0;
7630 }
7631 
7632 ImageHelper::SubresourceUpdate::SubresourceUpdate(VkColorComponentFlags colorMaskFlags,
7633                                                   const VkClearColorValue &clearValue,
7634                                                   const gl::ImageIndex &imageIndex)
7635     : updateSource(UpdateSource::ClearEmulatedChannelsOnly), image(nullptr)
7636 {
7637     data.clear.aspectFlags = VK_IMAGE_ASPECT_COLOR_BIT;
7638     data.clear.value.color = clearValue;
7639     data.clear.levelIndex  = imageIndex.getLevelIndex();
7640     data.clear.layerIndex  = imageIndex.hasLayer() ? imageIndex.getLayerIndex() : 0;
7641     data.clear.layerCount =
7642         imageIndex.hasLayer() ? imageIndex.getLayerCount() : VK_REMAINING_ARRAY_LAYERS;
7643     data.clear.colorMaskFlags = colorMaskFlags;
7644 }
7645 
7646 ImageHelper::SubresourceUpdate::SubresourceUpdate(SubresourceUpdate &&other)
7647     : updateSource(other.updateSource), image(nullptr)
7648 {
7649     switch (updateSource)
7650     {
7651         case UpdateSource::Clear:
7652         case UpdateSource::ClearEmulatedChannelsOnly:
7653             data.clear = other.data.clear;
7654             break;
7655         case UpdateSource::Buffer:
7656             data.buffer = other.data.buffer;
7657             break;
7658         case UpdateSource::Image:
7659             data.image  = other.data.image;
7660             image       = other.image;
7661             other.image = nullptr;
7662             break;
7663         default:
7664             UNREACHABLE();
7665     }
7666 }
7667 
7668 ImageHelper::SubresourceUpdate &ImageHelper::SubresourceUpdate::operator=(SubresourceUpdate &&other)
7669 {
7670     // Given that the update is a union of three structs, we can't use std::swap on the fields.  For
7671     // example, |this| may be an Image update and |other| may be a Buffer update.
7672     // The following could work:
7673     //
7674     // SubresourceUpdate oldThis;
7675     // Set oldThis to this->field based on updateSource
7676     // Set this->otherField to other.otherField based on other.updateSource
7677     // Set other.field to oldThis->field based on updateSource
7678     // std::swap(updateSource, other.updateSource);
7679     //
7680     // It's much simpler to just swap the memory instead.
7681 
7682     SubresourceUpdate oldThis;
7683     memcpy(&oldThis, this, sizeof(*this));
7684     memcpy(this, &other, sizeof(*this));
7685     memcpy(&other, &oldThis, sizeof(*this));
7686 
7687     return *this;
7688 }
7689 
7690 void ImageHelper::SubresourceUpdate::release(RendererVk *renderer)
7691 {
7692     if (updateSource == UpdateSource::Image)
7693     {
7694         image->releaseRef();
7695 
7696         if (!image->isReferenced())
7697         {
7698             // Staging images won't be used in render pass attachments.
7699             image->get().releaseImage(renderer);
7700             image->get().releaseStagingBuffer(renderer);
7701             SafeDelete(image);
7702         }
7703 
7704         image = nullptr;
7705     }
7706 }
7707 
7708 bool ImageHelper::SubresourceUpdate::isUpdateToLayers(uint32_t layerIndex,
7709                                                       uint32_t layerCount) const
7710 {
7711     uint32_t updateBaseLayer, updateLayerCount;
7712     getDestSubresource(gl::ImageIndex::kEntireLevel, &updateBaseLayer, &updateLayerCount);
7713 
7714     return updateBaseLayer == layerIndex &&
7715            (updateLayerCount == layerCount || updateLayerCount == VK_REMAINING_ARRAY_LAYERS);
7716 }
7717 
7718 void ImageHelper::SubresourceUpdate::getDestSubresource(uint32_t imageLayerCount,
7719                                                         uint32_t *baseLayerOut,
7720                                                         uint32_t *layerCountOut) const
7721 {
7722     if (updateSource == UpdateSource::Clear ||
7723         updateSource == UpdateSource::ClearEmulatedChannelsOnly)
7724     {
7725         *baseLayerOut  = data.clear.layerIndex;
7726         *layerCountOut = data.clear.layerCount;
7727 
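        // kEntireLevel means the clear applies to every layer of the image, so substitute the
        // actual layer count.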
7728         if (*layerCountOut == static_cast<uint32_t>(gl::ImageIndex::kEntireLevel))
7729         {
7730             *layerCountOut = imageLayerCount;
7731         }
7732     }
7733     else
7734     {
7735         const VkImageSubresourceLayers &dstSubresource =
7736             updateSource == UpdateSource::Buffer ? data.buffer.copyRegion.imageSubresource
7737                                                  : data.image.copyRegion.dstSubresource;
7738         *baseLayerOut  = dstSubresource.baseArrayLayer;
7739         *layerCountOut = dstSubresource.layerCount;
7740 
7741         ASSERT(*layerCountOut != static_cast<uint32_t>(gl::ImageIndex::kEntireLevel));
7742     }
7743 }
7744 
7745 VkImageAspectFlags ImageHelper::SubresourceUpdate::getDestAspectFlags() const
7746 {
7747     if (updateSource == UpdateSource::Clear ||
7748         updateSource == UpdateSource::ClearEmulatedChannelsOnly)
7749     {
7750         return data.clear.aspectFlags;
7751     }
7752     else if (updateSource == UpdateSource::Buffer)
7753     {
7754         return data.buffer.copyRegion.imageSubresource.aspectMask;
7755     }
7756     else
7757     {
7758         ASSERT(updateSource == UpdateSource::Image);
7759         return data.image.copyRegion.dstSubresource.aspectMask;
7760     }
7761 }
7762 
7763 std::vector<ImageHelper::SubresourceUpdate> *ImageHelper::getLevelUpdates(gl::LevelIndex level)
7764 {
7765     return static_cast<size_t>(level.get()) < mSubresourceUpdates.size()
7766                ? &mSubresourceUpdates[level.get()]
7767                : nullptr;
7768 }
7769 
7770 const std::vector<ImageHelper::SubresourceUpdate> *ImageHelper::getLevelUpdates(
7771     gl::LevelIndex level) const
7772 {
7773     return static_cast<size_t>(level.get()) < mSubresourceUpdates.size()
7774                ? &mSubresourceUpdates[level.get()]
7775                : nullptr;
7776 }
7777 
7778 void ImageHelper::appendSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update)
7779 {
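    // Updates are stored per GL level; grow the per-level vector lazily as levels are touched.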
7780     if (mSubresourceUpdates.size() <= static_cast<size_t>(level.get()))
7781     {
7782         mSubresourceUpdates.resize(level.get() + 1);
7783     }
7784 
7785     mSubresourceUpdates[level.get()].emplace_back(std::move(update));
7786     onStateChange(angle::SubjectMessage::SubjectChanged);
7787 }
7788 
7789 void ImageHelper::prependSubresourceUpdate(gl::LevelIndex level, SubresourceUpdate &&update)
7790 {
7791     if (mSubresourceUpdates.size() <= static_cast<size_t>(level.get()))
7792     {
7793         mSubresourceUpdates.resize(level.get() + 1);
7794     }
7795 
7796     mSubresourceUpdates[level.get()].insert(mSubresourceUpdates[level.get()].begin(),
7797                                             std::move(update));
7798     onStateChange(angle::SubjectMessage::SubjectChanged);
7799 }
7800 
7801 bool ImageHelper::hasEmulatedImageChannels() const
7802 {
7803     const angle::Format &angleFmt   = getIntendedFormat();
7804     const angle::Format &textureFmt = getActualFormat();
7805 
7806     // The red channel is never emulated.
7807     ASSERT((angleFmt.redBits != 0 || angleFmt.luminanceBits != 0 || angleFmt.alphaBits != 0) ==
7808            (textureFmt.redBits != 0));
7809 
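    // A channel is emulated when the actual (Vulkan) format has bits for it but the intended (GL)
    // format does not.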
7810     return (angleFmt.alphaBits == 0 && textureFmt.alphaBits > 0) ||
7811            (angleFmt.blueBits == 0 && textureFmt.blueBits > 0) ||
7812            (angleFmt.greenBits == 0 && textureFmt.greenBits > 0) ||
7813            (angleFmt.depthBits == 0 && textureFmt.depthBits > 0) ||
7814            (angleFmt.stencilBits == 0 && textureFmt.stencilBits > 0);
7815 }
7816 
7817 VkColorComponentFlags ImageHelper::getEmulatedChannelsMask() const
7818 {
7819     const angle::Format &angleFmt   = getIntendedFormat();
7820     const angle::Format &textureFmt = getActualFormat();
7821 
7822     ASSERT(!angleFmt.hasDepthOrStencilBits());
7823 
7824     VkColorComponentFlags emulatedChannelsMask = 0;
7825 
7826     if (angleFmt.alphaBits == 0 && textureFmt.alphaBits > 0)
7827     {
7828         emulatedChannelsMask |= VK_COLOR_COMPONENT_A_BIT;
7829     }
7830     if (angleFmt.blueBits == 0 && textureFmt.blueBits > 0)
7831     {
7832         emulatedChannelsMask |= VK_COLOR_COMPONENT_B_BIT;
7833     }
7834     if (angleFmt.greenBits == 0 && textureFmt.greenBits > 0)
7835     {
7836         emulatedChannelsMask |= VK_COLOR_COMPONENT_G_BIT;
7837     }
7838 
7839     // The red channel is never emulated.
7840     ASSERT((angleFmt.redBits != 0 || angleFmt.luminanceBits != 0 || angleFmt.alphaBits != 0) ==
7841            (textureFmt.redBits != 0));
7842 
7843     return emulatedChannelsMask;
7844 }
7845 
7846 // FramebufferHelper implementation.
7847 FramebufferHelper::FramebufferHelper() = default;
7848 
7849 FramebufferHelper::~FramebufferHelper() = default;
7850 
7851 FramebufferHelper::FramebufferHelper(FramebufferHelper &&other) : Resource(std::move(other))
7852 {
7853     mFramebuffer = std::move(other.mFramebuffer);
7854 }
7855 
7856 FramebufferHelper &FramebufferHelper::operator=(FramebufferHelper &&other)
7857 {
7858     std::swap(mUse, other.mUse);
7859     std::swap(mFramebuffer, other.mFramebuffer);
7860     return *this;
7861 }
7862 
7863 angle::Result FramebufferHelper::init(ContextVk *contextVk,
7864                                       const VkFramebufferCreateInfo &createInfo)
7865 {
7866     ANGLE_VK_TRY(contextVk, mFramebuffer.init(contextVk->getDevice(), createInfo));
7867     return angle::Result::Continue;
7868 }
7869 
7870 void FramebufferHelper::release(ContextVk *contextVk)
7871 {
7872     contextVk->addGarbage(&mFramebuffer);
7873 }
7874 
7875 LayerMode GetLayerMode(const vk::ImageHelper &image, uint32_t layerCount)
7876 {
7877     const uint32_t imageLayerCount = GetImageLayerCountForView(image);
7878     const bool allLayers           = layerCount == imageLayerCount;
7879 
7880     ASSERT(allLayers || (layerCount > 0 && layerCount <= gl::IMPLEMENTATION_MAX_TEXTURE_LEVELS));
7881     return allLayers ? LayerMode::All : static_cast<LayerMode>(layerCount);
7882 }
7883 
7884 // ImageViewHelper implementation.
7885 ImageViewHelper::ImageViewHelper() : mCurrentMaxLevel(0), mLinearColorspace(true) {}
7886 
7887 ImageViewHelper::ImageViewHelper(ImageViewHelper &&other) : Resource(std::move(other))
7888 {
7889     std::swap(mUse, other.mUse);
7890 
7891     std::swap(mCurrentMaxLevel, other.mCurrentMaxLevel);
7892     std::swap(mPerLevelLinearReadImageViews, other.mPerLevelLinearReadImageViews);
7893     std::swap(mPerLevelSRGBReadImageViews, other.mPerLevelSRGBReadImageViews);
7894     std::swap(mPerLevelLinearFetchImageViews, other.mPerLevelLinearFetchImageViews);
7895     std::swap(mPerLevelSRGBFetchImageViews, other.mPerLevelSRGBFetchImageViews);
7896     std::swap(mPerLevelLinearCopyImageViews, other.mPerLevelLinearCopyImageViews);
7897     std::swap(mPerLevelSRGBCopyImageViews, other.mPerLevelSRGBCopyImageViews);
7898     std::swap(mLinearColorspace, other.mLinearColorspace);
7899 
7900     std::swap(mPerLevelStencilReadImageViews, other.mPerLevelStencilReadImageViews);
7901     std::swap(mLayerLevelDrawImageViews, other.mLayerLevelDrawImageViews);
7902     std::swap(mLayerLevelDrawImageViewsLinear, other.mLayerLevelDrawImageViewsLinear);
7903     std::swap(mSubresourceDrawImageViews, other.mSubresourceDrawImageViews);
7904     std::swap(mLevelStorageImageViews, other.mLevelStorageImageViews);
7905     std::swap(mLayerLevelStorageImageViews, other.mLayerLevelStorageImageViews);
7906     std::swap(mImageViewSerial, other.mImageViewSerial);
7907 }
7908 
7909 ImageViewHelper::~ImageViewHelper() {}
7910 
7911 void ImageViewHelper::init(RendererVk *renderer)
7912 {
7913     if (!mImageViewSerial.valid())
7914     {
7915         mImageViewSerial = renderer->getResourceSerialFactory().generateImageOrBufferViewSerial();
7916     }
7917 }
7918 
7919 void ImageViewHelper::release(RendererVk *renderer)
7920 {
7921     std::vector<GarbageObject> garbage;
7922 
7923     mCurrentMaxLevel = LevelIndex(0);
7924 
7925     // Release the read views
7926     ReleaseImageViews(&mPerLevelLinearReadImageViews, &garbage);
7927     ReleaseImageViews(&mPerLevelSRGBReadImageViews, &garbage);
7928     ReleaseImageViews(&mPerLevelLinearFetchImageViews, &garbage);
7929     ReleaseImageViews(&mPerLevelSRGBFetchImageViews, &garbage);
7930     ReleaseImageViews(&mPerLevelLinearCopyImageViews, &garbage);
7931     ReleaseImageViews(&mPerLevelSRGBCopyImageViews, &garbage);
7932     ReleaseImageViews(&mPerLevelStencilReadImageViews, &garbage);
7933 
7934     // Release the draw views
7935     for (ImageViewVector &layerViews : mLayerLevelDrawImageViews)
7936     {
7937         for (ImageView &imageView : layerViews)
7938         {
7939             if (imageView.valid())
7940             {
7941                 garbage.emplace_back(GetGarbage(&imageView));
7942             }
7943         }
7944     }
7945     mLayerLevelDrawImageViews.clear();
7946     for (ImageViewVector &layerViews : mLayerLevelDrawImageViewsLinear)
7947     {
7948         for (ImageView &imageView : layerViews)
7949         {
7950             if (imageView.valid())
7951             {
7952                 garbage.emplace_back(GetGarbage(&imageView));
7953             }
7954         }
7955     }
7956     mLayerLevelDrawImageViewsLinear.clear();
7957     for (auto &iter : mSubresourceDrawImageViews)
7958     {
7959         std::unique_ptr<ImageView> &imageView = iter.second;
7960         if (imageView->valid())
7961         {
7962             garbage.emplace_back(GetGarbage(imageView.get()));
7963         }
7964     }
7965     mSubresourceDrawImageViews.clear();
7966 
7967     // Release the storage views
7968     ReleaseImageViews(&mLevelStorageImageViews, &garbage);
7969     for (ImageViewVector &layerViews : mLayerLevelStorageImageViews)
7970     {
7971         for (ImageView &imageView : layerViews)
7972         {
7973             if (imageView.valid())
7974             {
7975                 garbage.emplace_back(GetGarbage(&imageView));
7976             }
7977         }
7978     }
7979     mLayerLevelStorageImageViews.clear();
7980 
7981     if (!garbage.empty())
7982     {
7983         renderer->collectGarbage(std::move(mUse), std::move(garbage));
7984 
7985         // Ensure the resource use is always valid.
7986         mUse.init();
7987     }
7988 
7989     // Update image view serial.
7990     mImageViewSerial = renderer->getResourceSerialFactory().generateImageOrBufferViewSerial();
7991 }
7992 
7993 void ImageViewHelper::destroy(VkDevice device)
7994 {
7995     mCurrentMaxLevel = LevelIndex(0);
7996 
7997     // Release the read views
7998     DestroyImageViews(&mPerLevelLinearReadImageViews, device);
7999     DestroyImageViews(&mPerLevelSRGBReadImageViews, device);
8000     DestroyImageViews(&mPerLevelLinearFetchImageViews, device);
8001     DestroyImageViews(&mPerLevelSRGBFetchImageViews, device);
8002     DestroyImageViews(&mPerLevelLinearCopyImageViews, device);
8003     DestroyImageViews(&mPerLevelSRGBCopyImageViews, device);
8004     DestroyImageViews(&mPerLevelStencilReadImageViews, device);
8005 
8006     // Release the draw views
8007     for (ImageViewVector &layerViews : mLayerLevelDrawImageViews)
8008     {
8009         for (ImageView &imageView : layerViews)
8010         {
8011             imageView.destroy(device);
8012         }
8013     }
8014     mLayerLevelDrawImageViews.clear();
8015     for (ImageViewVector &layerViews : mLayerLevelDrawImageViewsLinear)
8016     {
8017         for (ImageView &imageView : layerViews)
8018         {
8019             imageView.destroy(device);
8020         }
8021     }
8022     mLayerLevelDrawImageViewsLinear.clear();
8023     for (auto &iter : mSubresourceDrawImageViews)
8024     {
8025         std::unique_ptr<ImageView> &imageView = iter.second;
8026         imageView->destroy(device);
8027     }
8028     mSubresourceDrawImageViews.clear();
8029 
8030     // Release the storage views
8031     DestroyImageViews(&mLevelStorageImageViews, device);
8032     for (ImageViewVector &layerViews : mLayerLevelStorageImageViews)
8033     {
8034         for (ImageView &imageView : layerViews)
8035         {
8036             imageView.destroy(device);
8037         }
8038     }
8039     mLayerLevelStorageImageViews.clear();
8040 
8041     mImageViewSerial = kInvalidImageOrBufferViewSerial;
8042 }
8043 
8044 angle::Result ImageViewHelper::initReadViews(ContextVk *contextVk,
8045                                              gl::TextureType viewType,
8046                                              const ImageHelper &image,
8047                                              const angle::Format &format,
8048                                              const gl::SwizzleState &formatSwizzle,
8049                                              const gl::SwizzleState &readSwizzle,
8050                                              LevelIndex baseLevel,
8051                                              uint32_t levelCount,
8052                                              uint32_t baseLayer,
8053                                              uint32_t layerCount,
8054                                              bool requiresSRGBViews,
8055                                              VkImageUsageFlags imageUsageFlags)
8056 {
8057     ASSERT(levelCount > 0);
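    // One set of views is kept per possible max level; grow the per-level arrays if this request
    // covers more levels than seen before.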
8058     if (levelCount > mPerLevelLinearReadImageViews.size())
8059     {
8060         mPerLevelLinearReadImageViews.resize(levelCount);
8061         mPerLevelSRGBReadImageViews.resize(levelCount);
8062         mPerLevelLinearFetchImageViews.resize(levelCount);
8063         mPerLevelSRGBFetchImageViews.resize(levelCount);
8064         mPerLevelLinearCopyImageViews.resize(levelCount);
8065         mPerLevelSRGBCopyImageViews.resize(levelCount);
8066         mPerLevelStencilReadImageViews.resize(levelCount);
8067     }
8068     mCurrentMaxLevel = LevelIndex(levelCount - 1);
8069 
8070     // Determine if we already have ImageViews for the new max level
8071     if (getReadImageView().valid())
8072     {
8073         return angle::Result::Continue;
8074     }
8075 
8076     // Since we don't have a readImageView, we must create ImageViews for the new max level
8077     ANGLE_TRY(initReadViewsImpl(contextVk, viewType, image, format, formatSwizzle, readSwizzle,
8078                                 baseLevel, levelCount, baseLayer, layerCount));
8079 
8080     if (requiresSRGBViews)
8081     {
8082         ANGLE_TRY(initSRGBReadViewsImpl(contextVk, viewType, image, format, formatSwizzle,
8083                                         readSwizzle, baseLevel, levelCount, baseLayer, layerCount,
8084                                         imageUsageFlags));
8085     }
8086 
8087     return angle::Result::Continue;
8088 }
8089 
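// initReadViewsImpl creates the default (non-reinterpreted) read/fetch/copy views.  Combined
// depth/stencil images get two separate read views (depth and stencil), since a sampled image
// view may only select a single aspect; other images get a single read view.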
8090 angle::Result ImageViewHelper::initReadViewsImpl(ContextVk *contextVk,
8091                                                  gl::TextureType viewType,
8092                                                  const ImageHelper &image,
8093                                                  const angle::Format &format,
8094                                                  const gl::SwizzleState &formatSwizzle,
8095                                                  const gl::SwizzleState &readSwizzle,
8096                                                  LevelIndex baseLevel,
8097                                                  uint32_t levelCount,
8098                                                  uint32_t baseLayer,
8099                                                  uint32_t layerCount)
8100 {
8101     ASSERT(mImageViewSerial.valid());
8102 
8103     const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(image.getIntendedFormat());
8104     mLinearColorspace                    = !format.isSRGB;
8105     VkFormat vkFormat                    = GetVkFormatFromFormatID(format.id);
8106 
8107     if (HasBothDepthAndStencilAspects(aspectFlags))
8108     {
8109         ANGLE_TRY(image.initLayerImageViewWithFormat(
8110             contextVk, viewType, vkFormat, VK_IMAGE_ASPECT_DEPTH_BIT, readSwizzle,
8111             &getReadImageView(), baseLevel, levelCount, baseLayer, layerCount));
8112         ANGLE_TRY(image.initLayerImageViewWithFormat(
8113             contextVk, viewType, vkFormat, VK_IMAGE_ASPECT_STENCIL_BIT, readSwizzle,
8114             &mPerLevelStencilReadImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
8115             baseLayer, layerCount));
8116     }
8117     else
8118     {
8119         ANGLE_TRY(image.initLayerImageViewWithFormat(contextVk, viewType, vkFormat, aspectFlags,
8120                                                      readSwizzle, &getReadImageView(), baseLevel,
8121                                                      levelCount, baseLayer, layerCount));
8122     }
8123 
8124     gl::TextureType fetchType = viewType;
8125 
8126     if (viewType == gl::TextureType::CubeMap || viewType == gl::TextureType::_2DArray ||
8127         viewType == gl::TextureType::_2DMultisampleArray)
8128     {
8129         fetchType = Get2DTextureType(layerCount, image.getSamples());
8130 
8131         ANGLE_TRY(image.initLayerImageViewWithFormat(contextVk, fetchType, vkFormat, aspectFlags,
8132                                                      readSwizzle, &getFetchImageView(), baseLevel,
8133                                                      levelCount, baseLayer, layerCount));
8134     }
8135 
8136     ANGLE_TRY(image.initLayerImageViewWithFormat(contextVk, fetchType, vkFormat, aspectFlags,
8137                                                  formatSwizzle, &getCopyImageView(), baseLevel,
8138                                                  levelCount, baseLayer, layerCount));
8139 
8140     return angle::Result::Continue;
8141 }
8142 
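// initSRGBReadViewsImpl creates the linear and sRGB reinterpreted counterparts of the read,
// fetch and copy views, so sRGB decode/override state can switch between views without
// recreating the image.  Each view is created at most once per max level; invalid entries are
// filled lazily.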
8143 angle::Result ImageViewHelper::initSRGBReadViewsImpl(ContextVk *contextVk,
8144                                                      gl::TextureType viewType,
8145                                                      const ImageHelper &image,
8146                                                      const angle::Format &format,
8147                                                      const gl::SwizzleState &formatSwizzle,
8148                                                      const gl::SwizzleState &readSwizzle,
8149                                                      LevelIndex baseLevel,
8150                                                      uint32_t levelCount,
8151                                                      uint32_t baseLayer,
8152                                                      uint32_t layerCount,
8153                                                      VkImageUsageFlags imageUsageFlags)
8154 {
8155     // When selecting the linear/sRGB counterpart formats, we must first make sure they are
8156     // actually supported by the ICD.  If they are not, we treat that as if there is no
8157     // counterpart format (in which case the relevant extension should not be exposed).
8158     angle::FormatID srgbOverrideFormat = ConvertToSRGB(image.getActualFormatID());
8159     ASSERT((srgbOverrideFormat == angle::FormatID::NONE) ||
8160            (HasNonRenderableTextureFormatSupport(contextVk->getRenderer(), srgbOverrideFormat)));
8161 
8162     angle::FormatID linearOverrideFormat = ConvertToLinear(image.getActualFormatID());
8163     ASSERT((linearOverrideFormat == angle::FormatID::NONE) ||
8164            (HasNonRenderableTextureFormatSupport(contextVk->getRenderer(), linearOverrideFormat)));
8165 
8166     angle::FormatID linearFormat =
8167         (linearOverrideFormat != angle::FormatID::NONE) ? linearOverrideFormat : format.id;
8168     ASSERT(linearFormat != angle::FormatID::NONE);
8169 
8170     const VkImageAspectFlags aspectFlags = GetFormatAspectFlags(image.getIntendedFormat());
8171 
8172     if (!mPerLevelLinearReadImageViews[mCurrentMaxLevel.get()].valid())
8173     {
8174         ANGLE_TRY(image.initReinterpretedLayerImageView(
8175             contextVk, viewType, aspectFlags, readSwizzle,
8176             &mPerLevelLinearReadImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
8177             baseLayer, layerCount, imageUsageFlags, linearFormat));
8178     }
8179     if (srgbOverrideFormat != angle::FormatID::NONE &&
8180         !mPerLevelSRGBReadImageViews[mCurrentMaxLevel.get()].valid())
8181     {
8182         ANGLE_TRY(image.initReinterpretedLayerImageView(
8183             contextVk, viewType, aspectFlags, readSwizzle,
8184             &mPerLevelSRGBReadImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount, baseLayer,
8185             layerCount, imageUsageFlags, srgbOverrideFormat));
8186     }
8187 
8188     gl::TextureType fetchType = viewType;
8189 
8190     if (viewType == gl::TextureType::CubeMap || viewType == gl::TextureType::_2DArray ||
8191         viewType == gl::TextureType::_2DMultisampleArray)
8192     {
8193         fetchType = Get2DTextureType(layerCount, image.getSamples());
8194 
8195         if (!mPerLevelLinearFetchImageViews[mCurrentMaxLevel.get()].valid())
8196         {
8197 
8198             ANGLE_TRY(image.initReinterpretedLayerImageView(
8199                 contextVk, fetchType, aspectFlags, readSwizzle,
8200                 &mPerLevelLinearFetchImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
8201                 baseLayer, layerCount, imageUsageFlags, linearFormat));
8202         }
8203         if (srgbOverrideFormat != angle::FormatID::NONE &&
8204             !mPerLevelSRGBFetchImageViews[mCurrentMaxLevel.get()].valid())
8205         {
8206             ANGLE_TRY(image.initReinterpretedLayerImageView(
8207                 contextVk, fetchType, aspectFlags, readSwizzle,
8208                 &mPerLevelSRGBFetchImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
8209                 baseLayer, layerCount, imageUsageFlags, srgbOverrideFormat));
8210         }
8211     }
8212 
8213     if (!mPerLevelLinearCopyImageViews[mCurrentMaxLevel.get()].valid())
8214     {
8215         ANGLE_TRY(image.initReinterpretedLayerImageView(
8216             contextVk, fetchType, aspectFlags, formatSwizzle,
8217             &mPerLevelLinearCopyImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount,
8218             baseLayer, layerCount, imageUsageFlags, linearFormat));
8219     }
8220     if (srgbOverrideFormat != angle::FormatID::NONE &&
8221         !mPerLevelSRGBCopyImageViews[mCurrentMaxLevel.get()].valid())
8222     {
8223         ANGLE_TRY(image.initReinterpretedLayerImageView(
8224             contextVk, fetchType, aspectFlags, formatSwizzle,
8225             &mPerLevelSRGBCopyImageViews[mCurrentMaxLevel.get()], baseLevel, levelCount, baseLayer,
8226             layerCount, imageUsageFlags, srgbOverrideFormat));
8227     }
8228 
8229     return angle::Result::Continue;
8230 }
8231 
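// Storage (image load/store) views are created lazily, one per level, and cached in
// mLevelStorageImageViews.  Illustrative usage; identifiers other than the helper's own are
// assumptions:
//
//   const vk::ImageView *storageView = nullptr;
//   ANGLE_TRY(imageViews.getLevelStorageImageView(contextVk, gl::TextureType::_2D, image,
//                                                 vk::LevelIndex(0), 0, usageFlags,
//                                                 image.getActualFormatID(), &storageView));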
8232 angle::Result ImageViewHelper::getLevelStorageImageView(ContextVk *contextVk,
8233                                                         gl::TextureType viewType,
8234                                                         const ImageHelper &image,
8235                                                         LevelIndex levelVk,
8236                                                         uint32_t layer,
8237                                                         VkImageUsageFlags imageUsageFlags,
8238                                                         angle::FormatID formatID,
8239                                                         const ImageView **imageViewOut)
8240 {
8241     ASSERT(mImageViewSerial.valid());
8242 
8243     retain(&contextVk->getResourceUseList());
8244 
8245     ImageView *imageView =
8246         GetLevelImageView(&mLevelStorageImageViews, levelVk, image.getLevelCount());
8247 
8248     *imageViewOut = imageView;
8249     if (imageView->valid())
8250     {
8251         return angle::Result::Continue;
8252     }
8253 
8254     // Create the view.  Note that storage images are not affected by swizzle parameters.
8255     return image.initReinterpretedLayerImageView(contextVk, viewType, image.getAspectFlags(),
8256                                                  gl::SwizzleState(), imageView, levelVk, 1, layer,
8257                                                  image.getLayerCount(), imageUsageFlags, formatID);
8258 }
8259 
8260 angle::Result ImageViewHelper::getLevelLayerStorageImageView(ContextVk *contextVk,
8261                                                              const ImageHelper &image,
8262                                                              LevelIndex levelVk,
8263                                                              uint32_t layer,
8264                                                              VkImageUsageFlags imageUsageFlags,
8265                                                              angle::FormatID formatID,
8266                                                              const ImageView **imageViewOut)
8267 {
8268     ASSERT(image.valid());
8269     ASSERT(mImageViewSerial.valid());
8270     ASSERT(!image.getActualFormat().isBlock);
8271 
8272     retain(&contextVk->getResourceUseList());
8273 
8274     ImageView *imageView =
8275         GetLevelLayerImageView(&mLayerLevelStorageImageViews, levelVk, layer, image.getLevelCount(),
8276                                GetImageLayerCountForView(image));
8277     *imageViewOut = imageView;
8278 
8279     if (imageView->valid())
8280     {
8281         return angle::Result::Continue;
8282     }
8283 
8284     // Create the view.  Note that storage images are not affected by swizzle parameters.
8285     gl::TextureType viewType = Get2DTextureType(1, image.getSamples());
8286     return image.initReinterpretedLayerImageView(contextVk, viewType, image.getAspectFlags(),
8287                                                  gl::SwizzleState(), imageView, levelVk, 1, layer,
8288                                                  1, imageUsageFlags, formatID);
8289 }
8290 
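// Draw (framebuffer attachment) views covering an arbitrary layer range are cached in
// mSubresourceDrawImageViews, keyed by the packed ImageSubresourceRange, and created on first
// use.  Single-layer draw views (see getLevelLayerDrawImageView below) use the per-layer
// vectors instead.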
8291 angle::Result ImageViewHelper::getLevelDrawImageView(ContextVk *contextVk,
8292                                                      const ImageHelper &image,
8293                                                      LevelIndex levelVk,
8294                                                      uint32_t layer,
8295                                                      uint32_t layerCount,
8296                                                      gl::SrgbWriteControlMode mode,
8297                                                      const ImageView **imageViewOut)
8298 {
8299     ASSERT(image.valid());
8300     ASSERT(mImageViewSerial.valid());
8301     ASSERT(!image.getActualFormat().isBlock);
8302 
8303     retain(&contextVk->getResourceUseList());
8304 
8305     ImageSubresourceRange range = MakeImageSubresourceDrawRange(
8306         image.toGLLevel(levelVk), layer, GetLayerMode(image, layerCount), mode);
8307 
8308     std::unique_ptr<ImageView> &view = mSubresourceDrawImageViews[range];
8309     if (view)
8310     {
8311         *imageViewOut = view.get();
8312         return angle::Result::Continue;
8313     }
8314 
8315     view          = std::make_unique<ImageView>();
8316     *imageViewOut = view.get();
8317 
8318     // Lazily allocate the image view.
8319     // Note that these views are specifically made to be used as framebuffer attachments, and
8320     // therefore don't have swizzle.
8321     gl::TextureType viewType = Get2DTextureType(layerCount, image.getSamples());
8322     return image.initLayerImageView(contextVk, viewType, image.getAspectFlags(), gl::SwizzleState(),
8323                                     view.get(), levelVk, 1, layer, layerCount, mode);
8324 }
8325 
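// Single-layer draw views are kept in two parallel vectors so the sRGB write control mode
// (default vs. forced-linear) simply selects between them.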
8326 angle::Result ImageViewHelper::getLevelLayerDrawImageView(ContextVk *contextVk,
8327                                                           const ImageHelper &image,
8328                                                           LevelIndex levelVk,
8329                                                           uint32_t layer,
8330                                                           gl::SrgbWriteControlMode mode,
8331                                                           const ImageView **imageViewOut)
8332 {
8333     ASSERT(image.valid());
8334     ASSERT(mImageViewSerial.valid());
8335     ASSERT(!image.getActualFormat().isBlock);
8336 
8337     retain(&contextVk->getResourceUseList());
8338 
8339     LayerLevelImageViewVector &imageViews = (mode == gl::SrgbWriteControlMode::Linear)
8340                                                 ? mLayerLevelDrawImageViewsLinear
8341                                                 : mLayerLevelDrawImageViews;
8342 
8343     // Lazily allocate the storage for image views
8344     ImageView *imageView = GetLevelLayerImageView(
8345         &imageViews, levelVk, layer, image.getLevelCount(), GetImageLayerCountForView(image));
8346     *imageViewOut = imageView;
8347 
8348     if (imageView->valid())
8349     {
8350         return angle::Result::Continue;
8351     }
8352 
8353     // Lazily allocate the image view itself.
8354     // Note that these views are specifically made to be used as framebuffer attachments, and
8355     // therefore don't have swizzle.
8356     gl::TextureType viewType = Get2DTextureType(1, image.getSamples());
8357     return image.initLayerImageView(contextVk, viewType, image.getAspectFlags(), gl::SwizzleState(),
8358                                     imageView, levelVk, 1, layer, 1, mode);
8359 }
8360 
8361 ImageOrBufferViewSubresourceSerial ImageViewHelper::getSubresourceSerial(
8362     gl::LevelIndex levelGL,
8363     uint32_t levelCount,
8364     uint32_t layer,
8365     LayerMode layerMode,
8366     SrgbDecodeMode srgbDecodeMode,
8367     gl::SrgbOverride srgbOverrideMode) const
8368 {
8369     ASSERT(mImageViewSerial.valid());
8370 
8371     ImageOrBufferViewSubresourceSerial serial;
8372     serial.viewSerial  = mImageViewSerial;
8373     serial.subresource = MakeImageSubresourceReadRange(levelGL, levelCount, layer, layerMode,
8374                                                        srgbDecodeMode, srgbOverrideMode);
8375     return serial;
8376 }
8377 
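// The Make* helpers below pack a view description into the compact ImageSubresourceRange
// bitfield via SetBitField, so it can double as a hash-map key (mSubresourceDrawImageViews)
// and as the subresource part of an ImageOrBufferViewSubresourceSerial.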
8378 ImageSubresourceRange MakeImageSubresourceReadRange(gl::LevelIndex level,
8379                                                     uint32_t levelCount,
8380                                                     uint32_t layer,
8381                                                     LayerMode layerMode,
8382                                                     SrgbDecodeMode srgbDecodeMode,
8383                                                     gl::SrgbOverride srgbOverrideMode)
8384 {
8385     ImageSubresourceRange range;
8386 
8387     SetBitField(range.level, level.get());
8388     SetBitField(range.levelCount, levelCount);
8389     SetBitField(range.layer, layer);
8390     SetBitField(range.layerMode, layerMode);
8391     SetBitField(range.srgbDecodeMode, srgbDecodeMode);
8392     SetBitField(range.srgbMode, srgbOverrideMode);
8393 
8394     return range;
8395 }
8396 
8397 ImageSubresourceRange MakeImageSubresourceDrawRange(gl::LevelIndex level,
8398                                                     uint32_t layer,
8399                                                     LayerMode layerMode,
8400                                                     gl::SrgbWriteControlMode srgbWriteControlMode)
8401 {
8402     ImageSubresourceRange range;
8403 
8404     SetBitField(range.level, level.get());
8405     SetBitField(range.levelCount, 1);
8406     SetBitField(range.layer, layer);
8407     SetBitField(range.layerMode, layerMode);
8408     SetBitField(range.srgbDecodeMode, 0);
8409     SetBitField(range.srgbMode, srgbWriteControlMode);
8410 
8411     return range;
8412 }
8413 
8414 // BufferViewHelper implementation.
8415 BufferViewHelper::BufferViewHelper() : mOffset(0), mSize(0) {}
8416 
8417 BufferViewHelper::BufferViewHelper(BufferViewHelper &&other) : Resource(std::move(other))
8418 {
8419     std::swap(mOffset, other.mOffset);
8420     std::swap(mSize, other.mSize);
8421     std::swap(mViews, other.mViews);
8422     std::swap(mViewSerial, other.mViewSerial);
8423 }
8424 
8425 BufferViewHelper::~BufferViewHelper() {}
8426 
8427 void BufferViewHelper::init(RendererVk *renderer, VkDeviceSize offset, VkDeviceSize size)
8428 {
8429     ASSERT(mViews.empty());
8430 
8431     mOffset = offset;
8432     mSize   = size;
8433 
8434     if (!mViewSerial.valid())
8435     {
8436         mViewSerial = renderer->getResourceSerialFactory().generateImageOrBufferViewSerial();
8437     }
8438 }
8439 
8440 void BufferViewHelper::release(ContextVk *contextVk)
8441 {
8442     contextVk->flushDescriptorSetUpdates();
8443 
8444     std::vector<GarbageObject> garbage;
8445 
8446     for (auto &formatAndView : mViews)
8447     {
8448         BufferView &view = formatAndView.second;
8449         ASSERT(view.valid());
8450 
8451         garbage.emplace_back(GetGarbage(&view));
8452     }
8453 
8454     if (!garbage.empty())
8455     {
8456         RendererVk *rendererVk = contextVk->getRenderer();
8457         rendererVk->collectGarbage(std::move(mUse), std::move(garbage));
8458 
8459         // Ensure the resource use is always valid.
8460         mUse.init();
8461 
8462         // Update image view serial.
8463         mViewSerial = rendererVk->getResourceSerialFactory().generateImageOrBufferViewSerial();
8464     }
8465 
8466     mViews.clear();
8467 
8468     mOffset = 0;
8469     mSize   = 0;
8470 }
8471 
8472 void BufferViewHelper::destroy(VkDevice device)
8473 {
8474     for (auto &formatAndView : mViews)
8475     {
8476         BufferView &view = formatAndView.second;
8477         view.destroy(device);
8478     }
8479 
8480     mViews.clear();
8481 
8482     mOffset = 0;
8483     mSize   = 0;
8484 
8485     mViewSerial = kInvalidImageOrBufferViewSerial;
8486 }
8487 
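// getView caches one VkBufferView per VkFormat.  The view range is rounded down to a whole
// number of texels, as Vulkan requires; e.g. (hypothetical numbers) with mSize == 1000 and a
// 16-byte texel format, the created view covers 992 bytes.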
8488 angle::Result BufferViewHelper::getView(ContextVk *contextVk,
8489                                         const BufferHelper &buffer,
8490                                         VkDeviceSize bufferOffset,
8491                                         const Format &format,
8492                                         const BufferView **viewOut)
8493 {
8494     ASSERT(format.valid());
8495 
8496     VkFormat viewVkFormat = format.getActualBufferVkFormat(false);
8497 
8498     auto iter = mViews.find(viewVkFormat);
8499     if (iter != mViews.end())
8500     {
8501         *viewOut = &iter->second;
8502         return angle::Result::Continue;
8503     }
8504 
8505     // If the size is not a multiple of pixelBytes, remove the extra bytes.  The last element cannot
8506     // be read anyway, and this is a requirement of Vulkan (for size to be a multiple of format
8507     // texel block size).
8508     const angle::Format &bufferFormat = format.getActualBufferFormat(false);
8509     const GLuint pixelBytes           = bufferFormat.pixelBytes;
8510     VkDeviceSize size                 = mSize - mSize % pixelBytes;
8511 
8512     VkBufferViewCreateInfo viewCreateInfo = {};
8513     viewCreateInfo.sType                  = VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO;
8514     viewCreateInfo.buffer                 = buffer.getBuffer().getHandle();
8515     viewCreateInfo.format                 = viewVkFormat;
8516     viewCreateInfo.offset                 = mOffset + bufferOffset;
8517     viewCreateInfo.range                  = size;
8518 
8519     BufferView view;
8520     ANGLE_VK_TRY(contextVk, view.init(contextVk->getDevice(), viewCreateInfo));
8521 
8522     // Cache the view
8523     auto insertIter = mViews.insert({viewVkFormat, std::move(view)});
8524     *viewOut        = &insertIter.first->second;
8525     ASSERT(insertIter.second);
8526 
8527     return angle::Result::Continue;
8528 }
8529 
8530 ImageOrBufferViewSubresourceSerial BufferViewHelper::getSerial() const
8531 {
8532     ASSERT(mViewSerial.valid());
8533 
8534     ImageOrBufferViewSubresourceSerial serial = {};
8535     serial.viewSerial                         = mViewSerial;
8536     return serial;
8537 }
8538 
8539 // ShaderProgramHelper implementation.
8540 ShaderProgramHelper::ShaderProgramHelper() : mSpecializationConstants{} {}
8541 
8542 ShaderProgramHelper::~ShaderProgramHelper() = default;
8543 
8544 bool ShaderProgramHelper::valid(const gl::ShaderType shaderType) const
8545 {
8546     return mShaders[shaderType].valid();
8547 }
8548 
8549 void ShaderProgramHelper::destroy(RendererVk *rendererVk)
8550 {
8551     mGraphicsPipelines.destroy(rendererVk);
8552     mComputePipeline.destroy(rendererVk->getDevice());
8553     for (BindingPointer<ShaderAndSerial> &shader : mShaders)
8554     {
8555         shader.reset();
8556     }
8557 }
8558 
8559 void ShaderProgramHelper::release(ContextVk *contextVk)
8560 {
8561     mGraphicsPipelines.release(contextVk);
8562     contextVk->addGarbage(&mComputePipeline.getPipeline());
8563     for (BindingPointer<ShaderAndSerial> &shader : mShaders)
8564     {
8565         shader.reset();
8566     }
8567 }
8568 
8569 void ShaderProgramHelper::setShader(gl::ShaderType shaderType, RefCounted<ShaderAndSerial> *shader)
8570 {
8571     mShaders[shaderType].set(shader);
8572 }
8573 
8574 void ShaderProgramHelper::setSpecializationConstant(sh::vk::SpecializationConstantId id,
8575                                                     uint32_t value)
8576 {
8577     ASSERT(id < sh::vk::SpecializationConstantId::EnumCount);
8578     switch (id)
8579     {
8580         case sh::vk::SpecializationConstantId::LineRasterEmulation:
8581             mSpecializationConstants.lineRasterEmulation = value;
8582             break;
8583         case sh::vk::SpecializationConstantId::SurfaceRotation:
8584             mSpecializationConstants.surfaceRotation = value;
8585             break;
8586         case sh::vk::SpecializationConstantId::DrawableWidth:
8587             mSpecializationConstants.drawableWidth = static_cast<float>(value);
8588             break;
8589         case sh::vk::SpecializationConstantId::DrawableHeight:
8590             mSpecializationConstants.drawableHeight = static_cast<float>(value);
8591             break;
8592         default:
8593             UNREACHABLE();
8594             break;
8595     }
8596 }
8597 
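// The compute pipeline is built once from the compute shader module and then cached in
// mComputePipeline.  Sketch of a caller (assumes `shaderProgram` and a `const vk::PipelineLayout &`
// named `pipelineLayout` already exist):
//
//   vk::PipelineHelper *computePipeline = nullptr;
//   ANGLE_TRY(shaderProgram.getComputePipeline(contextVk, pipelineLayout, &computePipeline));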
8598 angle::Result ShaderProgramHelper::getComputePipeline(Context *context,
8599                                                       const PipelineLayout &pipelineLayout,
8600                                                       PipelineHelper **pipelineOut)
8601 {
8602     if (mComputePipeline.valid())
8603     {
8604         *pipelineOut = &mComputePipeline;
8605         return angle::Result::Continue;
8606     }
8607 
8608     RendererVk *renderer = context->getRenderer();
8609 
8610     VkPipelineShaderStageCreateInfo shaderStage = {};
8611     VkComputePipelineCreateInfo createInfo      = {};
8612 
8613     shaderStage.sType               = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
8614     shaderStage.flags               = 0;
8615     shaderStage.stage               = VK_SHADER_STAGE_COMPUTE_BIT;
8616     shaderStage.module              = mShaders[gl::ShaderType::Compute].get().get().getHandle();
8617     shaderStage.pName               = "main";
8618     shaderStage.pSpecializationInfo = nullptr;
8619 
8620     createInfo.sType              = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
8621     createInfo.flags              = 0;
8622     createInfo.stage              = shaderStage;
8623     createInfo.layout             = pipelineLayout.getHandle();
8624     createInfo.basePipelineHandle = VK_NULL_HANDLE;
8625     createInfo.basePipelineIndex  = 0;
8626 
8627     PipelineCache *pipelineCache = nullptr;
8628     ANGLE_TRY(renderer->getPipelineCache(&pipelineCache));
8629     ANGLE_VK_TRY(context, mComputePipeline.getPipeline().initCompute(context->getDevice(),
8630                                                                      createInfo, *pipelineCache));
8631 
8632     *pipelineOut = &mComputePipeline;
8633     return angle::Result::Continue;
8634 }
8635 
8636 // ActiveHandleCounter implementation.
8637 ActiveHandleCounter::ActiveHandleCounter() : mActiveCounts{}, mAllocatedCounts{} {}
8638 
8639 ActiveHandleCounter::~ActiveHandleCounter() = default;
8640 
8641 // CommandBufferAccess implementation.
8642 CommandBufferAccess::CommandBufferAccess()  = default;
8643 CommandBufferAccess::~CommandBufferAccess() = default;
8644 
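// CommandBufferAccess records which buffers and images a set of commands will read or write,
// along with access masks, pipeline stages and image layouts, so the appropriate barriers can
// be inserted when the commands are recorded.  Illustrative usage for a hypothetical compute
// dispatch (`buffer`, `image` and `levelGL` are assumed identifiers):
//
//   vk::CommandBufferAccess access;
//   access.onBufferRead(VK_ACCESS_SHADER_READ_BIT, vk::PipelineStage::ComputeShader, &buffer);
//   access.onImageWrite(levelGL, 1, 0, 1, VK_IMAGE_ASPECT_COLOR_BIT,
//                       vk::ImageLayout::ComputeShaderWrite, &image);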
8645 void CommandBufferAccess::onBufferRead(VkAccessFlags readAccessType,
8646                                        PipelineStage readStage,
8647                                        BufferHelper *buffer)
8648 {
8649     ASSERT(!buffer->isReleasedToExternal());
8650     mReadBuffers.emplace_back(buffer, readAccessType, readStage);
8651 }
8652 
8653 void CommandBufferAccess::onBufferWrite(VkAccessFlags writeAccessType,
8654                                         PipelineStage writeStage,
8655                                         BufferHelper *buffer)
8656 {
8657     ASSERT(!buffer->isReleasedToExternal());
8658     mWriteBuffers.emplace_back(buffer, writeAccessType, writeStage);
8659 }
8660 
8661 void CommandBufferAccess::onImageRead(VkImageAspectFlags aspectFlags,
8662                                       ImageLayout imageLayout,
8663                                       ImageHelper *image)
8664 {
8665     ASSERT(!image->isReleasedToExternal());
8666     ASSERT(image->getImageSerial().valid());
8667     mReadImages.emplace_back(image, aspectFlags, imageLayout);
8668 }
8669 
8670 void CommandBufferAccess::onImageWrite(gl::LevelIndex levelStart,
8671                                        uint32_t levelCount,
8672                                        uint32_t layerStart,
8673                                        uint32_t layerCount,
8674                                        VkImageAspectFlags aspectFlags,
8675                                        ImageLayout imageLayout,
8676                                        ImageHelper *image)
8677 {
8678     ASSERT(!image->isReleasedToExternal());
8679     ASSERT(image->getImageSerial().valid());
8680     mWriteImages.emplace_back(CommandBufferImageAccess{image, aspectFlags, imageLayout}, levelStart,
8681                               levelCount, layerStart, layerCount);
8682 }
8683 
8684 }  // namespace vk
8685 }  // namespace rx
8686