/*
 * Copyright (C) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "node_context_pool_manager_gles.h"

#include <algorithm>

#include <render/namespace.h>

#include "device/gpu_resource_manager.h"
#include "gles/device_gles.h"
#include "gles/gl_functions.h"
#include "gles/gpu_image_gles.h"
#include "gles/swapchain_gles.h"
#include "nodecontext/render_command_list.h" // RenderPassBeginInfo...
#include "util/log.h"

using namespace BASE_NS;

RENDER_BEGIN_NAMESPACE()
namespace {
constexpr const bool VERBOSE_LOGGING = false;
constexpr const bool HASH_LAYOUTS = false;
struct BindImage {
    uint32_t layer;
    uint32_t mipLevel;
    const GpuImageGLES* image;
};

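// Fills 'images' with the GLES image, layer and mip level of each render pass attachment.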
void UpdateBindImages(const RenderCommandBeginRenderPass& beginRenderPass, array_view<BindImage> images,
    GpuResourceManager& gpuResourceMgr_)
{
    const auto& renderPassDesc = beginRenderPass.renderPassDesc;
    for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
        images[idx].layer = renderPassDesc.attachments[idx].layer;
        images[idx].mipLevel = renderPassDesc.attachments[idx].mipLevel;
        images[idx].image = gpuResourceMgr_.GetImage<GpuImageGLES>(renderPassDesc.attachmentHandles[idx]);
    }
}

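// Hashes the render pass attachments (layer, mip level, GPU handle) and subpass attachment indices into the
// framebuffer cache key.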
uint64_t hashRPD(const RenderCommandBeginRenderPass& beginRenderPass, GpuResourceManager& gpuResourceMgr_)
{
    const auto& renderPassDesc = beginRenderPass.renderPassDesc;
    uint64_t rpHash = 0;
    // hash engine gpu handle
    {
        for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
            HashCombine(rpHash, renderPassDesc.attachments[idx].layer);
            HashCombine(rpHash, renderPassDesc.attachments[idx].mipLevel);
            const EngineResourceHandle gpuHandle = gpuResourceMgr_.GetGpuHandle(renderPassDesc.attachmentHandles[idx]);
            HashCombine(rpHash, gpuHandle.id);
        }
    }

    // hash input and output layouts (ignored by default since they do not contribute in GL/GLES at all; they are an
    // artifact of Vulkan)
    if constexpr (HASH_LAYOUTS) {
        const RenderPassImageLayouts& renderPassImageLayouts = beginRenderPass.imageLayouts;
        for (uint32_t idx = 0; idx < renderPassDesc.attachmentCount; ++idx) {
            HashCombine(rpHash, renderPassImageLayouts.attachmentInitialLayouts[idx],
                renderPassImageLayouts.attachmentFinalLayouts[idx]);
        }
    }

    // hash subpasses
    PLUGIN_ASSERT(renderPassDesc.subpassCount <= beginRenderPass.subpasses.size());
    for (const RenderPassSubpassDesc& subpass : beginRenderPass.subpasses) {
        HashRange(
            rpHash, subpass.inputAttachmentIndices, subpass.inputAttachmentIndices + subpass.inputAttachmentCount);
        HashRange(
            rpHash, subpass.colorAttachmentIndices, subpass.colorAttachmentIndices + subpass.colorAttachmentCount);
        if (subpass.depthAttachmentCount) {
            HashCombine(rpHash, (uint64_t)subpass.depthAttachmentIndex);
        }
    }
    return rpHash;
}

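// Returns true when the currently bound framebuffer is complete; otherwise logs the specific GL status.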
bool VerifyFBO()
{
    GLenum status;
    status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if (status != GL_FRAMEBUFFER_COMPLETE) {
        // failed!
        switch (status) {
            case GL_FRAMEBUFFER_UNDEFINED:
                // is returned if target is the default framebuffer, but the default framebuffer does not exist.
                PLUGIN_LOG_E("Framebuffer undefined");
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
                // is returned if any of the framebuffer attachment points are framebuffer incomplete.
                PLUGIN_LOG_E("Framebuffer incomplete attachment");
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
                // is returned if the framebuffer does not have at least one image attached to it.
                PLUGIN_LOG_E("Framebuffer incomplete missing attachment");
                break;
            case GL_FRAMEBUFFER_UNSUPPORTED:
                // is returned if depth and stencil attachments, if present, are not the same renderbuffer, or
                // if the combination of internal formats of the attached images violates an
                // implementation-dependent set of restrictions.
                PLUGIN_LOG_E("Framebuffer unsupported");
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE:
                // is returned if the value of GL_RENDERBUFFER_SAMPLES is not the same for all attached
                // renderbuffers or, if the attached images are a mix of renderbuffers and textures, the value
                // of GL_RENDERBUFFER_SAMPLES is not zero.
                PLUGIN_LOG_E("Framebuffer incomplete multisample");
                break;
            case GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS:
                // is returned if any framebuffer attachment is layered, and any populated attachment is not
                // layered, or if all populated color attachments are not from textures of the same target.
                PLUGIN_LOG_E("Framebuffer incomplete layer targets");
                break;
            default: {
                PLUGIN_LOG_E("Framebuffer other error: %x", status);
                break;
            }
        }
        return false;
    }
    return true;
}

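// Format helpers: check whether the image's GL base format contains a stencil or a depth aspect.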
bool HasStencil(const GpuImageGLES* image)
{
    const GpuImagePlatformDataGL& dplat = static_cast<const GpuImagePlatformDataGL&>(image->GetPlatformData());
    return (dplat.format == GL_STENCIL_INDEX) || (dplat.format == GL_DEPTH_STENCIL);
}

bool HasDepth(const GpuImageGLES* image)
{
    const GpuImagePlatformDataGL& dplat = static_cast<const GpuImagePlatformDataGL&>(image->GetPlatformData());
    return (dplat.format == GL_DEPTH_COMPONENT) || (dplat.format == GL_DEPTH_STENCIL);
}

#if RENDER_GL_FLIP_Y_SWAPCHAIN == 0
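// Returns true when the subpass renders to the default framebuffer (backbuffer), i.e. its single color
// attachment (and optional depth) is backed by neither a texture nor a renderbuffer.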
bool IsDefaultAttachment(array_view<const BindImage> images, const RenderPassSubpassDesc& sb)
{
    // Valid backbuffer configurations are:
    // 1. color only
    // 2. color + depth (stencil).
    // It is not allowed to mix custom render targets and backbuffer!
    if (sb.colorAttachmentCount == 1) {
        // okay, looks good. one color...
        const auto color = images[sb.colorAttachmentIndices[0]].image;
        const auto& plat = static_cast<const GpuImagePlatformDataGL&>(color->GetPlatformData());
        if ((plat.image == 0) &&                // not texture
            (plat.renderBuffer == 0))           // not renderbuffer
        {                                       // Color attachment is backbuffer
            if (sb.depthAttachmentCount == 1) { // and has one depth.
                const auto depth = images[sb.depthAttachmentIndex].image;
                const auto& dPlat = static_cast<const GpuImagePlatformDataGL&>(depth->GetPlatformData());
                if ((dPlat.image == 0) && (dPlat.renderBuffer == 0)) { // depth attachment is backbuffer depth.
                    return true;
                } else {
                    // Invalid configuration. (this should be caught earlier already)
                    PLUGIN_LOG_E("Mixing backbuffer with custom depth is not allowed!");
                    PLUGIN_ASSERT_MSG(false, "Mixing backbuffer with custom depth is not allowed!");
                }
            } else {
                return true;
            }
        }
    }
    return false;
}
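// Returns true when the subpass resolves color or depth into the default framebuffer (backbuffer).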
bool IsDefaultResolve(array_view<const BindImage> images, const RenderPassSubpassDesc& sb)
{
    if (sb.resolveAttachmentCount == 1 && sb.depthResolveAttachmentCount == 0) {
        // okay, looks good. one color...
        const GpuImageGLES* color = images[sb.resolveAttachmentIndices[0]].image;
        const GpuImagePlatformDataGL& plat = static_cast<const GpuImagePlatformDataGL&>(color->GetPlatformData());
        if ((plat.image == 0) && (plat.renderBuffer == 0)) {
            return true;
        }
    }
    if (sb.depthResolveAttachmentCount == 1) {
        // okay, looks good. one depth...
        const GpuImageGLES* depth = images[sb.depthResolveAttachmentIndex].image;
        const GpuImagePlatformDataGL& plat = static_cast<const GpuImagePlatformDataGL&>(depth->GetPlatformData());
        if ((plat.image == 0) && (plat.renderBuffer == 0)) {
            return true;
        }
    }
    return false;
}
#endif

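// Deletes all FBOs owned by the framebuffer and zeroes duplicate references so nothing is deleted twice.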
void DeleteFbos(DeviceGLES& device, LowlevelFramebufferGL& ref)
{
    for (uint32_t i = 0; i < ref.fbos.size(); i++) {
        GLuint f, r;
        f = ref.fbos[i].fbo;
        r = ref.fbos[i].resolve;
        if (f != 0) {
            device.DeleteFrameBuffer(f);
        }
        if (r != 0) {
            device.DeleteFrameBuffer(r);
        }
        // the same fbos can be used by multiple render passes, so clean those references too.
        for (uint32_t j = 0; j < ref.fbos.size(); j++) {
            if (f == ref.fbos[j].fbo) {
                ref.fbos[j].fbo = 0;
            }
            if (r == ref.fbos[j].resolve) {
                ref.fbos[j].resolve = 0;
            }
        }
    }
    ref.fbos.clear();
}

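// Attaches one image (renderbuffer, texture layer or 2D texture) to the currently bound FBO and updates the
// framebuffer dimensions.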
void BindToFbo(GLenum attachType, const BindImage& image, size_t& width, size_t& height, bool isStarted)
{
    const GpuImagePlatformDataGL& plat = static_cast<const GpuImagePlatformDataGL&>(image.image->GetPlatformData());
    const GpuImageDesc& desc = image.image->GetDesc();
    if (isStarted) {
        // Assert that all attachments are the same size.
        PLUGIN_ASSERT_MSG(width == desc.width, "Depth attachment is not the same size as other attachments");
        PLUGIN_ASSERT_MSG(height == desc.height, "Depth attachment is not the same size as other attachments");
    }
    width = desc.width;
    height = desc.height;
    const bool isSrc = (desc.usageFlags & CORE_IMAGE_USAGE_TRANSFER_SRC_BIT);
    const bool isDst = (desc.usageFlags & CORE_IMAGE_USAGE_TRANSFER_DST_BIT);
    const bool isSample = (desc.usageFlags & CORE_IMAGE_USAGE_SAMPLED_BIT);
    const bool isStorage = (desc.usageFlags & CORE_IMAGE_USAGE_STORAGE_BIT);
    // could check for bool isColor = (desc.usageFlags & CORE_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
    // could check for bool isDepth = (desc.usageFlags & CORE_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
    // could check for bool isTrans = (desc.usageFlags & CORE_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
    const bool isInput = (desc.usageFlags & CORE_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
    PLUGIN_UNUSED(isSrc);
    PLUGIN_UNUSED(isDst);
    PLUGIN_UNUSED(isSample);
    PLUGIN_UNUSED(isStorage);
    PLUGIN_UNUSED(isInput);
    if (plat.renderBuffer) {
        PLUGIN_ASSERT((!isSrc) && (!isDst) && (!isSample) && (!isStorage) && (!isInput));
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, attachType, GL_RENDERBUFFER, plat.renderBuffer);
    } else {
        if ((plat.type == GL_TEXTURE_2D_ARRAY) || (plat.type == GL_TEXTURE_2D_MULTISAMPLE_ARRAY)) {
            glFramebufferTextureLayer(
                GL_FRAMEBUFFER, attachType, plat.image, (GLint)image.mipLevel, (GLint)image.layer);
        } else {
            glFramebufferTexture2D(GL_FRAMEBUFFER, attachType, plat.type, plat.image, (GLint)image.mipLevel);
        }
    }
}
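// Like BindToFbo, but transient multisampled attachments are bound with glFramebufferTexture2DMultisampleEXT
// when multisampled render-to-texture is available.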
void BindToFboMultisampled(GLenum attachType, const BindImage& image, const BindImage& resolveImage, size_t& width,
    size_t& height, bool isStarted, bool multisampledRenderToTexture)
{
    const GpuImagePlatformDataGL& plat =
        static_cast<const GpuImagePlatformDataGL&>(resolveImage.image->GetPlatformData());
    const GpuImageDesc& desc = image.image->GetDesc();
    if (isStarted) {
        // Assert that all attachments are the same size.
        PLUGIN_ASSERT_MSG(width == desc.width, "Depth attachment is not the same size as other attachments");
        PLUGIN_ASSERT_MSG(height == desc.height, "Depth attachment is not the same size as other attachments");
    }
    width = desc.width;
    height = desc.height;
    const bool isSrc = (desc.usageFlags & CORE_IMAGE_USAGE_TRANSFER_SRC_BIT);
    const bool isDst = (desc.usageFlags & CORE_IMAGE_USAGE_TRANSFER_DST_BIT);
    const bool isSample = (desc.usageFlags & CORE_IMAGE_USAGE_SAMPLED_BIT);
    const bool isStorage = (desc.usageFlags & CORE_IMAGE_USAGE_STORAGE_BIT);
    // could check for bool isColor = (desc.usageFlags & CORE_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
    // could check for bool isDepth = (desc.usageFlags & CORE_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
    const bool isTrans = (desc.usageFlags & CORE_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT);
    const bool isInput = (desc.usageFlags & CORE_IMAGE_USAGE_INPUT_ATTACHMENT_BIT);
    PLUGIN_UNUSED(isSrc);
    PLUGIN_UNUSED(isDst);
    PLUGIN_UNUSED(isSample);
    PLUGIN_UNUSED(isStorage);
    PLUGIN_UNUSED(isTrans);
    PLUGIN_UNUSED(isInput);
    if (plat.renderBuffer) {
        PLUGIN_ASSERT((!isSrc) && (!isDst) && (!isSample) && (!isStorage) && (!isInput));
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, attachType, GL_RENDERBUFFER, plat.renderBuffer);
    } else {
        if ((plat.type == GL_TEXTURE_2D_ARRAY) || (plat.type == GL_TEXTURE_2D_MULTISAMPLE_ARRAY)) {
            glFramebufferTextureLayer(
                GL_FRAMEBUFFER, attachType, plat.image, (GLint)image.mipLevel, (GLint)image.layer);
#if RENDER_HAS_GLES_BACKEND
        } else if (multisampledRenderToTexture && isTrans &&
                   ((plat.type == GL_TEXTURE_2D_MULTISAMPLE) || (desc.sampleCountFlags & ~CORE_SAMPLE_COUNT_1_BIT))) {
            const auto samples = (desc.sampleCountFlags & CORE_SAMPLE_COUNT_8_BIT)
                                     ? 8
                                     : ((desc.sampleCountFlags & CORE_SAMPLE_COUNT_4_BIT) ? 4 : 2);
            glFramebufferTexture2DMultisampleEXT(
                GL_FRAMEBUFFER, attachType, plat.type, plat.image, (GLint)image.mipLevel, samples);
#endif
        } else {
            glFramebufferTexture2D(GL_FRAMEBUFFER, attachType, plat.type, plat.image, (GLint)image.mipLevel);
        }
    }
}
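// Hashes the color/depth attachment indices of a subpass; used as the key for sharing FBOs between subpasses.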
uint64_t HashAttachments(const RenderPassSubpassDesc& sb)
{
    // generate hash for attachments.
    uint64_t subHash = 0;
    HashRange(subHash, sb.colorAttachmentIndices, sb.colorAttachmentIndices + sb.colorAttachmentCount);
    if (sb.depthAttachmentCount) {
        HashCombine(subHash, (uint64_t)sb.depthAttachmentIndex);
    }
    return subHash;
}

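// Picks the FBO attachment point (depth, stencil or depth-stencil) based on the image format.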
GLenum BindType(const GpuImageGLES* image)
{
    GLenum bindType = GL_NONE;
    const bool depth = HasDepth(image);
    const bool stencil = HasStencil(image);
    if (depth && stencil) {
        bindType = GL_DEPTH_STENCIL_ATTACHMENT;
    } else if (depth) {
        bindType = GL_DEPTH_ATTACHMENT;
    } else if (stencil) {
        bindType = GL_STENCIL_ATTACHMENT;
    }
    return bindType;
}

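// Creates the draw FBO for a subpass: binds its color and depth attachments, sets the draw buffers and
// verifies completeness. Returns the GL FBO name.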
uint32_t GenerateSubPassFBO(DeviceGLES& device, LowlevelFramebufferGL& framebuffer, const RenderPassSubpassDesc& sb,
    const array_view<const BindImage> images, const size_t resolveAttachmentCount,
    const array_view<const uint32_t> imageMap, bool multisampledRenderToTexture)
{
    // generate fbo for subpass (depth/color).
    GLuint fbo;
    glGenFramebuffers(1, &fbo);
#if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
    PLUGIN_LOG_D("fbo id >: %u", fbo);
#endif
    device.BindFrameBuffer(fbo);
    GLenum drawBuffers[PipelineStateConstants::MAX_COLOR_ATTACHMENT_COUNT] = { GL_NONE };
    GLenum colorAttachmentCount = 0;
    for (uint32_t idx = 0; idx < sb.colorAttachmentCount; ++idx) {
        const uint32_t ci = sb.colorAttachmentIndices[idx];
        const uint32_t original = (ci < imageMap.size()) ? imageMap[ci] : 0xff;
        if (images[ci].image) {
            drawBuffers[idx] = GL_COLOR_ATTACHMENT0 + colorAttachmentCount;
            if (original == 0xff) {
                BindToFbo(drawBuffers[idx], images[ci], framebuffer.width, framebuffer.height,
                    (colorAttachmentCount) || (resolveAttachmentCount));
            } else {
                BindToFboMultisampled(drawBuffers[idx], images[original], images[ci], framebuffer.width,
                    framebuffer.height, (colorAttachmentCount) || (resolveAttachmentCount),
                    multisampledRenderToTexture);
            }
            ++colorAttachmentCount;
        } else {
            PLUGIN_LOG_E("no image for color attachment %u %u", idx, ci);
            drawBuffers[idx] = GL_NONE;
        }
    }
    glDrawBuffers((GLsizei)sb.colorAttachmentCount, drawBuffers);
    if (sb.depthAttachmentCount == 1) {
        const auto* image = images[sb.depthAttachmentIndex].image;
        if (image) {
            const GLenum bindType = BindType(image);
            PLUGIN_ASSERT_MSG(bindType != GL_NONE, "Depth attachment has no stencil or depth");
            BindToFboMultisampled(bindType, images[sb.depthAttachmentIndex], images[sb.depthAttachmentIndex],
                framebuffer.width, framebuffer.height, (colorAttachmentCount) || (resolveAttachmentCount),
                multisampledRenderToTexture);
        } else {
            PLUGIN_LOG_E("no image for depth attachment");
        }
    }
    if (!VerifyFBO()) {
        PLUGIN_LOG_E("Failed to create subpass FBO size [%zd %zd] [color:%d depth:%d resolve:%d]", framebuffer.width,
            framebuffer.height, sb.colorAttachmentCount, sb.depthAttachmentCount, sb.resolveAttachmentCount);
        PLUGIN_ASSERT_MSG(false, "Framebuffer creation failed!");
    } else if constexpr (VERBOSE_LOGGING) {
        PLUGIN_LOG_V("Created subpass FBO size [%zd %zd] [color:%d depth:%d resolve:%d]", framebuffer.width,
            framebuffer.height, sb.colorAttachmentCount, sb.depthAttachmentCount, sb.resolveAttachmentCount);
    }
    return fbo;
}

struct ResolvePair {
    uint32_t resolveAttachmentCount;
    uint32_t resolveFbo;
};

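// Creates a separate FBO with the subpass resolve attachments bound; returns the resolve attachment count and
// the FBO name ({ 1, 0 } when resolving straight into the default framebuffer).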
ResolvePair GenerateResolveFBO(DeviceGLES& device, LowlevelFramebufferGL& framebuffer, const RenderPassSubpassDesc& sb,
    array_view<const BindImage> images)
{
    // generate fbos for resolve attachments if needed.
    if ((sb.resolveAttachmentCount == 0) && (sb.depthResolveAttachmentCount == 0)) {
        return { 0, 0 };
    }
#if RENDER_GL_FLIP_Y_SWAPCHAIN == 0
    // currently resolving to backbuffer AND other attachments at the same time is not possible.
    if (IsDefaultResolve(images, sb)) {
        // resolving from custom render target to default fbo.
        const auto* swp = device.GetSwapchain();
        if (swp) {
            const auto desc = swp->GetDesc();
            framebuffer.width = desc.width;
            framebuffer.height = desc.height;
        }
        return { 1, 0 };
    }
#endif
    // all subpasses with resolve will get a special resolve fbo. (expecting that no more than one subpass
    // resolves to a single attachment; if more than one subpass resolves to a single attachment we have
    // extra fbos.)
    ResolvePair rp { 0, 0 };
    glGenFramebuffers(1, &rp.resolveFbo);
#if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
    PLUGIN_LOG_D("fbo id >: %u", rp.resolveFbo);
#endif
    device.BindFrameBuffer(rp.resolveFbo);
    GLenum drawBuffers[PipelineStateConstants::MAX_COLOR_ATTACHMENT_COUNT] = { GL_NONE };
    for (uint32_t idx = 0; idx < sb.resolveAttachmentCount; ++idx) {
        const uint32_t ci = sb.resolveAttachmentIndices[idx];
        const auto* image = images[ci].image;
        if (image) {
            drawBuffers[idx] = GL_COLOR_ATTACHMENT0 + rp.resolveAttachmentCount;
            BindToFbo(
                drawBuffers[idx], images[ci], framebuffer.width, framebuffer.height, (rp.resolveAttachmentCount > 0));
            ++rp.resolveAttachmentCount;
        } else {
            PLUGIN_LOG_E("no image for resolve attachment %u %u", idx, ci);
            drawBuffers[idx] = GL_NONE;
        }
    }
    glDrawBuffers((GLsizei)sb.resolveAttachmentCount, drawBuffers);
    for (uint32_t idx = 0; idx < sb.depthResolveAttachmentCount; ++idx) {
        const uint32_t ci = sb.depthResolveAttachmentIndex;
        const auto* image = images[ci].image;
        if (image) {
            BindToFbo(
                BindType(image), images[ci], framebuffer.width, framebuffer.height, (rp.resolveAttachmentCount > 0));
        } else {
            PLUGIN_LOG_E("no image for depth resolve attachment %u %u", idx, ci);
        }
    }
    return rp;
}

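// Produces the { fbo, resolveFbo } pair for one subpass, re-using an existing FBO when another subpass has an
// identical attachment set; 0 refers to the default framebuffer.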
LowlevelFramebufferGL::SubPassPair ProcessSubPass(DeviceGLES& device, LowlevelFramebufferGL& framebuffer,
    std::map<uint64_t, GLuint>& fboMap, array_view<const BindImage> images, const array_view<const uint32_t> imageMap,
    const RenderPassSubpassDesc& sb, bool multisampledRenderToTexture)
{
#if RENDER_GL_FLIP_Y_SWAPCHAIN == 0
    if (IsDefaultAttachment(images, sb)) {
        // This subpass uses backbuffer!
        const auto* swp = device.GetSwapchain();
        if (swp) {
            const auto desc = swp->GetDesc();
            framebuffer.width = desc.width;
            framebuffer.height = desc.height;
        }
        // NOTE: it is technically possible to resolve from backbuffer to a custom render target,
        // but we do not support it now.
        PLUGIN_ASSERT_MSG(sb.resolveAttachmentCount == 0, "No resolving from default framebuffer");
        return { 0, 0 };
    }
#endif
    // This subpass uses custom render targets.
    PLUGIN_ASSERT((sb.colorAttachmentCount + sb.depthAttachmentCount) <
                  (PipelineStateConstants::MAX_COLOR_ATTACHMENT_COUNT + 1)); // +1 for depth
    uint32_t fbo = 0;
    const auto resolveResult = GenerateResolveFBO(device, framebuffer, sb, images);
    const uint64_t subHash = HashAttachments(sb);
    if (const auto it = fboMap.find(subHash); it != fboMap.end()) {
        // matching fbo created already, re-use
        fbo = it->second;
    } else {
        fbo = GenerateSubPassFBO(device, framebuffer, sb, images, resolveResult.resolveAttachmentCount, imageMap,
            multisampledRenderToTexture);
        fboMap[subHash] = fbo;
    }
    return { fbo, resolveResult.resolveFbo };
}

} // namespace

NodeContextPoolManagerGLES::NodeContextPoolManagerGLES(Device& device, GpuResourceManager& gpuResourceManager)
    : NodeContextPoolManager(), device_ { (DeviceGLES&)device }, gpuResourceMgr_ { gpuResourceManager }
{
    bufferingCount_ = device_.GetCommandBufferingCount();
#if RENDER_HAS_GLES_BACKEND
    if (device_.GetBackendType() == DeviceBackendType::OPENGLES) {
        multisampledRenderToTexture_ = device_.HasExtension("GL_EXT_multisampled_render_to_texture2");
    }
#endif
}

NodeContextPoolManagerGLES::~NodeContextPoolManagerGLES()
{
    if (!framebufferCache_.framebuffers.empty()) {
        PLUGIN_ASSERT(device_.IsActive());
        for (auto& ref : framebufferCache_.framebuffers) {
            DeleteFbos(device_, ref);
        }
    }
}

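// Ages the framebuffer cache at the start of a frame: FBOs not used within the buffering count (plus a small
// margin) are deleted and their render pass hash entries removed.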
void NodeContextPoolManagerGLES::BeginFrame()
{
    bufferingIndex_ = 0;
    const auto maxAge = 2;
    const auto minAge = device_.GetCommandBufferingCount() + maxAge;
    const auto ageLimit = (device_.GetFrameCount() < minAge) ? 0 : (device_.GetFrameCount() - minAge);
    const size_t limit = framebufferCache_.frameBufferFrameUseIndex.size();
    for (size_t index = 0; index < limit; ++index) {
        auto const useIndex = framebufferCache_.frameBufferFrameUseIndex[index];
        auto& ref = framebufferCache_.framebuffers[index];
        if (useIndex < ageLimit && (!ref.fbos.empty())) {
            DeleteFbos(device_, ref);
            auto const pos = std::find_if(framebufferCache_.renderPassHashToIndex.begin(),
                framebufferCache_.renderPassHashToIndex.end(),
                [index](auto const& hashToIndex) { return hashToIndex.second == index; });
            if (pos != framebufferCache_.renderPassHashToIndex.end()) {
                framebufferCache_.renderPassHashToIndex.erase(pos);
            }
        }
    }
}

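// Returns a handle to the cached framebuffer for this render pass, creating the per-subpass FBOs on a cache
// miss; the handle's index part points into framebufferCache_.framebuffers.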
EngineResourceHandle NodeContextPoolManagerGLES::GetFramebufferHandle(
    const RenderCommandBeginRenderPass& beginRenderPass)
{
    PLUGIN_ASSERT(device_.IsActive());
    const uint64_t rpHash = hashRPD(beginRenderPass, gpuResourceMgr_);
    if (const auto iter = framebufferCache_.renderPassHashToIndex.find(rpHash);
        iter != framebufferCache_.renderPassHashToIndex.cend()) {
        PLUGIN_ASSERT(iter->second < framebufferCache_.framebuffers.size());
        // store frame index for usage
        framebufferCache_.frameBufferFrameUseIndex[iter->second] = device_.GetFrameCount();
        return RenderHandleUtil::CreateEngineResourceHandle(RenderHandleType::UNDEFINED, iter->second, 0);
    }

    BindImage images[PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT];
    UpdateBindImages(beginRenderPass, images, gpuResourceMgr_);

    // Create fbo for each subpass
    LowlevelFramebufferGL fb;

    const auto& rpd = beginRenderPass.renderPassDesc;
    fb.fbos.resize(rpd.subpassCount);
    if constexpr (VERBOSE_LOGGING) {
        PLUGIN_LOG_V("Creating framebuffer with %d subpasses", rpd.subpassCount);
    }
    // NOTE: we currently expect that resolve, color and depth attachments are regular 2d textures (or
    // renderbuffers)
    std::map<uint64_t, GLuint> fboMap;
    std::transform(std::begin(beginRenderPass.subpasses), std::begin(beginRenderPass.subpasses) + rpd.subpassCount,
        fb.fbos.data(),
        [this, &fb, &fboMap, images = array_view<const BindImage>(images),
            imageMap = array_view<const uint32_t>(imageMap_)](const RenderPassSubpassDesc& subpass) {
            return ProcessSubPass(device_, fb, fboMap, images, imageMap, subpass, multisampledRenderToTexture_);
        });
    if constexpr (VERBOSE_LOGGING) {
        PLUGIN_LOG_V("Created framebuffer with %d subpasses at size [%zd %zd]", rpd.subpassCount, fb.width, fb.height);
    }

    uint32_t arrayIndex = 0;
    if (auto const pos = std::find_if(framebufferCache_.framebuffers.begin(), framebufferCache_.framebuffers.end(),
            [](auto const& framebuffer) { return framebuffer.fbos.empty(); });
        pos != framebufferCache_.framebuffers.end()) {
        arrayIndex = (uint32_t)std::distance(framebufferCache_.framebuffers.begin(), pos);
        *pos = move(fb);
        framebufferCache_.frameBufferFrameUseIndex[arrayIndex] = device_.GetFrameCount();
    } else {
        framebufferCache_.framebuffers.emplace_back(move(fb));
        framebufferCache_.frameBufferFrameUseIndex.emplace_back(device_.GetFrameCount());
        arrayIndex = (uint32_t)framebufferCache_.framebuffers.size() - 1;
    }
    framebufferCache_.renderPassHashToIndex[rpHash] = arrayIndex;
    return RenderHandleUtil::CreateEngineResourceHandle(RenderHandleType::UNDEFINED, arrayIndex, 0);
}

const LowlevelFramebufferGL& NodeContextPoolManagerGLES::GetFramebuffer(const EngineResourceHandle handle) const
{
    PLUGIN_ASSERT(RenderHandleUtil::IsValid(handle));
    const uint32_t index = RenderHandleUtil::GetIndexPart(handle);
    PLUGIN_ASSERT(index < framebufferCache_.framebuffers.size());
    return framebufferCache_.framebuffers[index];
}

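// For passes that resolve multisampled color into non-backbuffer attachments, redirects rendering to the
// resolve targets (dropping the explicit resolve) and stores the attachment remapping in imageMap_.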
void NodeContextPoolManagerGLES::FilterRenderPass(RenderCommandBeginRenderPass& beginRenderPass)
{
    imageMap_.clear();
    imageMap_.insert(imageMap_.end(), beginRenderPass.renderPassDesc.attachmentCount, 0xff);
    auto begin = beginRenderPass.subpasses.begin();
    auto pos = std::find_if(begin, beginRenderPass.subpasses.end(),
        [](const RenderPassSubpassDesc& subpass) { return (subpass.resolveAttachmentCount > 0); });
    if (pos != beginRenderPass.subpasses.end()) {
        BindImage images[PipelineStateConstants::MAX_RENDER_PASS_ATTACHMENT_COUNT];
        UpdateBindImages(beginRenderPass, images, gpuResourceMgr_);
        if (!IsDefaultResolve(images, *pos)) {
            const auto resolveAttachmentIndices =
                array_view(pos->resolveAttachmentIndices, pos->resolveAttachmentCount);
            const auto colorAttachmentIndices = array_view(pos->colorAttachmentIndices, pos->colorAttachmentCount);
            for (auto i = 0U; i < pos->resolveAttachmentCount; ++i) {
                const auto color = colorAttachmentIndices[i];
                const auto resolve = resolveAttachmentIndices[i];
                imageMap_[color] = resolve;
                beginRenderPass.renderPassDesc.attachments[resolve].loadOp =
                    beginRenderPass.renderPassDesc.attachments[color].loadOp;
            }
            for (auto i = begin; i != pos; ++i) {
                for (auto ci = 0U; ci < i->colorAttachmentCount; ++ci) {
                    const auto oldColor = i->colorAttachmentIndices[ci];
                    if (const auto newColor = imageMap_[oldColor]; newColor != 0xff) {
                        i->colorAttachmentIndices[ci] = newColor;
                    }
                }
            }
            for (auto ci = 0U; ci < pos->colorAttachmentCount; ++ci) {
                const auto oldColor = pos->colorAttachmentIndices[ci];
                if (const auto newColor = imageMap_[oldColor]; newColor != 0xff) {
                    pos->colorAttachmentIndices[ci] = newColor;
                }
            }
            pos->resolveAttachmentCount = 0;
            vector<uint32_t> map(imageMap_.size(), 0xff);
            for (uint32_t color = 0U, end = static_cast<uint32_t>(imageMap_.size()); color < end; ++color) {
                const auto resolve = imageMap_[color];
                if (resolve != 0xff) {
                    map[resolve] = color;
                }
            }
            imageMap_ = map;
        }
    }
}

RENDER_END_NAMESPACE()