/*
 * Copyright (C) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "gpu_buffer_gles.h"

#if (RENDER_PERF_ENABLED == 1)
#include <core/implementation_uids.h>
#include <core/perf/intf_performance_data_manager.h>
#endif
#include <render/namespace.h>

#include "gles/device_gles.h"
#include "gles/gl_functions.h"
#include "util/log.h"

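// Evaluates to GL_TRUE when all bits of `bit` are set in `value`, GL_FALSE otherwise,
// e.g. IS_BIT(desc.memoryPropertyFlags, CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT).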
#define IS_BIT(value, bit) ((((value) & (bit)) == (bit)) ? (GLboolean)GL_TRUE : (GLboolean)GL_FALSE)

RENDER_BEGIN_NAMESPACE()
namespace {
#if (RENDER_PERF_ENABLED == 1)
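// Reports a GPU buffer (de)allocation delta to the performance data manager;
// the destructor passes a negative size so the running total stays balanced.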
void RecordAllocation(const int64_t alignedByteSize)
{
    if (auto* inst = CORE_NS::GetInstance<CORE_NS::IPerformanceDataManagerFactory>(CORE_NS::UID_PERFORMANCE_FACTORY);
        inst) {
        CORE_NS::IPerformanceDataManager* pdm = inst->Get("Memory");
        pdm->UpdateData("AllGpuBuffers", "GPU_BUFFER", alignedByteSize);
    }
}
#endif

constexpr GLenum INIT_TARGET = GL_COPY_WRITE_BUFFER;

constexpr uint32_t MakeFlags(uint32_t requiredFlags)
{
    uint32_t flags = 0;
    if ((requiredFlags & CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) == 0) {
        // Allow non-device-local (non-GPU) memory, since DEVICE_LOCAL was not requested.
        flags |= GL_CLIENT_STORAGE_BIT_EXT;
    }
    if (requiredFlags & CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
        // Buffer can be mapped for writing (mapping for reads would additionally need GL_MAP_READ_BIT).
        flags |= GL_MAP_WRITE_BIT;
    }
    if (requiredFlags & CORE_MEMORY_PROPERTY_HOST_COHERENT_BIT) {
        // Coherent mapping, so no explicit flush is needed.
        flags |= GL_MAP_COHERENT_BIT_EXT;
    }
    if (flags & GL_MAP_COHERENT_BIT_EXT) {
        // It is an error to specify MAP_COHERENT_BIT_EXT without also specifying MAP_PERSISTENT_BIT_EXT.
        flags |= GL_MAP_PERSISTENT_BIT_EXT;
    }
    if (flags & GL_MAP_PERSISTENT_BIT_EXT) {
        // If <flags> contains MAP_PERSISTENT_BIT_EXT, it must also contain at least one of
        // MAP_READ_BIT or MAP_WRITE_BIT.
        flags |= GL_MAP_WRITE_BIT;
    }
    return flags;
}
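// For example, DEVICE_LOCAL | HOST_VISIBLE | HOST_COHERENT memory produces
// GL_MAP_WRITE_BIT | GL_MAP_COHERENT_BIT_EXT | GL_MAP_PERSISTENT_BIT_EXT,
// which is the combination the constructor's persistent-mapping path relies on.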
} // namespace

GpuBufferGLES::GpuBufferGLES(Device& device, const GpuBufferDesc& desc)
    : device_((DeviceGLES&)device), plat_({ {}, 0u, 0u, desc.byteSize, 0u, desc.byteSize }), desc_(desc),
      isPersistantlyMapped_((desc.memoryPropertyFlags & CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
                            (desc.memoryPropertyFlags & CORE_MEMORY_PROPERTY_HOST_COHERENT_BIT)),
      // At some point see if other memory property flags should be used.
      isMappable_(IS_BIT(desc.memoryPropertyFlags, CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT))
{
    PLUGIN_ASSERT(device_.IsActive());
    glGenBuffers(1, &plat_.buffer);

    // Alignment only needs handling for mappable uniform buffers, due to the binding offset.
    GLint minAlignment = sizeof(float) * 4u; // NOTE: fallback guess; overwritten by the query below.
    glGetIntegerv(GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, &minAlignment);

    minAlignment = minAlignment > 0 ? minAlignment : 1;
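    // Round the bind size up to the next multiple of the alignment,
    // e.g. bindMemoryByteSize = 100 with minAlignment = 16 yields alignedBindByteSize = 112.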
    plat_.alignedBindByteSize = ((plat_.bindMemoryByteSize + (minAlignment - 1)) / minAlignment) * minAlignment;
    plat_.alignedByteSize = plat_.alignedBindByteSize;

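    // A dynamic ring buffer reserves one aligned slot per in-flight command buffer (e.g. with
    // triple buffering, 3 * alignedBindByteSize in total); Map() then cycles through the slots
    // so the CPU does not overwrite data the GPU may still be reading.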
    if (desc.engineCreationFlags & CORE_ENGINE_BUFFER_CREATION_DYNAMIC_RING_BUFFER) {
        isRingBuffer_ = true;
        plat_.alignedByteSize *= device_.GetCommandBufferingCount();
    }

    const auto oldBind = device_.BoundBuffer(INIT_TARGET);
    device_.BindBuffer(INIT_TARGET, plat_.buffer);

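    // Storage is allocated through INIT_TARGET (GL_COPY_WRITE_BUFFER); the previous binding
    // was saved above and is restored once allocation (and optional mapping) is done.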
    // Check for the buffer storage extension.
    if (const bool hasBufferStorageEXT = device_.HasExtension("GL_EXT_buffer_storage"); hasBufferStorageEXT) {
        uint32_t flags = MakeFlags(desc.memoryPropertyFlags);
        glBufferStorageEXT(INIT_TARGET, static_cast<GLsizeiptr>(plat_.alignedByteSize), nullptr, flags);
        if (isPersistantlyMapped_) {
            // Create the persistent mapping.
            flags = flags & (~GL_CLIENT_STORAGE_BIT_EXT); // CLIENT_STORAGE is not a valid flag for glMapBufferRange.
            data_ = reinterpret_cast<uint8_t*>(
                glMapBufferRange(INIT_TARGET, 0, static_cast<GLsizeiptr>(plat_.alignedByteSize), flags));
        }
    } else {
        // glBufferStorageEXT not available, so persistent mapping is not possible.
        isPersistantlyMapped_ = false;
        // Legacy path without the GL_EXT_buffer_storage extension.
        if (desc_.engineCreationFlags & EngineBufferCreationFlagBits::CORE_ENGINE_BUFFER_CREATION_SINGLE_SHOT_STAGING) {
            // Single-shot staging buffer, so give the driver more hints (CPU writes once, GPU reads a few times).
            glBufferData(INIT_TARGET, static_cast<GLsizeiptr>(plat_.alignedByteSize), nullptr, GL_STREAM_DRAW);
        } else {
            if (isMappable_) {
                // Modified repeatedly, used many times.
                glBufferData(INIT_TARGET, static_cast<GLsizeiptr>(plat_.alignedByteSize), nullptr, GL_DYNAMIC_DRAW);
            } else {
                // Modified once, used many times.
                glBufferData(INIT_TARGET, static_cast<GLsizeiptr>(plat_.alignedByteSize), nullptr, GL_STATIC_DRAW);
            }
        }
    }
    device_.BindBuffer(INIT_TARGET, oldBind);

#if (RENDER_PERF_ENABLED == 1)
    RecordAllocation(static_cast<int64_t>(plat_.alignedByteSize));
#endif

#if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
    PLUGIN_LOG_E("gpu buffer >: %u", plat_.buffer);
#endif
}

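// Typical per-frame usage of a mappable ring buffer (illustrative sketch only):
//     void* p = buffer.Map();           // pointer to the current ring slot
//     std::memcpy(p, src, srcByteSize); // srcByteSize <= bindMemoryByteSize
//     buffer.Unmap();
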
GpuBufferGLES::~GpuBufferGLES()
{
    if (plat_.buffer) {
        PLUGIN_ASSERT(device_.IsActive());
        if ((isPersistantlyMapped_) || (isMapped_)) {
            isMapped_ = false;
            // Unmap the buffer.
            device_.BindBuffer(GL_COPY_WRITE_BUFFER, plat_.buffer);
            glUnmapBuffer(GL_COPY_WRITE_BUFFER);
            device_.BindBuffer(GL_COPY_WRITE_BUFFER, 0);
        }
        device_.DeleteBuffer(plat_.buffer);
    }

#if (RENDER_PERF_ENABLED == 1)
    RecordAllocation(-static_cast<int64_t>(plat_.alignedByteSize));
#endif
#if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
    PLUGIN_LOG_E("gpu buffer <: %u", plat_.buffer);
#endif
}

const GpuBufferDesc& GpuBufferGLES::GetDesc() const
{
    return desc_;
}

const GpuBufferPlatformDataGL& GpuBufferGLES::GetPlatformData() const
{
    return plat_;
}

void* GpuBufferGLES::Map()
{
    if (!isMappable_) {
        PLUGIN_LOG_E("trying to map non-mappable gpu buffer");
        return nullptr;
    }
    if (isMapped_) {
        PLUGIN_LOG_E("gpu buffer already mapped");
        Unmap();
    }
    isMapped_ = true;

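    // For ring buffers, first advance to the next slot, wrapping back to offset zero
    // (e.g. with three slots the offsets cycle alignedBindByteSize, 2 * alignedBindByteSize, 0, ...).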
    if (isRingBuffer_) {
        plat_.currentByteOffset += plat_.alignedBindByteSize;
        if (plat_.currentByteOffset >= plat_.alignedByteSize) {
            plat_.currentByteOffset = 0;
        }
    }

    void* ret = nullptr;
    if (isPersistantlyMapped_) {
        ret = data_ + plat_.currentByteOffset;
    } else {
        PLUGIN_ASSERT(device_.IsActive());
        const auto oldBind = device_.BoundBuffer(GL_COPY_WRITE_BUFFER);
        device_.BindBuffer(GL_COPY_WRITE_BUFFER, plat_.buffer);
        if (!isRingBuffer_) {
            ret = glMapBufferRange(GL_COPY_WRITE_BUFFER, 0, static_cast<GLsizeiptr>(plat_.alignedByteSize),
                GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
        } else {
            ret = glMapBufferRange(GL_COPY_WRITE_BUFFER, static_cast<GLintptr>(plat_.currentByteOffset),
                static_cast<GLsizeiptr>(plat_.bindMemoryByteSize), GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
        }
        device_.BindBuffer(GL_COPY_WRITE_BUFFER, oldBind);
    }
    return ret;
}

void GpuBufferGLES::Unmap() const
{
    if (!isMappable_) {
        PLUGIN_LOG_E("trying to unmap non-mappable gpu buffer");
    }
    if (!isMapped_) {
        PLUGIN_LOG_E("gpu buffer not mapped");
    }
    isMapped_ = false;

    if (!isPersistantlyMapped_) {
        PLUGIN_ASSERT(device_.IsActive());
        const auto oldBind = device_.BoundBuffer(GL_COPY_WRITE_BUFFER);
        device_.BindBuffer(GL_COPY_WRITE_BUFFER, plat_.buffer);
        glUnmapBuffer(GL_COPY_WRITE_BUFFER);
        device_.BindBuffer(GL_COPY_WRITE_BUFFER, oldBind);
    }
}

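// Unlike Map(), MapMemory() does not advance the ring-buffer slot: it always maps the whole
// allocation from offset zero (or returns the base pointer of a persistent mapping).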
void* GpuBufferGLES::MapMemory()
{
    if (!isMappable_) {
        PLUGIN_LOG_E("trying to map non-mappable gpu buffer");
        return nullptr;
    }
    if (isMapped_) {
        PLUGIN_LOG_E("gpu buffer already mapped");
        Unmap();
    }
    isMapped_ = true;

    void* ret = nullptr;
    if (isPersistantlyMapped_) {
        ret = data_;
    } else {
        PLUGIN_ASSERT(device_.IsActive());
        const auto oldBind = device_.BoundBuffer(GL_COPY_WRITE_BUFFER);
        device_.BindBuffer(GL_COPY_WRITE_BUFFER, plat_.buffer);
        if (!isRingBuffer_) {
            ret = glMapBufferRange(GL_COPY_WRITE_BUFFER, 0, static_cast<GLsizeiptr>(plat_.alignedByteSize),
                GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT);
        } else {
            ret = glMapBufferRange(GL_COPY_WRITE_BUFFER, 0, static_cast<GLsizeiptr>(plat_.alignedByteSize),
                GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT);
        }
        device_.BindBuffer(GL_COPY_WRITE_BUFFER, oldBind);
    }
    return ret;
}
RENDER_END_NAMESPACE()