1 /*
2 * Copyright (c) 2022 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include "device_gles.h"
17
18 #include <algorithm>
19
20 #include <base/containers/string.h>
21 #include <base/math/vector.h>
22 #include <base/util/compile_time_hashes.h>
23 #include <render/namespace.h>
24
25 #include "device/gpu_resource_manager.h"
26 #include "device/shader_manager.h"
27 #include "device/shader_module.h"
28 #include "gles/gl_functions.h"
29 #include "gles/gpu_buffer_gles.h"
30 #include "gles/gpu_image_gles.h"
31 #include "gles/gpu_program_gles.h"
32 #include "gles/gpu_sampler_gles.h"
33 #include "gles/gpu_semaphore_gles.h"
34 #include "gles/node_context_descriptor_set_manager_gles.h"
35 #include "gles/node_context_pool_manager_gles.h"
36 #include "gles/pipeline_state_object_gles.h"
37 #include "gles/render_backend_gles.h"
38 #include "gles/render_frame_sync_gles.h"
39 #include "gles/shader_module_gles.h"
40 #include "gles/swapchain_gles.h"
41 #include "render_context.h"
42 #include "util/log.h"
43
44 using namespace BASE_NS;
45
46 RENDER_BEGIN_NAMESPACE()
47 namespace {
// Make all temporary binds to texture unit GL_TEXTURE15. (Ideally this would be the last available unit, so as to
// least affect actual usage.) "The number of texture units is implementation-dependent, but must be at least 32.
// texture must be one of GL_TEXTUREi, where i ranges from zero to the value of GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS
// minus one." Our resource caching currently has an implementation limit of 16 texture units, which is why unit 15
// (the last of the 16) is used instead of unit 31.
53 constexpr const uint32_t TEMP_BIND_UNIT = 15;
54 constexpr const string_view EXT_BUFFER_STORAGE = "GL_EXT_buffer_storage";
55 constexpr const uint32_t CACHE_VERSION = 1U;
56 #if RENDER_GL_DEBUG
57 #define DUMP(a) \
58 { \
59 GLint val; \
60 glGetIntegerv(a, &val); \
61 PLUGIN_LOG_V(#a ": %d", val); \
62 }
63
64 bool (*filterErrorFunc)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length,
65 const string_view message, const void* userParam) noexcept = nullptr;
66
auto SourceName(GLenum source)
68 {
69 switch (source) {
70 case GL_DEBUG_SOURCE_API:
71 return "GL_DEBUG_SOURCE_API";
72 case GL_DEBUG_SOURCE_WINDOW_SYSTEM:
73 return "GL_DEBUG_SOURCE_WINDOW_SYSTEM";
74 case GL_DEBUG_SOURCE_SHADER_COMPILER:
75 return "GL_DEBUG_SOURCE_SHADER_COMPILER";
76 case GL_DEBUG_SOURCE_THIRD_PARTY:
77 return "GL_DEBUG_SOURCE_THIRD_PARTY";
78 case GL_DEBUG_SOURCE_APPLICATION:
79 return "GL_DEBUG_SOURCE_APPLICATION";
80 case GL_DEBUG_SOURCE_OTHER:
81 return "GL_DEBUG_SOURCE_OTHER";
82
83 default:
84 break;
85 }
86 return "UNKNOWN";
87 }
88
auto TypeName(GLenum type)
90 {
91 switch (type) {
92 case GL_DEBUG_TYPE_ERROR:
93 return "GL_DEBUG_TYPE_ERROR";
94 case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR:
95 return "GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR";
96 case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR:
97 return "GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR";
98 case GL_DEBUG_TYPE_PORTABILITY:
99 return "GL_DEBUG_TYPE_PORTABILITY";
100 case GL_DEBUG_TYPE_PERFORMANCE:
101 return "GL_DEBUG_TYPE_PERFORMANCE";
102 case GL_DEBUG_TYPE_MARKER:
103 return "GL_DEBUG_TYPE_MARKER";
104 case GL_DEBUG_TYPE_PUSH_GROUP:
105 return "GL_DEBUG_TYPE_PUSH_GROUP";
106 case GL_DEBUG_TYPE_POP_GROUP:
107 return "GL_DEBUG_TYPE_POP_GROUP";
108 case GL_DEBUG_TYPE_OTHER:
109 return "GL_DEBUG_TYPE_OTHER";
110
111 default:
112 break;
113 }
114 return "UNKNOWN";
115 }
116
auto SeverityName(GLenum severity)
118 {
119 switch (severity) {
120 case GL_DEBUG_SEVERITY_LOW:
121 return "GL_DEBUG_SEVERITY_LOW";
122 case GL_DEBUG_SEVERITY_MEDIUM:
123 return "GL_DEBUG_SEVERITY_MEDIUM";
124 case GL_DEBUG_SEVERITY_HIGH:
125 return "GL_DEBUG_SEVERITY_HIGH";
126 case GL_DEBUG_SEVERITY_NOTIFICATION:
127 return "GL_DEBUG_SEVERITY_NOTIFICATION";
128
129 default:
130 break;
131 }
132 return "UNKNOWN";
133 }
134
135 #ifndef APIENTRY
136 #define APIENTRY
137 #endif
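// Debug message callback: push/pop group markers are ignored, an optional filter can suppress messages, and
// GL_DEBUG_TYPE_ERROR messages are logged as errors while everything else goes to the debug log.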
void APIENTRY OnGlError(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message,
    const void* userParam) noexcept
140 {
141 if (type == GL_DEBUG_TYPE_PUSH_GROUP) {
142 return;
143 } else if (type == GL_DEBUG_TYPE_POP_GROUP) {
144 return;
145 } else if ((filterErrorFunc) && (filterErrorFunc(source, type, id, severity, length, message, userParam))) {
146 return;
147 } else if (type == GL_DEBUG_TYPE_ERROR) {
148 PLUGIN_LOG_E("---------------------opengl-callback-start------------\n"
149 "source: %s\n"
150 "type: %s\n"
151 "id: %u\n"
152 "severity: %s\n"
153 "message: %s\n"
154 "---------------------opengl-callback-end--------------\n",
155 SourceName(source), TypeName(type), id, SeverityName(severity), message);
156 } else {
157 PLUGIN_LOG_D("---------------------opengl-callback-start------------\n"
158 "source: %s\n"
159 "type: %s\n"
160 "id: %u\n"
161 "severity: %s\n"
162 "message: %s\n"
163 "---------------------opengl-callback-end--------------\n",
164 SourceName(source), TypeName(type), id, SeverityName(severity), message);
165 }
166 }
167 #else
168 #define DUMP(a)
169 #endif
170
void DumpLimits()
172 {
173 #if RENDER_GL_DEBUG
174 DUMP(GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS);
175 DUMP(GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS);
176 DUMP(GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS);
177 DUMP(GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS);
178 DUMP(GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS);
179 DUMP(GL_MAX_SHADER_STORAGE_BLOCK_SIZE);
180 #endif
181 }
182
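// Installs OnGlError as the synchronous debug callback for the current context when RENDER_GL_DEBUG is enabled.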
183 template<typename State>
void RegisterDebugCallback(const State& eglState)
185 {
186 #if RENDER_GL_DEBUG
187 filterErrorFunc = (decltype(filterErrorFunc))eglState.ErrorFilter();
188 glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
189 glDebugMessageCallback(OnGlError, nullptr);
190 GLuint unusedIds = 0;
191 glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE, 0, &unusedIds, true);
192 #endif
193 }
194
195 struct FormatFeatures {
196 GLenum internalFormat;
197 FormatFeatureFlags flags;
198 };
199
200 // image store and atomic operations seem to go hand in hand
201 constexpr const FormatFeatureFlags ATOMIC_STORE =
202 CORE_FORMAT_FEATURE_STORAGE_IMAGE_BIT | CORE_FORMAT_FEATURE_STORAGE_IMAGE_ATOMIC_BIT;
203 // no writable texture buffers in gl?
204 constexpr const FormatFeatureFlags TEXEL_BUF = CORE_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
205 // color renderable
206 constexpr const FormatFeatureFlags CR = CORE_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT;
207 // texture filterable
208 constexpr const FormatFeatureFlags TF = CORE_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
// required texture formats. assume they can be sampled and transferred to/from
210 constexpr const FormatFeatureFlags TEX =
211 CORE_FORMAT_FEATURE_SAMPLED_IMAGE_BIT | CORE_FORMAT_FEATURE_TRANSFER_DST_BIT | CORE_FORMAT_FEATURE_TRANSFER_SRC_BIT;
212 // required depth format
213 constexpr const FormatFeatureFlags DS = CORE_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT | TEX | TF;
214
215 constexpr const FormatFeatureFlags TF_TEX = TF | TEX;
216 #if RENDER_HAS_GL_BACKEND
217 static constexpr const FormatFeatureFlags CR_TEX = CR | TEX; // color renderable, texture format
218 #endif
219 constexpr const FormatFeatureFlags CR_REND_TEX = CR | TEX; // color renderable, renderbuffer, texture format
220 #if RENDER_HAS_GLES_BACKEND
221 constexpr const FormatFeatureFlags CR_TF_REND_TEX = CR | TF | TEX;
222 #endif
223
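// Per-backend table of GL internal formats and the format feature flags advertised for them (color-renderable,
// filterable, storage image, texel buffer, etc.).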
224 constexpr const FormatFeatures IMAGE_FORMAT_FEATURES[] = {
225 #if RENDER_HAS_GL_BACKEND
226 { GL_R8, CR_TEX | ATOMIC_STORE | TEXEL_BUF },
227 { GL_R8_SNORM, CR_TEX | ATOMIC_STORE },
228 { GL_R16, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
229 { GL_R16_SNORM, CR_TEX | ATOMIC_STORE },
230 { GL_RG8, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
231 { GL_RG8_SNORM, CR_TEX | ATOMIC_STORE },
232 { GL_RG16, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
233 { GL_RG16_SNORM, CR_TEX | ATOMIC_STORE },
234 // R3_G3_B2 not in base format
235 { GL_RGB4, CR_TEX },
236 // RGB5 not in base format
237 { GL_RGB565, CR_REND_TEX },
238 { GL_RGB8, CR_TEX },
239 { GL_RGB8_SNORM, CR_TEX },
240 { GL_RGB10, CR_TEX },
241 { GL_RGB12, CR_TEX },
242 { GL_RGB16, CR_TEX },
243 { GL_RGB16_SNORM, CR_TEX },
244 // RGBA2 not in base format
245 { GL_RGBA4, CR_REND_TEX },
246 { GL_RGB5_A1, CR_REND_TEX },
247 { GL_RGBA8, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
248 { GL_RGBA8_SNORM, CR_TEX | ATOMIC_STORE },
249 { GL_RGB10_A2, CR_REND_TEX | ATOMIC_STORE },
250 { GL_RGB10_A2UI, CR_REND_TEX | ATOMIC_STORE },
251 { GL_RGBA12, CR_TEX },
252 { GL_RGBA16, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
253 { GL_RGBA16_SNORM, CR_TEX | ATOMIC_STORE },
254 { GL_SRGB8, CR_TEX },
255 { GL_SRGB8_ALPHA8, CR_REND_TEX },
256 { GL_R16F, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
257 { GL_RG16F, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
258 { GL_RGB16F, CR_TEX },
259 { GL_RGBA16F, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
260 { GL_R32F, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
261 { GL_RG32F, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
{ GL_RGB32F, CR_TEX | TEXEL_BUF },
263 { GL_RGBA32F, CR_REND_TEX | ATOMIC_STORE },
264 { GL_R11F_G11F_B10F, CR_REND_TEX | ATOMIC_STORE },
265 { GL_RGB9_E5, TEX },
266 { GL_R8I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
267 { GL_R8UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
268 { GL_R16I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
269 { GL_R16UI, CR_REND_TEX | TEXEL_BUF },
270 { GL_R32I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
271 { GL_R32UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
272 { GL_RG8I, CR_REND_TEX | TEXEL_BUF },
273 { GL_RG8UI, CR_REND_TEX | TEXEL_BUF },
274 { GL_RG16I, CR_REND_TEX | TEXEL_BUF },
275 { GL_RG16UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
276 { GL_RG32I, CR_REND_TEX | TEXEL_BUF },
277 { GL_RG32UI, CR_REND_TEX | TEXEL_BUF },
278 { GL_RGB8I, CR_TEX },
279 { GL_RGB8UI, CR_TEX },
280 { GL_RGB16I, CR_TEX },
281 { GL_RGB16UI, CR_TEX },
282 { GL_RGB32I, CR_TEX | TEXEL_BUF },
283 { GL_RGB32UI, CR_TEX | ATOMIC_STORE | TEXEL_BUF },
284 { GL_RGBA8I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
285 { GL_RGBA8UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
286 { GL_RGBA16I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
287 { GL_RGBA16UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
288 { GL_RGBA32I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
289 { GL_RGBA32UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
290 #elif RENDER_HAS_GLES_BACKEND
291 { GL_R8, CR_TF_REND_TEX | TEXEL_BUF },
292 { GL_R8_SNORM, TF_TEX },
293 { GL_RG8, CR_TF_REND_TEX | TEXEL_BUF },
294 { GL_RG8_SNORM, TF_TEX },
295 { GL_RGB8, CR_TF_REND_TEX },
296 { GL_RGB8_SNORM, TF_TEX },
297 { GL_RGB565, CR_TF_REND_TEX },
298 { GL_RGBA4, CR_TF_REND_TEX },
299 { GL_RGB5_A1, CR_TF_REND_TEX },
300 { GL_RGBA8, CR_TF_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
301 { GL_RGBA8_SNORM, TF_TEX | ATOMIC_STORE },
302 { GL_RGB10_A2, CR_TF_REND_TEX },
303 { GL_RGB10_A2UI, CR_REND_TEX },
304 { GL_SRGB8, TF_TEX },
305 { GL_SRGB8_ALPHA8, CR_TF_REND_TEX },
306 { GL_R16F, CR_TF_REND_TEX | TEXEL_BUF },
307 { GL_RG16F, CR_TF_REND_TEX | TEXEL_BUF },
308 { GL_RGB16F, TF_TEX },
309 { GL_RGBA16F, CR_TF_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
310 { GL_R32F, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
311 { GL_RG32F, CR_REND_TEX | TEXEL_BUF },
312 { GL_RGB32F, TEX | TEXEL_BUF },
313 { GL_RGBA32F, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
314 { GL_R11F_G11F_B10F, CR_TF_REND_TEX },
315 { GL_RGB9_E5, TF_TEX },
316 { GL_R8I, CR_REND_TEX | TEXEL_BUF },
317 { GL_R8UI, CR_REND_TEX | TEXEL_BUF },
318 { GL_R16I, CR_REND_TEX | TEXEL_BUF },
319 { GL_R16UI, CR_REND_TEX | TEXEL_BUF },
320 { GL_R32I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
321 { GL_R32UI, CR_REND_TEX | TEXEL_BUF },
322 { GL_RG8I, CR_REND_TEX | TEXEL_BUF },
323 { GL_RG8UI, CR_REND_TEX | TEXEL_BUF },
324 { GL_RG16I, CR_REND_TEX | TEXEL_BUF },
325 { GL_RG16UI, CR_REND_TEX | TEXEL_BUF },
326 { GL_RG32I, CR_REND_TEX | TEXEL_BUF },
327 { GL_RG32UI, CR_REND_TEX | TEXEL_BUF },
328 { GL_RGB8I, TEX },
329 { GL_RGB8UI, TEX },
330 { GL_RGB16I, TEX },
331 { GL_RGB16UI, TEX },
332 { GL_RGB32I, TEX | TEXEL_BUF },
333 { GL_RGB32UI, TEX | TEXEL_BUF },
334 { GL_RGBA8I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
335 { GL_RGBA8UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
336 { GL_RGBA16I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
337 { GL_RGBA16UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
338 { GL_RGBA32I, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
339 { GL_RGBA32UI, CR_REND_TEX | ATOMIC_STORE | TEXEL_BUF },
340 #endif
341 { GL_DEPTH_COMPONENT16, DS },
342 { GL_DEPTH_COMPONENT24, DS },
343 { GL_DEPTH_COMPONENT32F, DS },
344 { GL_DEPTH24_STENCIL8, DS },
345 { GL_DEPTH32F_STENCIL8, DS },
346 { GL_STENCIL_INDEX8, DS },
347
348 #if (defined(GL_EXT_texture_sRGB_R8) && (GL_EXT_texture_sRGB_R8))
349 { GL_SR8_EXT, TF_TEX },
350 #endif
351
352 #if (defined(GL_EXT_texture_sRGB_RG8) && (GL_EXT_texture_sRGB_RG8))
353 { GL_SRG8_EXT, TF_TEX },
354 #endif
355
356 #if defined(GL_EXT_texture_format_BGRA8888) && (GL_EXT_texture_format_BGRA8888)
357 { GL_BGRA_EXT, CR_REND_TEX },
358 #endif
359
360 #if defined(GL_EXT_texture_norm16) && (GL_EXT_texture_norm16)
361 { GL_R16_EXT, CR_TF_REND_TEX },
362 { GL_RG16_EXT, CR_TF_REND_TEX },
363 { GL_RGB16_EXT, TF_TEX },
364 { GL_RGBA16_EXT, CR_TF_REND_TEX },
365 { GL_R16_SNORM_EXT, TF_TEX },
366 { GL_RG16_SNORM_EXT, TF_TEX },
367 { GL_RGB16_SNORM_EXT, TF_TEX },
368 { GL_RGBA16_SNORM_EXT, TF_TEX },
369 #endif
370
371 { GL_COMPRESSED_R11_EAC, TF_TEX },
372 { GL_COMPRESSED_SIGNED_R11_EAC, TF_TEX },
373 { GL_COMPRESSED_RG11_EAC, TF_TEX },
374 { GL_COMPRESSED_SIGNED_RG11_EAC, TF_TEX },
375 { GL_COMPRESSED_RGB8_ETC2, TF_TEX },
376 { GL_COMPRESSED_SRGB8_ETC2, TF_TEX },
377 { GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2, TF_TEX },
378 { GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2, TF_TEX },
379 { GL_COMPRESSED_RGBA8_ETC2_EAC, TF_TEX },
380 { GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC, TF_TEX },
381 #if RENDER_HAS_GLES_BACKEND
382 { GL_COMPRESSED_RGBA_ASTC_4x4, TF_TEX },
383 { GL_COMPRESSED_RGBA_ASTC_5x4, TF_TEX },
384 { GL_COMPRESSED_RGBA_ASTC_5x5, TF_TEX },
385 { GL_COMPRESSED_RGBA_ASTC_6x5, TF_TEX },
386 { GL_COMPRESSED_RGBA_ASTC_6x6, TF_TEX },
387 { GL_COMPRESSED_RGBA_ASTC_8x5, TF_TEX },
388 { GL_COMPRESSED_RGBA_ASTC_8x6, TF_TEX },
389 { GL_COMPRESSED_RGBA_ASTC_8x8, TF_TEX },
390 { GL_COMPRESSED_RGBA_ASTC_10x5, TF_TEX },
391 { GL_COMPRESSED_RGBA_ASTC_10x6, TF_TEX },
392 { GL_COMPRESSED_RGBA_ASTC_10x8, TF_TEX },
393 { GL_COMPRESSED_RGBA_ASTC_10x10, TF_TEX },
394 { GL_COMPRESSED_RGBA_ASTC_12x10, TF_TEX },
395 { GL_COMPRESSED_RGBA_ASTC_12x12, TF_TEX },
396 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4, TF_TEX },
397 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4, TF_TEX },
398 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5, TF_TEX },
399 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5, TF_TEX },
400 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6, TF_TEX },
401 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5, TF_TEX },
402 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6, TF_TEX },
403 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8, TF_TEX },
404 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5, TF_TEX },
405 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6, TF_TEX },
406 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8, TF_TEX },
407 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10, TF_TEX },
408 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10, TF_TEX },
409 { GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12, TF_TEX },
410 #endif
411 #if defined(GL_EXT_texture_compression_s3tc) && (GL_EXT_texture_compression_s3tc)
412 { GL_COMPRESSED_RGB_S3TC_DXT1_EXT, TF_TEX },
413 { GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, TF_TEX },
414 { GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, TF_TEX },
415 { GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, TF_TEX },
416 #endif
417 #if defined(GL_ARB_texture_compression_bptc) && (GL_ARB_texture_compression_bptc)
418 { GL_COMPRESSED_RGBA_BPTC_UNORM_ARB, TF_TEX },
419 { GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB, TF_TEX },
420 { GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB, TF_TEX },
421 { GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB, TF_TEX },
422 #endif
423 #if defined(GL_EXT_texture_compression_rgtc) && (GL_EXT_texture_compression_rgtc)
424 { GL_COMPRESSED_RED_RGTC1_EXT, TF_TEX },
425 { GL_COMPRESSED_SIGNED_RED_RGTC1_EXT, TF_TEX },
426 { GL_COMPRESSED_RED_GREEN_RGTC2_EXT, TF_TEX },
427 { GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT, TF_TEX },
428 #endif
429 };
430
// Don't allow SRGB_R8 and SRGB_R8G8 internal formats; instead use GL_SRGB8 with a swizzle as a workaround.
432 #define USE_EXTENSION_FORMATS
433
434 #define BLOCK_BITS_8 1
435 #define BLOCK_BITS_16 2
436 #define BLOCK_BITS_32 4
437 #define BLOCK_BITS_64 8
438 #define BLOCK_BITS_128 16
439
440 // GL_EXT_texture_sRGB_R8 extension
441 #if (defined(GL_EXT_texture_sRGB_R8) && (GL_EXT_texture_sRGB_R8))
442 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_SRGB_R8[] = {
443 { BASE_FORMAT_R8_SRGB, GL_RED, GL_SR8_EXT, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
444 { GL_RED, GL_ZERO, GL_ZERO, GL_ONE } },
445 };
446 #endif
447
448 // GL_EXT_texture_sRGB_RG8 extension
449 #if (defined(GL_EXT_texture_sRGB_RG8) && (GL_EXT_texture_sRGB_RG8))
450 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_SRGB_RG8[] = {
451 { BASE_FORMAT_R8G8_SRGB, GL_RG, GL_SRG8_EXT, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
452 { GL_RED, GL_GREEN, GL_ZERO, GL_ONE } },
453 };
454 #endif
455
456 // GL_EXT_texture_sRGB extension
457 #if defined(GL_EXT_texture_sRGB) && (GL_EXT_texture_sRGB)
458 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_SRGB[] = {
459 { BASE_FORMAT_BC1_RGB_SRGB_BLOCK, GL_RGB, GL_COMPRESSED_SRGB_S3TC_DXT1_EXT, GL_UNSIGNED_BYTE, 0,
460 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
461 { BASE_FORMAT_BC1_RGBA_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, GL_UNSIGNED_BYTE, 0,
462 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
463 { BASE_FORMAT_BC2_SRGB_BLOCK, GL_RGB, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, GL_UNSIGNED_BYTE, 0,
464 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
465 { BASE_FORMAT_BC3_SRGB_BLOCK, GL_RGB, GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, GL_UNSIGNED_BYTE, 0,
466 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
467 };
468 #endif
469
470 // GL_EXT_texture_format_BGRA8888 extension
471 #if defined(GL_EXT_texture_format_BGRA8888) && (GL_EXT_texture_format_BGRA8888)
472 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_BGRA[] = {
473 { BASE_FORMAT_B8G8R8A8_UNORM, GL_BGRA_EXT, GL_BGRA_EXT, GL_UNSIGNED_BYTE, 4, { false, 0, 0, 0 },
474 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
475 };
476 #endif
477
478 #if defined(GL_EXT_texture_norm16) && (GL_EXT_texture_norm16)
479 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_NORM16[] = {
480 { BASE_FORMAT_R16_UNORM, GL_RED, GL_R16_EXT, GL_UNSIGNED_SHORT, 2, { false, 0, 0, 0 },
481 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
482 { BASE_FORMAT_R16G16_UNORM, GL_RG, GL_RG16_EXT, GL_UNSIGNED_SHORT, 4, { false, 0, 0, 0 },
483 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
484 { BASE_FORMAT_R16G16B16_UNORM, GL_RGB, GL_RGB16_EXT, GL_UNSIGNED_SHORT, 6, { false, 0, 0, 0 },
485 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
486 { BASE_FORMAT_R16G16B16A16_UNORM, GL_RGBA, GL_RGBA16_EXT, GL_UNSIGNED_SHORT, 8, { false, 0, 0, 0 },
487 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
488 { BASE_FORMAT_R16_SNORM, GL_RED, GL_R16_SNORM_EXT, GL_SHORT, 2, { false, 0, 0, 0 },
489 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
490 { BASE_FORMAT_R16G16_SNORM, GL_RG, GL_RG16_SNORM_EXT, GL_SHORT, 4, { false, 0, 0, 0 },
491 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
492 { BASE_FORMAT_R16G16B16_SNORM, GL_RGB, GL_RGB16_SNORM_EXT, GL_SHORT, 6, { false, 0, 0, 0 },
493 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
494 { BASE_FORMAT_R16G16B16A16_SNORM, GL_RGBA, GL_RGBA16_SNORM_EXT, GL_SHORT, 8, { false, 0, 0, 0 },
495 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
496 };
497 #endif
498
499 // GL_EXT_texture_compression_s3tc extension
500 #if defined(GL_EXT_texture_compression_s3tc) && (GL_EXT_texture_compression_s3tc)
501 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_S3TC[] = {
502 { BASE_FORMAT_BC1_RGB_UNORM_BLOCK, GL_RGB, GL_COMPRESSED_RGB_S3TC_DXT1_EXT, GL_UNSIGNED_BYTE, 0,
503 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
504 { BASE_FORMAT_BC1_RGBA_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_S3TC_DXT1_EXT, GL_UNSIGNED_BYTE, 0,
505 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
506 { BASE_FORMAT_BC2_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_S3TC_DXT3_EXT, GL_UNSIGNED_BYTE, 0,
507 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
508 { BASE_FORMAT_BC3_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_S3TC_DXT5_EXT, GL_UNSIGNED_BYTE, 0,
509 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
510 };
511 #endif
512
513 // GL_ARB_texture_compression_bptc extension
514 #if defined(GL_ARB_texture_compression_bptc) && (GL_ARB_texture_compression_bptc)
515 static constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_BPTC[] = {
516 { BASE_FORMAT_BC7_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_BPTC_UNORM_ARB, GL_UNSIGNED_BYTE, 0,
517 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
518 { BASE_FORMAT_BC7_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB, GL_UNSIGNED_BYTE, 0,
519 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
520 { BASE_FORMAT_BC6H_SFLOAT_BLOCK, GL_RGB, GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB, GL_FLOAT, 0,
521 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
522 { BASE_FORMAT_BC6H_UFLOAT_BLOCK, GL_RGB, GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB, GL_FLOAT, 0,
523 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
524 };
525 #endif
526
// GL_EXT_texture_compression_rgtc extension
528 #if defined(GL_EXT_texture_compression_rgtc) && (GL_EXT_texture_compression_rgtc)
529 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_EXT_RGTC[] = {
530 { BASE_FORMAT_BC4_UNORM_BLOCK, GL_RED, GL_COMPRESSED_RED_RGTC1_EXT, GL_UNSIGNED_BYTE, 0,
531 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
532 { BASE_FORMAT_BC4_SNORM_BLOCK, GL_RED, GL_COMPRESSED_SIGNED_RED_RGTC1_EXT, GL_UNSIGNED_BYTE, 0,
533 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
534 { BASE_FORMAT_BC5_UNORM_BLOCK, GL_RG, GL_COMPRESSED_RED_GREEN_RGTC2_EXT, GL_UNSIGNED_BYTE, 0,
535 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
536 { BASE_FORMAT_BC5_SNORM_BLOCK, GL_RG, GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT, GL_UNSIGNED_BYTE, 0,
537 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
538 };
539 #endif
540
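// Fallback mappings for base formats without a native GL counterpart: the closest supported internal format is used
// together with a component swizzle (e.g. BGRA layouts emulated on top of RGBA internal formats).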
541 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS_FALLBACK[] = {
542 { BASE_FORMAT_R4G4_UNORM_PACK8, GL_RG, GL_RGBA4, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
543 { GL_RED, GL_GREEN, GL_ZERO, GL_ONE } },
544 { BASE_FORMAT_B4G4R4A4_UNORM_PACK16, GL_RGBA, GL_RGBA4, GL_UNSIGNED_SHORT_4_4_4_4, 2, { false, 0, 0, 0 },
545 { GL_BLUE, GL_GREEN, GL_RED, GL_ALPHA } },
546 { BASE_FORMAT_B5G6R5_UNORM_PACK16, GL_RGB, GL_RGB565, GL_UNSIGNED_SHORT_5_6_5, 2, { false, 0, 0, 0 },
547 { GL_BLUE, GL_GREEN, GL_RED, GL_ONE } },
548 { BASE_FORMAT_B5G5R5A1_UNORM_PACK16, GL_RGBA, GL_RGB5_A1, GL_UNSIGNED_SHORT_5_5_5_1, 2, { false, 0, 0, 0 },
549 { GL_BLUE, GL_GREEN, GL_RED, GL_ALPHA } },
550 { BASE_FORMAT_A1R5G5B5_UNORM_PACK16, GL_RGBA, GL_RGB5_A1, GL_UNSIGNED_SHORT_5_5_5_1, 2, { false, 0, 0, 0 },
551 { GL_BLUE, GL_GREEN, GL_RED, GL_ALPHA } },
552
// not available on desktop GL; available as extensions in OpenGL ES ("GL_EXT_texture_sRGB_R8" / "GL_EXT_texture_sRGB_RG8")
554 { BASE_FORMAT_R8_SRGB, GL_RED, GL_SRGB8, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
555 { GL_RED, GL_ZERO, GL_ZERO, GL_ONE } },
556 { BASE_FORMAT_R8G8_SRGB, GL_RG, GL_SRGB8, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
557 { GL_RED, GL_GREEN, GL_ZERO, GL_ONE } },
558
559 { BASE_FORMAT_A2R10G10B10_UNORM_PACK32, GL_RGBA, GL_RGB10_A2, GL_UNSIGNED_INT_2_10_10_10_REV, 4, { false, 0, 0, 0 },
560 { GL_BLUE, GL_GREEN, GL_RED, GL_ALPHA } },
561 { BASE_FORMAT_A2R10G10B10_UINT_PACK32, GL_RGBA, GL_RGB10_A2UI, GL_UNSIGNED_INT_2_10_10_10_REV, 4,
562 { false, 0, 0, 0 }, { GL_BLUE, GL_GREEN, GL_RED, GL_ALPHA } },
563
// available as an extension in OpenGL ES ("GL_EXT_texture_norm16") -> fall back to half float
565 { BASE_FORMAT_R16_UNORM, GL_RED, GL_R16F, GL_HALF_FLOAT, 2, { false, 0, 0, 0 },
566 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
567 { BASE_FORMAT_R16G16_UNORM, GL_RG, GL_RG16F, GL_HALF_FLOAT, 4, { false, 0, 0, 0 },
568 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
569 { BASE_FORMAT_R16G16B16_UNORM, GL_RGB, GL_RGB16F, GL_HALF_FLOAT, 6, { false, 0, 0, 0 },
570 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
571 { BASE_FORMAT_R16G16B16A16_UNORM, GL_RGBA, GL_RGBA16F, GL_HALF_FLOAT, 8, { false, 0, 0, 0 },
572 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
573 };
574
575 // NOTE: verify this table. add missing formats.
576 constexpr DeviceGLES::ImageFormat IMAGE_FORMATS[] = {
577 { BASE_FORMAT_UNDEFINED, GL_NONE, GL_NONE, GL_NONE, 0, { false, 0, 0, 0 },
578 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
579 // These are required in GL and GLES
580 { BASE_FORMAT_R8_UNORM, GL_RED, GL_R8, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
581 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
582 { BASE_FORMAT_R8_SNORM, GL_RED, GL_R8_SNORM, GL_BYTE, 1, { false, 0, 0, 0 },
583 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
584 { BASE_FORMAT_R8G8_UNORM, GL_RG, GL_RG8, GL_UNSIGNED_BYTE, 2, { false, 0, 0, 0 },
585 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
586 { BASE_FORMAT_R8G8_SNORM, GL_RG, GL_RG8, GL_BYTE, 2, { false, 0, 0, 0 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
587 { BASE_FORMAT_R5G6B5_UNORM_PACK16, GL_RGB, GL_RGB565, GL_UNSIGNED_SHORT_5_6_5, 2, { false, 0, 0, 0 },
588 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
589 { BASE_FORMAT_R8G8B8_UNORM, GL_RGB, GL_RGB8, GL_UNSIGNED_BYTE, 3, { false, 0, 0, 0 },
590 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
591 { BASE_FORMAT_R8G8B8_SNORM, GL_RGB, GL_RGB8, GL_BYTE, 3, { false, 0, 0, 0 },
592 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
593 { BASE_FORMAT_R4G4B4A4_UNORM_PACK16, GL_RGBA, GL_RGBA4, GL_UNSIGNED_SHORT_4_4_4_4, 2, { false, 0, 0, 0 },
594 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
595 { BASE_FORMAT_R5G5B5A1_UNORM_PACK16, GL_RGBA, GL_RGB5_A1, GL_UNSIGNED_SHORT_5_5_5_1, 2, { false, 0, 0, 0 },
596 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
597 { BASE_FORMAT_R8G8B8A8_UNORM, GL_RGBA, GL_RGBA8, GL_UNSIGNED_BYTE, 4, { false, 0, 0, 0 },
598 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
599 { BASE_FORMAT_R8G8B8A8_SNORM, GL_RGBA, GL_RGBA8, GL_BYTE, 4, { false, 0, 0, 0 },
600 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
601 { BASE_FORMAT_A2B10G10R10_UNORM_PACK32, GL_RGBA, GL_RGB10_A2, GL_UNSIGNED_INT_2_10_10_10_REV, 4, { false, 0, 0, 0 },
602 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
603 { BASE_FORMAT_A2B10G10R10_UINT_PACK32, GL_RGBA, GL_RGB10_A2UI, GL_UNSIGNED_INT_2_10_10_10_REV, 4,
604 { false, 0, 0, 0 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
605 { BASE_FORMAT_R8G8B8_SRGB, GL_RGB, GL_SRGB8, GL_UNSIGNED_BYTE, 3, { false, 0, 0, 0 },
606 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
607 { BASE_FORMAT_R8G8B8A8_SRGB, GL_RGBA, GL_SRGB8_ALPHA8, GL_UNSIGNED_BYTE, 4, { false, 0, 0, 0 },
608 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
609 { BASE_FORMAT_R16_SFLOAT, GL_RED, GL_R16F, GL_HALF_FLOAT, 2, { false, 0, 0, 0 },
610 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
611 { BASE_FORMAT_R16G16_SFLOAT, GL_RG, GL_RG16F, GL_HALF_FLOAT, 4, { false, 0, 0, 0 },
612 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
613 { BASE_FORMAT_R16G16B16_SFLOAT, GL_RGB, GL_RGB16F, GL_HALF_FLOAT, 6, { false, 0, 0, 0 },
614 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
615 { BASE_FORMAT_R16G16B16A16_SFLOAT, GL_RGBA, GL_RGBA16F, GL_HALF_FLOAT, 8, { false, 0, 0, 0 },
616 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
617 { BASE_FORMAT_R32_SFLOAT, GL_RED, GL_R32F, GL_FLOAT, 4, { false, 0, 0, 0 },
618 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
619 { BASE_FORMAT_R32G32_SFLOAT, GL_RG, GL_RG32F, GL_FLOAT, 8, { false, 0, 0, 0 },
620 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
621 { BASE_FORMAT_R32G32B32_SFLOAT, GL_RGB, GL_RGB32F, GL_FLOAT, 12, { false, 0, 0, 0 },
622 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
623 { BASE_FORMAT_R32G32B32A32_SFLOAT, GL_RGBA, GL_RGBA32F, GL_FLOAT, 16, { false, 0, 0, 0 },
624 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
625 // Special R11 G11 B10 float format. This format does not work with compute on GLES, but works as a texture. (this
626 // is handled elsewhere)
627 { BASE_FORMAT_B10G11R11_UFLOAT_PACK32, GL_RGB, GL_R11F_G11F_B10F, GL_UNSIGNED_INT_10F_11F_11F_REV, 4,
628 { false, 0, 0, 0 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
629 { BASE_FORMAT_E5B9G9R9_UFLOAT_PACK32, GL_RGB, GL_RGB9_E5, GL_UNSIGNED_INT_5_9_9_9_REV, 4, { false, 0, 0, 0 },
630 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
631 { BASE_FORMAT_R8_SINT, GL_RED_INTEGER, GL_R8I, GL_BYTE, 1, { false, 0, 0, 0 },
632 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
633 { BASE_FORMAT_R8_UINT, GL_RED_INTEGER, GL_R8UI, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
634 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
635 { BASE_FORMAT_R16_SINT, GL_RED_INTEGER, GL_R16I, GL_SHORT, 2, { false, 0, 0, 0 },
636 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
637 { BASE_FORMAT_R16_UINT, GL_RED_INTEGER, GL_R16UI, GL_UNSIGNED_SHORT, 2, { false, 0, 0, 0 },
638 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
639 { BASE_FORMAT_R32_SINT, GL_RED_INTEGER, GL_R32I, GL_INT, 4, { false, 0, 0, 0 },
640 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
641 { BASE_FORMAT_R32_UINT, GL_RED_INTEGER, GL_R32UI, GL_UNSIGNED_INT, 4, { false, 0, 0, 0 },
642 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
643 { BASE_FORMAT_R8G8_SINT, GL_RG_INTEGER, GL_RG8I, GL_BYTE, 2, { false, 0, 0, 0 },
644 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
{ BASE_FORMAT_R8G8_UINT, GL_RG_INTEGER, GL_RG8UI, GL_UNSIGNED_BYTE, 2, { false, 0, 0, 0 },
646 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
647 { BASE_FORMAT_R16G16_SINT, GL_RG_INTEGER, GL_RG16I, GL_SHORT, 4, { false, 0, 0, 0 },
648 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
649 { BASE_FORMAT_R16G16_UINT, GL_RG_INTEGER, GL_RG16UI, GL_UNSIGNED_SHORT, 4, { false, 0, 0, 0 },
650 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
651 { BASE_FORMAT_R32G32_SINT, GL_RG_INTEGER, GL_RG32I, GL_INT, 8, { false, 0, 0, 0 },
652 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
653 { BASE_FORMAT_R32G32_UINT, GL_RG_INTEGER, GL_RG32UI, GL_UNSIGNED_INT, 8, { false, 0, 0, 0 },
654 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
655 { BASE_FORMAT_R8G8B8_SINT, GL_RGB_INTEGER, GL_RGB8I, GL_BYTE, 3, { false, 0, 0, 0 },
656 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
657 { BASE_FORMAT_R8G8B8_UINT, GL_RGB_INTEGER, GL_RGB8UI, GL_UNSIGNED_BYTE, 3, { false, 0, 0, 0 },
658 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
659 { BASE_FORMAT_R16G16B16_SINT, GL_RGB_INTEGER, GL_RGB16I, GL_SHORT, 6, { false, 0, 0, 0 },
660 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
661 { BASE_FORMAT_R16G16B16_UINT, GL_RGB_INTEGER, GL_RGB16UI, GL_UNSIGNED_SHORT, 6, { false, 0, 0, 0 },
662 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
663 { BASE_FORMAT_R32G32B32_SINT, GL_RGB_INTEGER, GL_RGB32I, GL_INT, 12, { false, 0, 0, 0 },
664 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
665 { BASE_FORMAT_R32G32B32_UINT, GL_RGB_INTEGER, GL_RGB32UI, GL_UNSIGNED_INT, 12, { false, 0, 0, 0 },
666 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
667 { BASE_FORMAT_R8G8B8A8_SINT, GL_RGBA_INTEGER, GL_RGBA8I, GL_BYTE, 4, { false, 0, 0, 0 },
668 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
669 { BASE_FORMAT_R8G8B8A8_UINT, GL_RGBA_INTEGER, GL_RGBA8UI, GL_UNSIGNED_BYTE, 4, { false, 0, 0, 0 },
670 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
671 { BASE_FORMAT_R16G16B16A16_SINT, GL_RGBA_INTEGER, GL_RGBA16I, GL_SHORT, 8, { false, 0, 0, 0 },
672 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
673 { BASE_FORMAT_R16G16B16A16_UINT, GL_RGBA_INTEGER, GL_RGBA16UI, GL_UNSIGNED_SHORT, 8, { false, 0, 0, 0 },
674 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
675 { BASE_FORMAT_R32G32B32A32_SINT, GL_RGBA_INTEGER, GL_RGBA32I, GL_INT, 16, { false, 0, 0, 0 },
676 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
677 { BASE_FORMAT_R32G32B32A32_UINT, GL_RGBA_INTEGER, GL_RGBA32UI, GL_UNSIGNED_INT, 16, { false, 0, 0, 0 },
678 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
679 { BASE_FORMAT_D16_UNORM, GL_DEPTH_COMPONENT, GL_DEPTH_COMPONENT16, GL_UNSIGNED_SHORT, 2, { false, 0, 0, 0 },
680 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
681 { BASE_FORMAT_X8_D24_UNORM_PACK32, GL_DEPTH_COMPONENT, GL_DEPTH_COMPONENT24, GL_UNSIGNED_INT, 4, { false, 0, 0, 0 },
682 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
683 { BASE_FORMAT_D32_SFLOAT, GL_DEPTH_COMPONENT, GL_DEPTH_COMPONENT32F, GL_FLOAT, 4, { false, 0, 0, 0 },
684 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
685 { BASE_FORMAT_S8_UINT, GL_STENCIL_INDEX, GL_STENCIL_INDEX8, GL_UNSIGNED_BYTE, 1, { false, 0, 0, 0 },
686 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
687 { BASE_FORMAT_D24_UNORM_S8_UINT, GL_DEPTH_STENCIL, GL_DEPTH24_STENCIL8, GL_UNSIGNED_INT_24_8, 4, { false, 0, 0, 0 },
688 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
689 { BASE_FORMAT_D32_SFLOAT_S8_UINT, GL_DEPTH_STENCIL, GL_DEPTH32F_STENCIL8, GL_FLOAT_32_UNSIGNED_INT_24_8_REV, 8,
690 { false, 0, 0, 0 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
691 // EAC
692 { BASE_FORMAT_EAC_R11_UNORM_BLOCK, GL_RED, GL_COMPRESSED_R11_EAC, GL_UNSIGNED_BYTE, 0,
693 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
694 { BASE_FORMAT_EAC_R11_SNORM_BLOCK, GL_RED, GL_COMPRESSED_SIGNED_R11_EAC, GL_BYTE, 0, { true, 4, 4, BLOCK_BITS_64 },
695 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
696 { BASE_FORMAT_EAC_R11G11_UNORM_BLOCK, GL_RG, GL_COMPRESSED_RG11_EAC, GL_UNSIGNED_BYTE, 0,
697 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
698 { BASE_FORMAT_EAC_R11G11_SNORM_BLOCK, GL_RG, GL_COMPRESSED_SIGNED_RG11_EAC, GL_BYTE, 0,
699 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
700 // ETC
701 { BASE_FORMAT_ETC2_R8G8B8_UNORM_BLOCK, GL_RGB, GL_COMPRESSED_RGB8_ETC2, GL_UNSIGNED_BYTE, 0,
702 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
703 { BASE_FORMAT_ETC2_R8G8B8_SRGB_BLOCK, GL_RGB, GL_COMPRESSED_SRGB8_ETC2, GL_UNSIGNED_BYTE, 0,
704 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
705 { BASE_FORMAT_ETC2_R8G8B8A1_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2, GL_UNSIGNED_BYTE, 0,
706 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
707 { BASE_FORMAT_ETC2_R8G8B8A1_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2, GL_UNSIGNED_BYTE, 0,
708 { true, 4, 4, BLOCK_BITS_64 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
709 { BASE_FORMAT_ETC2_R8G8B8A8_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA8_ETC2_EAC, GL_UNSIGNED_BYTE, 0,
710 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
711 { BASE_FORMAT_ETC2_R8G8B8A8_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC, GL_UNSIGNED_BYTE, 0,
712 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
713 #if RENDER_HAS_GL_BACKEND
714 // required by GL
715 { BASE_FORMAT_R16_UNORM, GL_RED, GL_R16, GL_UNSIGNED_SHORT, 2, { false, 0, 0, 0 },
716 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
717 { BASE_FORMAT_R16_SNORM, GL_RED, GL_R16, GL_SHORT, 2, { false, 0, 0, 0 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
718 { BASE_FORMAT_R16G16_UNORM, GL_RG, GL_RG16, GL_UNSIGNED_SHORT, 4, { false, 0, 0, 0 },
719 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
720 { BASE_FORMAT_R16G16_SNORM, GL_RG, GL_RG16, GL_SHORT, 4, { false, 0, 0, 0 },
721 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
722 // GL_R3_G3_B2, RGB4, RGB5, RGB10, RGB12 not in base formats
723 { BASE_FORMAT_R16G16B16_UNORM, GL_RGB, GL_RGB16, GL_UNSIGNED_SHORT, 6, { false, 0, 0, 0 },
724 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
725 { BASE_FORMAT_R16G16B16_SNORM, GL_RGB, GL_RGB16, GL_SHORT, 6, { false, 0, 0, 0 },
726 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
727 // RGBA2, RGBA12 not in base formats
728 { BASE_FORMAT_R16G16B16A16_UNORM, GL_RGBA, GL_RGBA16, GL_UNSIGNED_SHORT, 8, { false, 0, 0, 0 },
729 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
730 { BASE_FORMAT_R16G16B16A16_SNORM, GL_RGBA, GL_RGBA16, GL_SHORT, 8, { false, 0, 0, 0 },
731 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
732 // STENCIL_INDEX1, STENCIL_INDEX4, STENCIL_INDEX16 not in base formats
733 { BASE_FORMAT_B4G4R4A4_UNORM_PACK16, GL_BGRA, GL_RGBA4, GL_UNSIGNED_SHORT_4_4_4_4_REV, 2, { false, 0, 0, 0 },
734 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
735 { BASE_FORMAT_A1R5G5B5_UNORM_PACK16, GL_BGRA, GL_RGB5_A1, GL_UNSIGNED_SHORT_1_5_5_5_REV, 2, { false, 0, 0, 0 },
736 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
737 { BASE_FORMAT_B5G6R5_UNORM_PACK16, GL_BGR, GL_RGB565, GL_UNSIGNED_SHORT_5_6_5_REV, 2, { false, 0, 0, 0 },
738 { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
739 #elif RENDER_HAS_GLES_BACKEND
740 // required by GLES
741 { BASE_FORMAT_ASTC_4x4_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_4x4_KHR, GL_UNSIGNED_BYTE, 0,
742 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
743 { BASE_FORMAT_ASTC_5x4_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_5x4_KHR, GL_UNSIGNED_BYTE, 0,
744 { true, 5, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
745 { BASE_FORMAT_ASTC_5x5_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_5x5_KHR, GL_UNSIGNED_BYTE, 0,
746 { true, 5, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
747 { BASE_FORMAT_ASTC_6x5_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_6x5_KHR, GL_UNSIGNED_BYTE, 0,
748 { true, 6, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
749 { BASE_FORMAT_ASTC_6x6_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_6x6_KHR, GL_UNSIGNED_BYTE, 0,
750 { true, 6, 6, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
751 { BASE_FORMAT_ASTC_8x5_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_8x5_KHR, GL_UNSIGNED_BYTE, 0,
752 { true, 8, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
753 { BASE_FORMAT_ASTC_8x6_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_8x6_KHR, GL_UNSIGNED_BYTE, 0,
754 { true, 8, 6, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
755 { BASE_FORMAT_ASTC_8x8_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_8x8_KHR, GL_UNSIGNED_BYTE, 0,
756 { true, 8, 8, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
757 { BASE_FORMAT_ASTC_10x5_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_10x5_KHR, GL_UNSIGNED_BYTE, 0,
758 { true, 10, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
759 { BASE_FORMAT_ASTC_10x6_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_10x6_KHR, GL_UNSIGNED_BYTE, 0,
760 { true, 10, 6, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
761 { BASE_FORMAT_ASTC_10x8_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_10x8_KHR, GL_UNSIGNED_BYTE, 0,
762 { true, 10, 8, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
763 { BASE_FORMAT_ASTC_10x10_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_10x10_KHR, GL_UNSIGNED_BYTE, 0,
764 { true, 10, 10, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
765 { BASE_FORMAT_ASTC_12x10_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_12x10_KHR, GL_UNSIGNED_BYTE, 0,
766 { true, 12, 10, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
767 { BASE_FORMAT_ASTC_12x12_UNORM_BLOCK, GL_RGBA, GL_COMPRESSED_RGBA_ASTC_12x12_KHR, GL_UNSIGNED_BYTE, 0,
768 { true, 12, 12, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
769 { BASE_FORMAT_ASTC_4x4_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR, GL_UNSIGNED_BYTE, 0,
770 { true, 4, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
771 { BASE_FORMAT_ASTC_5x4_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR, GL_UNSIGNED_BYTE, 0,
772 { true, 5, 4, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
773 { BASE_FORMAT_ASTC_5x5_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR, GL_UNSIGNED_BYTE, 0,
774 { true, 5, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
775 { BASE_FORMAT_ASTC_6x5_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR, GL_UNSIGNED_BYTE, 0,
776 { true, 6, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
777 { BASE_FORMAT_ASTC_6x6_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR, GL_UNSIGNED_BYTE, 0,
778 { true, 6, 6, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
779 { BASE_FORMAT_ASTC_8x5_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR, GL_UNSIGNED_BYTE, 0,
780 { true, 8, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
781 { BASE_FORMAT_ASTC_8x6_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR, GL_UNSIGNED_BYTE, 0,
782 { true, 8, 6, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
783 { BASE_FORMAT_ASTC_8x8_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR, GL_UNSIGNED_BYTE, 0,
784 { true, 8, 8, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
785 { BASE_FORMAT_ASTC_10x5_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR, GL_UNSIGNED_BYTE, 0,
786 { true, 10, 5, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
787 { BASE_FORMAT_ASTC_10x6_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR, GL_UNSIGNED_BYTE, 0,
788 { true, 10, 6, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
789 { BASE_FORMAT_ASTC_10x8_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, GL_UNSIGNED_BYTE, 0,
790 { true, 10, 8, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
791 { BASE_FORMAT_ASTC_10x10_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR, GL_UNSIGNED_BYTE, 0,
792 { true, 10, 10, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
793 { BASE_FORMAT_ASTC_12x10_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, GL_UNSIGNED_BYTE, 0,
794 { true, 12, 10, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
795 { BASE_FORMAT_ASTC_12x12_SRGB_BLOCK, GL_RGBA, GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR, GL_UNSIGNED_BYTE, 0,
796 { true, 12, 12, BLOCK_BITS_128 }, { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA } },
797 #endif
798 };
799
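// Appends the image format mappings of each optional texture extension that is both compiled in and reported by the
// driver at runtime.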
void FillExtensionFormats(const DeviceGLES& device, BASE_NS::vector<DeviceGLES::ImageFormat>& sf)
801 {
802 #if (defined(GL_EXT_texture_sRGB_R8) && (GL_EXT_texture_sRGB_R8))
803 if (device.HasExtension("GL_EXT_texture_sRGB_R8")) {
804 sf.append(std::begin(IMAGE_FORMATS_EXT_SRGB_R8), std::end(IMAGE_FORMATS_EXT_SRGB_R8));
805 }
806 #endif
807
808 #if (defined(GL_EXT_texture_sRGB_RG8) && (GL_EXT_texture_sRGB_RG8))
809 if (device.HasExtension("GL_EXT_texture_sRGB_RG8")) {
810 sf.append(std::begin(IMAGE_FORMATS_EXT_SRGB_RG8), std::end(IMAGE_FORMATS_EXT_SRGB_RG8));
811 }
812 #endif
813
814 #if defined(GL_EXT_texture_sRGB) && (GL_EXT_texture_sRGB)
815 if (device.HasExtension("GL_EXT_texture_sRGB")) {
816 sf.append(std::begin(IMAGE_FORMATS_EXT_SRGB), std::end(IMAGE_FORMATS_EXT_SRGB));
817 }
818 #endif
819
820 #if defined(GL_EXT_texture_format_BGRA8888) && (GL_EXT_texture_format_BGRA8888)
821 if (device.HasExtension("GL_EXT_texture_format_BGRA8888")) {
822 sf.append(std::begin(IMAGE_FORMATS_EXT_BGRA), std::end(IMAGE_FORMATS_EXT_BGRA));
823 }
824 #endif
825
826 #if defined(GL_EXT_texture_norm16) && (GL_EXT_texture_norm16)
827 if (device.HasExtension("GL_EXT_texture_norm16")) {
828 sf.append(std::begin(IMAGE_FORMATS_EXT_NORM16), std::end(IMAGE_FORMATS_EXT_NORM16));
829 }
830 #endif
831
832 #if defined(GL_EXT_texture_compression_s3tc) && (GL_EXT_texture_compression_s3tc)
833 if (device.HasExtension("GL_EXT_texture_compression_s3tc")) {
834 sf.append(std::begin(IMAGE_FORMATS_EXT_S3TC), std::end(IMAGE_FORMATS_EXT_S3TC));
835 }
836 #endif
837
838 #if defined(GL_ARB_texture_compression_bptc) && (GL_ARB_texture_compression_bptc)
839 if (device.HasExtension("GL_ARB_texture_compression_bptc")) {
840 sf.append(std::begin(IMAGE_FORMATS_EXT_BPTC), std::end(IMAGE_FORMATS_EXT_BPTC));
841 }
842 #endif
843
844 #if defined(GL_EXT_texture_compression_rgtc) && (GL_EXT_texture_compression_rgtc)
845 if (device.HasExtension("GL_EXT_texture_compression_rgtc")) {
846 sf.append(std::begin(IMAGE_FORMATS_EXT_RGTC), std::end(IMAGE_FORMATS_EXT_RGTC));
847 }
848 #endif
849 }
850
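// Builds the device's supported format table: required formats first, then extension formats, sorted by core format.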
void FillSupportedFormats(const DeviceGLES& device, BASE_NS::vector<DeviceGLES::ImageFormat>& sf)
852 {
// First add the required formats and then append more based on the supported extensions
854 sf.append(std::begin(IMAGE_FORMATS), std::end(IMAGE_FORMATS));
855
856 FillExtensionFormats(device, sf);
857
858 // Keep the list sorted for faster lookup
859 std::sort(sf.begin(), sf.end(), [](const DeviceGLES::ImageFormat& lhs, const DeviceGLES::ImageFormat& rhs) {
860 return lhs.coreFormat < rhs.coreFormat;
861 });
862 }
863
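// Collects the extension strings reported by the driver into a sorted list (and dumps them in debug builds).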
void FillExtensions(BASE_NS::vector<BASE_NS::string_view>& extensions)
865 {
866 GLint n = 0;
867 glGetIntegerv(GL_NUM_EXTENSIONS, &n);
868 extensions.reserve(n + 1U);
869 for (GLuint i = 0U; i < static_cast<GLuint>(n); ++i) {
870 const auto ext = reinterpret_cast<const char*>(glGetStringi(GL_EXTENSIONS, i));
871 extensions.emplace_back(ext);
872 }
873 std::sort(
874 extensions.begin(), extensions.end(), [](const string_view& lhs, const string_view& rhs) { return lhs < rhs; });
875 #ifndef NDEBUG
PLUGIN_LOG_V("GL_EXTENSIONS:");
877 for (const auto& ext : extensions) {
878 PLUGIN_LOG_V("\t%s", ext.data());
879 }
880 #endif // !NDEBUG
881 }
882 } // namespace
883
// Some OpenGL/ES buffer and texture targets are not handled below, and using them will trigger an assertion unless
// the following define is added locally or as part of the build command: #define HANDLE_UNSUPPORTED_ENUMS
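// The helpers below convert between GL buffer/texture target enums and the compact ids used for state caching.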
inline uint32_t DeviceGLES::TargetToBinding(uint32_t target)
887 {
888 if (target == GL_UNIFORM_BUFFER) {
889 return GL_UNIFORM_BUFFER_BINDING;
890 } else if (target == GL_SHADER_STORAGE_BUFFER) {
891 return GL_SHADER_STORAGE_BUFFER_BINDING;
892 } else if (target == GL_PIXEL_UNPACK_BUFFER) {
893 return GL_PIXEL_UNPACK_BUFFER_BINDING;
894 } else if (target == GL_COPY_READ_BUFFER) {
895 return GL_COPY_READ_BUFFER_BINDING;
896 } else if (target == GL_COPY_WRITE_BUFFER) {
897 return GL_COPY_WRITE_BUFFER_BINDING;
898 #ifdef HANDLE_UNSUPPORTED_ENUMS
899 } else if (target == GL_ATOMIC_COUNTER_BUFFER) {
900 return GL_ATOMIC_COUNTER_BUFFER_BINDING;
901 } else if (target == GL_TRANSFORM_FEEDBACK_BUFFER) {
902 return GL_TRANSFORM_FEEDBACK_BUFFER_BINDING;
903 } else if (target == GL_PIXEL_PACK_BUFFER) {
904 return GL_PIXEL_PACK_BUFFER_BINDING;
905 } else if (target == GL_QUERY_BUFFER) {
906 return GL_QUERY_BUFFER_BINDING;
907 } else if (target == GL_ARRAY_BUFFER) {
908 return GL_ARRAY_BUFFER_BINDING;
909 } else if (target == GL_DISPATCH_INDIRECT_BUFFER) {
910 return GL_DISPATCH_INDIRECT_BUFFER_BINDING;
911 } else if (target == GL_DRAW_INDIRECT_BUFFER) {
912 return GL_DRAW_INDIRECT_BUFFER_BINDING;
913 } else if (target == GL_ELEMENT_ARRAY_BUFFER) { // stored in VAO state...
914 return GL_ELEMENT_ARRAY_BUFFER_BINDING;
915 } else if (target == GL_TEXTURE_BUFFER) {
916 return GL_TEXTURE_BUFFER_BINDING;
917 #endif
918 }
919 PLUGIN_ASSERT_MSG(false, "UNHANDLED BUFFER BIND TARGET UNIT");
920 return GL_NONE;
921 }
922
inline DeviceGLES::BufferBindId DeviceGLES::IndexedTargetToTargetId(uint32_t target)
924 {
925 if (target == GL_UNIFORM_BUFFER) {
926 return BufferBindId::UNIFORM_BUFFER_BIND;
927 } else if (target == GL_SHADER_STORAGE_BUFFER) {
928 return BufferBindId::SHADER_STORAGE_BUFFER_BIND;
929 #ifdef HANDLE_UNSUPPORTED_ENUMS
930 } else if (target == GL_ATOMIC_COUNTER_BUFFER) {
931 return BufferBindId::ATOMIC_COUNTER_BUFFER;
932 } else if (target == GL_TRANSFORM_FEEDBACK_BUFFER) {
933 return BufferBindId::TRANSFORM_FEEDBACK_BUFFER;
934 #endif
935 }
936 PLUGIN_ASSERT_MSG(false, "UNHANDLED BUFFER BIND TARGET UNIT");
937 return BufferBindId::MAX_BUFFER_BIND_ID;
938 }
939
inline uint32_t DeviceGLES::IndexedTargetIdToTarget(DeviceGLES::BufferBindId target)
941 {
942 if (target == BufferBindId::UNIFORM_BUFFER_BIND) {
943 return GL_UNIFORM_BUFFER;
944 } else if (target == BufferBindId::SHADER_STORAGE_BUFFER_BIND) {
945 return GL_SHADER_STORAGE_BUFFER;
946 #ifdef HANDLE_UNSUPPORTED_ENUMS
947 } else if (target == BufferBindId::ATOMIC_COUNTER_BUFFER_BIND) {
948 return GL_ATOMIC_COUNTER_BUFFER;
949 } else if (target == BufferBindId::TRANSFORM_FEEDBACK_BUFFER_BIND) {
950 return GL_TRANSFORM_FEEDBACK_BUFFER;
951 #endif
952 }
953 PLUGIN_ASSERT_MSG(false, "UNHANDLED BUFFER BIND TARGET UNIT");
954 return 0;
955 }
956
inline DeviceGLES::BufferTargetId DeviceGLES::GenericTargetToTargetId(uint32_t target)
958 {
959 if (target == GL_PIXEL_UNPACK_BUFFER) {
960 return BufferTargetId::PIXEL_UNPACK_BUFFER;
961 } else if (target == GL_PIXEL_PACK_BUFFER) {
962 return BufferTargetId::PIXEL_PACK_BUFFER;
963 } else if (target == GL_COPY_READ_BUFFER) {
964 return BufferTargetId::COPY_READ_BUFFER;
965 } else if (target == GL_COPY_WRITE_BUFFER) {
966 return BufferTargetId::COPY_WRITE_BUFFER;
967 } else if (target == GL_UNIFORM_BUFFER) {
968 return BufferTargetId::UNIFORM_BUFFER;
969 } else if (target == GL_SHADER_STORAGE_BUFFER) {
970 return BufferTargetId::SHADER_STORAGE_BUFFER;
971 } else if (target == GL_DISPATCH_INDIRECT_BUFFER) {
972 return BufferTargetId::DISPATCH_INDIRECT_BUFFER;
973 } else if (target == GL_DRAW_INDIRECT_BUFFER) {
974 return BufferTargetId::DRAW_INDIRECT_BUFFER;
975 #ifdef HANDLE_UNSUPPORTED_ENUMS
976 } else if (target == GL_ATOMIC_COUNTER_BUFFER) {
977 return BufferTargetId::ATOMIC_COUNTER_BUFFER;
978 } else if (target == GL_QUERY_BUFFER) {
979 return BufferTargetId::QUERY_BUFFER;
980 } else if (target == GL_TRANSFORM_FEEDBACK_BUFFER) {
981 return BufferTargetId::TRANSFORM_FEEDBACK_BUFFER;
982 } else if (target == GL_ARRAY_BUFFER) {
983 return BufferTargetId::ARRAY_BUFFER;
984 } else if (target == GL_ELEMENT_ARRAY_BUFFER) { // stored in VAO state...
985 return BufferTargetId::ELEMENT_ARRAY_BUFFER;
986 } else if (target == GL_TEXTURE_BUFFER) {
987 return BufferTargetId::TEXTURE_BUFFER;
988 #endif
989 }
990 PLUGIN_ASSERT_MSG(false, "UNHANDLED BUFFER BIND TARGET");
991 return BufferTargetId::MAX_BUFFER_TARGET_ID;
992 }
993
inline uint32_t DeviceGLES::GenericTargetIdToTarget(BufferTargetId target)
995 {
996 if (target == BufferTargetId::PIXEL_UNPACK_BUFFER) {
997 return GL_PIXEL_UNPACK_BUFFER;
998 } else if (target == BufferTargetId::PIXEL_PACK_BUFFER) {
999 return GL_PIXEL_PACK_BUFFER;
1000 } else if (target == BufferTargetId::COPY_READ_BUFFER) {
1001 return GL_COPY_READ_BUFFER;
1002 } else if (target == BufferTargetId::COPY_WRITE_BUFFER) {
1003 return GL_COPY_WRITE_BUFFER;
1004 } else if (target == BufferTargetId::UNIFORM_BUFFER) {
1005 return GL_UNIFORM_BUFFER;
1006 } else if (target == BufferTargetId::SHADER_STORAGE_BUFFER) {
1007 return GL_SHADER_STORAGE_BUFFER;
1008 } else if (target == BufferTargetId::DISPATCH_INDIRECT_BUFFER) {
1009 return GL_DISPATCH_INDIRECT_BUFFER;
1010 } else if (target == BufferTargetId::DRAW_INDIRECT_BUFFER) {
1011 return GL_DRAW_INDIRECT_BUFFER;
1012 #ifdef HANDLE_UNSUPPORTED_ENUMS
1013 } else if (target == BufferTargetId::ATOMIC_COUNTER_BUFFER) {
1014 return GL_ATOMIC_COUNTER_BUFFER;
1015 } else if (target == BufferTargetId::QUERY_BUFFER) {
1016 return GL_QUERY_BUFFER;
1017 } else if (target == BufferTargetId::TRANSFORM_FEEDBACK_BUFFER) {
1018 return GL_TRANSFORM_FEEDBACK_BUFFER;
1019 } else if (target == BufferTargetId::ARRAY_BUFFER) {
1020 return GL_ARRAY_BUFFER;
1021 } else if (target == BufferTargetId::ELEMENT_ARRAY_BUFFER) { // stored in VAO state...
1022 return GL_ELEMENT_ARRAY_BUFFER;
1023 } else if (target == BufferTargetId::TEXTURE_BUFFER) {
1024 return GL_TEXTURE_BUFFER;
1025 #endif
1026 }
1027 PLUGIN_ASSERT_MSG(false, "UNHANDLED BUFFER BIND TARGET");
1028 return 0;
1029 }
1030
inline DeviceGLES::TextureTargetId DeviceGLES::TextureTargetToTargetId(uint32_t target)
1032 {
1033 if (target == GL_TEXTURE_2D) {
1034 return TextureTargetId::TEXTURE_2D;
1035 } else if ((target == GL_TEXTURE_CUBE_MAP_POSITIVE_X) || (target == GL_TEXTURE_CUBE_MAP_NEGATIVE_X) ||
1036 (target == GL_TEXTURE_CUBE_MAP_POSITIVE_Y) || (target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Y) ||
1037 (target == GL_TEXTURE_CUBE_MAP_POSITIVE_Z) || (target == GL_TEXTURE_CUBE_MAP_NEGATIVE_Z) ||
1038 (target == GL_TEXTURE_CUBE_MAP)) {
1039 return TextureTargetId::TEXTURE_CUBE_MAP;
1040 #if RENDER_HAS_GLES_BACKEND
1041 } else if (target == GL_TEXTURE_EXTERNAL_OES) {
1042 return TextureTargetId::TEXTURE_EXTERNAL_OES;
1043 #endif
1044 } else if (target == GL_TEXTURE_2D_MULTISAMPLE) {
1045 return TextureTargetId::TEXTURE_2D_MULTISAMPLE;
1046 } else if (target == GL_TEXTURE_2D_ARRAY) {
1047 return TextureTargetId::TEXTURE_2D_ARRAY;
1048 } else if (target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
1049 return TextureTargetId::TEXTURE_2D_MULTISAMPLE_ARRAY;
1050 } else if (target == GL_TEXTURE_3D) {
1051 return TextureTargetId::TEXTURE_3D;
1052 }
1053 PLUGIN_ASSERT_MSG(false, "UNHANDLED TEXTURE TARGET UNIT");
1054 return TextureTargetId::MAX_TEXTURE_TARGET_ID;
1055 }
1056
inline uint32_t DeviceGLES::TextureTargetIdToTarget(DeviceGLES::TextureTargetId target)
1058 {
1059 if (target == TextureTargetId::TEXTURE_2D) {
1060 return GL_TEXTURE_2D;
1061 } else if (target == TextureTargetId::TEXTURE_CUBE_MAP) {
1062 return GL_TEXTURE_CUBE_MAP;
1063 #if RENDER_HAS_GLES_BACKEND
1064 } else if (target == TextureTargetId::TEXTURE_EXTERNAL_OES) {
1065 return GL_TEXTURE_EXTERNAL_OES;
1066 #endif
1067 } else if (target == TextureTargetId::TEXTURE_2D_MULTISAMPLE) {
1068 return GL_TEXTURE_2D_MULTISAMPLE;
1069 } else if (target == TextureTargetId::TEXTURE_2D_MULTISAMPLE_ARRAY) {
1070 return GL_TEXTURE_2D_MULTISAMPLE_ARRAY;
1071 } else if (target == TextureTargetId::TEXTURE_2D_ARRAY) {
1072 return GL_TEXTURE_2D_ARRAY;
1073 } else if (target == TextureTargetId::TEXTURE_3D) {
1074 return GL_TEXTURE_3D;
1075 }
1076 PLUGIN_ASSERT_MSG(false, "UNHANDLED TEXTURE TARGET UNIT");
1077 return 0;
1078 }
1079
1080 thread_local bool DeviceGLES::isActiveInThread_ = false;
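// Makes the GL(ES) context current for the calling thread, using the given swapchain's surface or a dummy surface
// when no swapchain exists.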
void DeviceGLES::Activate(RenderHandle swapchain)
1082 {
1083 if (HasSwapchain()) {
1084 eglState_.SetContext(static_cast<const SwapchainGLES*>(GetSwapchain(swapchain)));
1085 } else {
1086 // bind the dummy surface as there is no swapchain.
1087 eglState_.SetContext(nullptr);
1088 }
1089 }
1090
void DeviceGLES::SwapBuffers(const SwapchainGLES& swapchain)
1092 {
1093 eglState_.SwapBuffers(swapchain);
1094 }
1095
1096 #if RENDER_HAS_GL_BACKEND
const WGLHelpers::WGLState& DeviceGLES::GetEglState()
1098 #endif
1099 #if RENDER_HAS_GLES_BACKEND
1100 const EGLHelpers::EGLState& DeviceGLES::GetEglState()
1101 #endif
1102 {
1103 return eglState_;
1104 }
1105
1106 #if RENDER_HAS_GLES_BACKEND
bool DeviceGLES::IsDepthResolveSupported() const
1108 {
1109 return backendConfig_.allowDepthResolve;
1110 }
1111 #endif
1112
DeviceGLES::DeviceGLES(RenderContext& renderContext) : Device(renderContext)
1114 {
1115 eglState_.CreateContext(renderContext.GetCreateInfo().deviceCreateInfo);
1116 if (!eglState_.IsValid()) {
1117 PLUGIN_LOG_F("Failed to create a context");
1118 return;
1119 }
1120 eglState_.GlInitialize();
1121
1122 RegisterDebugCallback(eglState_);
1123
1124 PLUGIN_LOG_I("GL_VENDOR: %s", glGetString(GL_VENDOR));
1125 PLUGIN_LOG_I("GL_RENDERER: %s", glGetString(GL_RENDERER));
1126 PLUGIN_LOG_I("GL_VERSION: %s", glGetString(GL_VERSION));
1127 PLUGIN_LOG_I("GL_SHADING_LANGUAGE_VERSION: %s", glGetString(GL_SHADING_LANGUAGE_VERSION));
1128 GLint value = 0;
1129 glGetIntegerv(GL_NUM_SHADER_BINARY_FORMATS, &value);
1130 PLUGIN_LOG_I("GL_NUM_SHADER_BINARY_FORMATS: %d", value);
1131 if (value > 0) {
1132 supportsBinaryShaders_ = true;
1133 #if defined(RENDER_GL_DEBUG) && (RENDER_GL_DEBUG)
1134 vector<GLint> values(static_cast<size_t>(value), 0);
1135 glGetIntegerv(GL_SHADER_BINARY_FORMATS, values.data());
1136 for (const auto& format : values) {
1137 PLUGIN_LOG_I(" SHADER_BINARY_FORMATS: %x", format);
1138 }
1139 #endif
1140 }
1141 glGetIntegerv(GL_NUM_PROGRAM_BINARY_FORMATS, &value);
1142 PLUGIN_LOG_I("GL_NUM_PROGRAM_BINARY_FORMATS: %d", value);
1143 if (value > 0) {
1144 supportsBinaryPrograms_ = true;
1145 #if defined(RENDER_GL_DEBUG) && (RENDER_GL_DEBUG)
1146 vector<GLint> values(static_cast<size_t>(value), 0);
1147 glGetIntegerv(GL_PROGRAM_BINARY_FORMATS, values.data());
1148 for (const auto& format : values) {
1149 PLUGIN_LOG_I(" PROGRAM_BINARY_FORMATS: %x", format);
1150 }
1151 #endif
1152 }
1153 FillExtensions(extensions_);
1154
1155 #if RENDER_HAS_GLES_BACKEND
1156 if (!HasExtension("GL_EXT_buffer_storage")) {
1157 glBufferStorageEXT = nullptr;
1158 }
1159
1160 if (!HasExtension("GL_OES_EGL_image")) {
1161 glEGLImageTargetTexture2DOES = nullptr;
1162 }
1163
1164 if (!HasExtension("GL_EXT_multisampled_render_to_texture")) {
1165 glRenderbufferStorageMultisampleEXT = nullptr;
1166 glFramebufferTexture2DMultisampleEXT = nullptr;
1167 }
1168
1169 if (!HasExtension("GL_OVR_multiview")) {
1170 glFramebufferTextureMultiviewOVR = nullptr;
1171 }
1172
1173 if (!HasExtension("GL_OVR_multiview_multisampled_render_to_texture")) {
1174 glFramebufferTextureMultisampleMultiviewOVR = nullptr;
1175 }
1176
1177 if (!HasExtension("GL_EXT_disjoint_timer_query")) {
1178 glGetQueryObjectui64vEXT = nullptr;
1179 }
1180
1181 if (!HasExtension("GL_EXT_external_buffer")) {
1182 glBufferStorageExternalEXT = nullptr;
1183 }
1184 #endif
1185
1186 #if RENDER_HAS_GL_BACKEND
1187 // Extension in OpenGL ES, but part of core in OpenGL.
1188 if (const auto pos = std::lower_bound(extensions_.cbegin(), extensions_.cend(), EXT_BUFFER_STORAGE,
1189 [](const string_view& element, const string_view& value) { return element < value; });
1190 (pos == extensions_.cend()) || (*pos != EXT_BUFFER_STORAGE)) {
1191 extensions_.insert(pos, EXT_BUFFER_STORAGE);
1192 }
1193 // Seamless cubemap filtering is always enabled in Vulkan and in GLES 3.0+
1194 // (ES 3.0 made it mandatory; it was not available before ES 3.0).
1195 // On desktop GL it is optional
1196 // (though the capability must be supported since 3.2),
1197 // so enable it unconditionally to make desktop GL behave like Vulkan and GLES.
1198 glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
1199 #endif
1200
1201 // Dump interesting constants.
1202 DumpLimits();
1203
1204 boundReadFbo_ = boundWriteFbo_ = 0;
1205 eglState_.RestoreContext();
1206 #ifdef RENDER_OPTIMIZE_FOR_INTEGRATED_GPU
1207 // NOTE: we expect e.g. mobile devices to have integrated memory, where we can bypass staging and write directly to
1208 // linear gpu buffers without additional copies or a performance penalty
1209 deviceSharedMemoryPropertyFlags_ = CORE_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | CORE_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
1210 #endif
1211 SetDeviceStatus(true);
1212
1213 FillSupportedFormats(*this, supportedFormats_);
1214
1215 const GpuResourceManager::CreateInfo grmCreateInfo {
1216 GpuResourceManager::GPU_RESOURCE_MANAGER_OPTIMIZE_STAGING_MEMORY,
1217 };
1218 gpuResourceMgr_ = make_unique<GpuResourceManager>(*this, grmCreateInfo);
1219 shaderMgr_ = make_unique<ShaderManager>(*this);
1220 globalDescriptorSetMgr_ = make_unique<DescriptorSetManagerGles>(*this);
1221
1222 lowLevelDevice_ = make_unique<LowLevelDeviceGLES>(*this);
1223 }
1224
1225 DeviceGLES::~DeviceGLES()
1226 {
1227 if (eglState_.IsValid()) {
1228 Activate(); // make sure we are active during teardown..
1229 WaitForIdle();
1230
1231 globalDescriptorSetMgr_.reset();
1232 // must release handles before taking down gpu resource manager.
1233 swapchains_.clear();
1234
1235 gpuResourceMgr_.reset();
1236 shaderMgr_.reset();
1237 Deactivate(); // restore the previously active context..
1238 {
1239 // destroying the context should be the last thing we do
1240 auto lock = std::lock_guard(activeMutex_);
1241 eglState_.DestroyContext();
1242 }
1243 }
1244 }
1245
1246 bool DeviceGLES::HasExtension(const string_view extension) const
1247 {
1248 return std::binary_search(extensions_.begin(), extensions_.end(), extension,
1249 [](const string_view& element, const string_view value) { return element < value; });
1250 }
1251
1252 DeviceBackendType DeviceGLES::GetBackendType() const
1253 {
1254 return backendType_;
1255 }
1256
1257 const DevicePlatformData& DeviceGLES::GetPlatformData() const
1258 {
1259 return eglState_.GetPlatformData();
1260 }
1261
1262 ILowLevelDevice& DeviceGLES::GetLowLevelDevice() const
1263 {
1264 return *lowLevelDevice_;
1265 }
1266
1267 FormatProperties DeviceGLES::GetFormatProperties(const Format format) const
1268 {
1269 FormatProperties properties;
1270 auto& glFormat = GetGlImageFormat(format);
1271 if (glFormat.internalFormat != GL_NONE) {
1272 if (auto pos = std::find_if(std::begin(IMAGE_FORMAT_FEATURES), std::end(IMAGE_FORMAT_FEATURES),
1273 [internalFormat = glFormat.internalFormat](
1274 const FormatFeatures& features) { return features.internalFormat == internalFormat; });
1275 pos != std::end(IMAGE_FORMAT_FEATURES)) {
1276 // split texel buffer support to bufferFeatures
1277 properties.linearTilingFeatures = properties.optimalTilingFeatures =
1278 pos->flags & ~CORE_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT;
1279 // assume if the format can be sampled it can be used as a vertex buffer.
1280 properties.bufferFeatures =
1281 ((pos->flags & CORE_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) ? CORE_FORMAT_FEATURE_VERTEX_BUFFER_BIT : 0U) |
1282 (pos->flags & CORE_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT);
1283 #if RENDER_HAS_GL_BACKEND
1284 // desktop GL can filter anything
1285 properties.linearTilingFeatures |= TF;
1286 properties.optimalTilingFeatures |= TF;
1287 #endif
1288 // can probably blit if not compressed
1289 if (!glFormat.compression.compressed) {
1290 properties.linearTilingFeatures |= CORE_FORMAT_FEATURE_BLIT_DST_BIT | CORE_FORMAT_FEATURE_BLIT_SRC_BIT;
1291 properties.optimalTilingFeatures |= CORE_FORMAT_FEATURE_BLIT_DST_BIT | CORE_FORMAT_FEATURE_BLIT_SRC_BIT;
1292 }
1293 properties.bytesPerPixel = glFormat.bytesperpixel;
1294 }
1295 }
1296 return properties;
1297 }
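// Illustrative note (added for clarity, not part of the original source): a caller might
// interpret the returned flags roughly as in the hedged sketch below; 'device' and 'format'
// are hypothetical names.
//
//   const FormatProperties props = device.GetFormatProperties(format);
//   if (props.optimalTilingFeatures & CORE_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) {
//       // the format can be sampled in shaders
//   }
//   if (props.bufferFeatures & CORE_FORMAT_FEATURE_VERTEX_BUFFER_BIT) {
//       // the format can be used for vertex attribute data
//   }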
1298
1299 AsBuildSizes DeviceGLES::GetAccelerationStructureBuildSizes(const AsBuildGeometryInfo& geometry,
1300 BASE_NS::array_view<const AsGeometryTrianglesInfo> triangles, BASE_NS::array_view<const AsGeometryAabbsInfo> aabbs,
1301 BASE_NS::array_view<const AsGeometryInstancesInfo> instances) const
1302 {
1303 return {};
1304 }
1305
1306 PlatformGpuMemoryAllocator* DeviceGLES::GetPlatformGpuMemoryAllocator()
1307 {
1308 PLUGIN_ASSERT_MSG(false, "DeviceGLES::GetPlatformGpuMemoryAllocator called!");
1309 return nullptr;
1310 }
1311
1312 // (re-)create swapchain
1313 unique_ptr<Swapchain> DeviceGLES::CreateDeviceSwapchain(const SwapchainCreateInfo& swapchainCreateInfo)
1314 {
1315 PLUGIN_ASSERT(IsActive());
1316 auto swapchain = make_unique<SwapchainGLES>(*this, swapchainCreateInfo);
1317 // Switch to the new swapchain.
1318 eglState_.SetContext(swapchain.get());
1319 return swapchain;
1320 }
1321
1322 void DeviceGLES::DestroyDeviceSwapchain()
1323 {
1324 PLUGIN_ASSERT(IsActive());
1325 // Drop to the dummy context (i.e. 1x1 surface etc...)
1326 eglState_.SetContext(nullptr);
1327 }
1328
1329 bool DeviceGLES::IsActive() const
1330 {
1331 return isActiveInThread_;
1332 }
1333
1334 void DeviceGLES::Activate()
1335 {
1336 activeMutex_.lock();
1337 if (isActive_ == 0) {
1338 eglState_.SaveContext();
1339
1340 constexpr RenderHandle defaultSwapchain {};
1341 Activate(defaultSwapchain);
1342 isActiveInThread_ = true;
1343 }
1344 isActive_++;
1345 }
1346
1347 void DeviceGLES::Deactivate()
1348 {
1349 PLUGIN_ASSERT_MSG(isActiveInThread_, "Deactivate called while already inactive");
1350 if (isActive_ == 1) {
1351 eglState_.RestoreContext();
1352 isActiveInThread_ = false;
1353 }
1354 if (isActive_ > 0) {
1355 isActive_--;
1356 }
1357 activeMutex_.unlock();
1358 }
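// Illustrative sketch (added for clarity, not part of the original source): Activate() and
// Deactivate() form a reference-counted pair guarded by activeMutex_; the thread-local
// isActiveInThread_ flag and the isActive_ counter let calls nest within one thread
// (assuming the mutex supports recursive locking by the same thread, as the counter implies):
//
//   device.Activate();      // saves the current context and makes the device context current
//   device.Activate();      // nested call: only increments the counter
//   ...                     // GL work
//   device.Deactivate();    // decrements the counter
//   device.Deactivate();    // restores the previously current context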
1359
1360 GpuQueue DeviceGLES::GetValidGpuQueue(const GpuQueue& gpuQueue) const
1361 {
1362 return { GpuQueue::QueueType::GRAPHICS, 0 }; // no queues -> graphics
1363 }
1364
1365 uint32_t DeviceGLES::GetGpuQueueCount() const
1366 {
1367 return 1;
1368 }
1369
1370 struct CacheHeader {
1371 uint32_t version;
1372 uint32_t programs;
1373 uint64_t revisionHash;
1374 struct Program {
1375 uint64_t vertHash;
1376 uint64_t fragHash;
1377 uint64_t compHash;
1378 GLuint offset;
1379 GLenum binaryFormat;
1380 };
1381 };
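// Layout sketch of the serialized pipeline cache (derived from the code below, added for
// clarity, not part of the original source). Program offsets are relative to the start of
// the binary blob area that follows the Program table:
//
//   [ CacheHeader            ]  version, program count, driver revision hash
//   [ Program[0..programs-1] ]  per-program shader hashes, blob offset, binary format
//   [ binary blobs           ]  glGetProgramBinary() data, one blob per Program entry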
1382 void DeviceGLES::InitializePipelineCache(array_view<const uint8_t> initialData)
1383 {
1384 if (initialData.size() <= sizeof(CacheHeader)) {
1385 return;
1386 }
1387 auto* header = reinterpret_cast<const CacheHeader*>(initialData.data());
1388 if ((header->version != CACHE_VERSION) ||
1389 (header->revisionHash != Hash(string_view(reinterpret_cast<const char *>(glGetString(GL_VENDOR))),
1390 string_view(reinterpret_cast<const char *>(glGetString(GL_RENDERER))),
1391 string_view(reinterpret_cast<const char *>(glGetString(GL_VERSION)))))) {
1392 return;
1393 }
1394 if ((sizeof(CacheHeader) + header->programs * sizeof(CacheHeader::Program)) > initialData.size()) {
1395 return;
1396 }
1397 auto programs = array_view(
1398 reinterpret_cast<const CacheHeader::Program*>(initialData.data() + sizeof(CacheHeader)), header->programs);
1399 auto binaryData = array_view(initialData.data() + (sizeof(CacheHeader) + programs.size_bytes()),
1400 initialData.size() - (sizeof(CacheHeader) + programs.size_bytes()));
1401 for (auto i = 0U; i < programs.size(); ++i) {
1402 auto& program = programs[i];
1403 GLsizei length;
1404 if ((i + 1) < programs.size()) {
1405 length = static_cast<GLsizei>(programs[i + 1].offset) - static_cast<GLsizei>(program.offset);
1406 } else {
1407 length = static_cast<GLsizei>(binaryData.size()) - static_cast<GLsizei>(program.offset);
1408 }
1409 if ((length < 0) || (program.offset + length) > binaryData.size()) {
1410 continue;
1411 }
1412
1413 const GLuint programObj = glCreateProgram();
1414 glProgramBinary(programObj, program.binaryFormat, binaryData.data() + program.offset, length);
1415 GLint result = GL_FALSE;
1416 glGetProgramiv(programObj, GL_LINK_STATUS, &result);
1417 if (result != GL_FALSE) {
1418 auto& entry = programs_.emplace_back();
1419 entry.program = programObj;
1420 entry.hashVert = program.vertHash;
1421 entry.hashFrag = program.fragHash;
1422 entry.hashComp = program.compHash;
1423 } else {
1424 #if (RENDER_VALIDATION_ENABLED == 1)
1425 GLint logLength = 0;
1426 glGetProgramiv(programObj, GL_INFO_LOG_LENGTH, &logLength);
1427 string messages;
1428 messages.resize(static_cast<size_t>(logLength));
1429 glGetProgramInfoLog(programObj, logLength, 0, messages.data());
1430 PLUGIN_LOG_ONCE_E("gl_shader_linking_error_" + to_string(programObj),
1431 "RENDER_VALIDATION: Shader linking error: %s", messages.c_str());
1432 #endif
1433 glDeleteProgram(programObj);
1434 }
1435 }
1436 }
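// Usage sketch (hedged, not part of the original source): the data produced by
// GetPipelineCache() is expected to be persisted by the embedder and handed back here on a
// later run; 'SaveToDisk' and 'LoadFromDisk' below are hypothetical helpers.
//
//   const vector<uint8_t> cache = device.GetPipelineCache();  // after warming up pipelines
//   SaveToDisk(cache);
//   ...
//   device.InitializePipelineCache(LoadFromDisk());           // next run, before building PSOs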
1437
1438 vector<uint8_t> DeviceGLES::GetPipelineCache() const
1439 {
1440 vector<uint8_t> cacheData;
1441 if (!supportsBinaryPrograms_) {
1442 return cacheData;
1443 }
1444
1445 CacheHeader header;
1446 header.version = CACHE_VERSION;
1447 header.revisionHash = Hash(string_view(reinterpret_cast<const char*>(glGetString(GL_VENDOR))),
1448 string_view(reinterpret_cast<const char*>(glGetString(GL_RENDERER))),
1449 string_view(reinterpret_cast<const char*>(glGetString(GL_VERSION))));
1450
1451 vector<CacheHeader::Program> programs;
1452 for (const auto& info : programs_) {
1453 GLint programLength = 0;
1454 glGetProgramiv(info.program, GL_PROGRAM_BINARY_LENGTH, &programLength);
1455 if (!programLength) {
1456 continue;
1457 }
1458 const auto offset = cacheData.size();
1459 cacheData.resize(offset + programLength);
1460 GLsizei length;
1461 GLenum binaryFormat;
1462 glGetProgramBinary(info.program, programLength, &length, &binaryFormat, cacheData.data() + offset);
1463 programs.push_back({ info.hashVert, info.hashFrag, info.hashComp, static_cast<GLuint>(offset), binaryFormat });
1464 }
1465 header.programs = static_cast<uint32_t>(programs.size());
1466 cacheData.insert(cacheData.cbegin(), sizeof(CacheHeader) + programs.size_in_bytes(), uint8_t(0U));
1467 CloneData(cacheData.data(), cacheData.size(), &header, sizeof(CacheHeader));
1468 CloneData(cacheData.data() + sizeof(CacheHeader), cacheData.size() - sizeof(CacheHeader), programs.data(),
1469 programs.size_in_bytes());
1470 return cacheData;
1471 }
1472
1473 void DeviceGLES::WaitForIdle()
1474 {
1475 const bool activeState = IsActive();
1476 if (!activeState) {
1477 Activate();
1478 }
1479 if (!isRenderbackendRunning_) {
1480 PLUGIN_LOG_D("Device - WaitForIdle");
1481 glFinish();
1482 } else {
1483 PLUGIN_LOG_E("Device WaitForIdle can only be called when the render backend is not running");
1484 }
1485 if (!activeState) {
1486 Deactivate();
1487 }
1488 }
1489
1490 #if (RENDER_HAS_GL_BACKEND)
1491 unique_ptr<Device> CreateDeviceGL(RenderContext& renderContext)
1492 {
1493 if (auto device = make_unique<DeviceGLES>(renderContext); device) {
1494 const auto& plat = static_cast<const DevicePlatformDataGL&>(device->GetPlatformData());
1495 if (plat.context != nullptr) {
1496 return device;
1497 }
1498 }
1499 return nullptr;
1500 }
1501 #endif
1502 #if (RENDER_HAS_GLES_BACKEND)
1503 unique_ptr<Device> CreateDeviceGLES(RenderContext& renderContext)
1504 {
1505 if (auto device = make_unique<DeviceGLES>(renderContext); device) {
1506 const auto& plat = static_cast<const DevicePlatformDataGLES&>(device->GetPlatformData());
1507 if (plat.context != EGL_NO_CONTEXT) {
1508 return device;
1509 }
1510 }
1511 return nullptr;
1512 }
1513 #endif
1514
1515 void DeviceGLES::ReleaseShader(uint32_t type, uint32_t shader)
1516 {
1517 vector<ShaderCache::Entry>* cache = nullptr;
1518 if (type == GL_FRAGMENT_SHADER) {
1519 cache = &shaders_[DeviceGLES::FRAGMENT_CACHE].cache;
1520 } else if (type == GL_VERTEX_SHADER) {
1521 cache = &shaders_[DeviceGLES::VERTEX_CACHE].cache;
1522 } else if (type == GL_COMPUTE_SHADER) {
1523 cache = &shaders_[DeviceGLES::COMPUTE_CACHE].cache;
1524 } else {
1525 return;
1526 }
1527
1528 const auto pos = std::find_if(
1529 cache->begin(), cache->end(), [shader](const ShaderCache::Entry& entry) { return entry.shader == shader; });
1530 if (pos != cache->end()) {
1531 ShaderCache::Entry& entry = *pos;
1532 entry.refCount--;
1533 if (entry.refCount == 0) {
1534 glDeleteShader(entry.shader);
1535 cache->erase(pos);
1536 }
1537 return;
1538 }
1539
1540 PLUGIN_ASSERT_MSG(false, "Tried to release a non-existent shader?");
1541 }
1542
1543 void DeviceGLES::ReleaseProgram(uint32_t program)
1544 {
1545 PLUGIN_ASSERT_MSG(isActive_, "Device not active when releasing shaders");
1546 for (auto it = programs_.begin(); it != programs_.end(); it++) {
1547 auto& t = *it;
1548 if (t.program != program) {
1549 continue;
1550 }
1551 t.refCount--;
1552 if (t.refCount == 0) {
1553 if (t.fragShader) {
1554 ReleaseShader(GL_FRAGMENT_SHADER, t.fragShader);
1555 }
1556 if (t.vertShader) {
1557 ReleaseShader(GL_VERTEX_SHADER, t.vertShader);
1558 }
1559 if (t.compShader) {
1560 ReleaseShader(GL_COMPUTE_SHADER, t.compShader);
1561 }
1562 glDeleteProgram(t.program);
1563 programs_.erase(it);
1564 }
1565 return;
1566 }
1567 PLUGIN_ASSERT_MSG(false, "Tried to release a non-existent program?");
1568 }
1569
1570 const DeviceGLES::ShaderCache::Entry& DeviceGLES::CacheShader(int type, const string_view source)
1571 {
1572 PLUGIN_ASSERT(type < MAX_CACHES);
1573 if (source.empty()) {
1574 static constexpr DeviceGLES::ShaderCache::Entry invalid {};
1575 return invalid;
1576 }
1577 static constexpr GLenum types[] = { GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, GL_COMPUTE_SHADER };
1578 // NOTE: consider other hash functions; verifying the sources (memcmp) on a hash collision would also be prudent.
1579 const uint64_t hash = FNV1aHash(source.data(), source.size());
1580 PLUGIN_ASSERT(hash != 0);
1581 for (auto& t : shaders_[type].cache) {
1582 if (t.hash == hash) {
1583 shaders_[type].hit++;
1584 t.refCount++;
1585 return t;
1586 }
1587 }
1588 shaders_[type].miss++;
1589 DeviceGLES::ShaderCache::Entry entry;
1590 entry.hash = hash;
1591 entry.shader = glCreateShader(types[type]);
1592 entry.refCount = 1;
1593 const GLint len = static_cast<GLint>(source.length());
1594 const auto data = source.data();
1595 glShaderSource(entry.shader, 1, &data, &len);
1596 glCompileShader(entry.shader);
1597 GLint result = GL_FALSE;
1598 glGetShaderiv(entry.shader, GL_COMPILE_STATUS, &result);
1599 if (result == GL_FALSE) {
1600 GLint logLength = 0;
1601 glGetShaderiv(entry.shader, GL_INFO_LOG_LENGTH, &logLength);
1602 string messages;
1603 messages.resize(static_cast<size_t>(logLength));
1604 glGetShaderInfoLog(entry.shader, logLength, 0, messages.data());
1605 PLUGIN_LOG_F("Shader compilation error: %s", messages.c_str());
1606 glDeleteShader(entry.shader);
1607 entry.shader = 0U;
1608 }
1609 shaders_[type].cache.push_back(entry);
1610 return shaders_[type].cache.back();
1611 }
1612
1613 uint32_t DeviceGLES::CacheProgram(
1614 const string_view vertSource, const string_view fragSource, const string_view compSource)
1615 {
1616 PLUGIN_ASSERT_MSG(isActive_, "Device not active when building shaders");
1617 const uint64_t vertHash = vertSource.empty() ? 0U : FNV1aHash(vertSource.data(), vertSource.size());
1618 const uint64_t fragHash = fragSource.empty() ? 0U : FNV1aHash(fragSource.data(), fragSource.size());
1619 const uint64_t compHash = compSource.empty() ? 0U : FNV1aHash(compSource.data(), compSource.size());
1620 // First check if we already have the program cached (i.e. matching shaders linked)
1621 for (ProgramCache& t : programs_) {
1622 if ((t.hashVert != vertHash) || (t.hashFrag != fragHash) || (t.hashComp != compHash)) {
1623 continue;
1624 }
1625 pCacheHit_++;
1626 t.refCount++;
1627 return t.program;
1628 }
1629
1630 // Hash and cache shader sources.
1631 const auto& vEntry = CacheShader(DeviceGLES::VERTEX_CACHE, vertSource);
1632 const auto& fEntry = CacheShader(DeviceGLES::FRAGMENT_CACHE, fragSource);
1633 const auto& cEntry = CacheShader(DeviceGLES::COMPUTE_CACHE, compSource);
1634 // Then check if we have the program already cached (ie. matching shaders linked)
1635 for (ProgramCache& t : programs_) {
1636 if ((t.hashVert != vEntry.hash) || (t.hashFrag != fEntry.hash) || (t.hashComp != cEntry.hash)) {
1637 continue;
1638 }
1639 pCacheHit_++;
1640 t.refCount++;
1641 return t.program;
1642 }
1643 // Create new program
1644 pCacheMiss_++;
1645 const GLuint program = glCreateProgram();
1646 if (supportsBinaryPrograms_) {
1647 glProgramParameteri(program, GL_PROGRAM_BINARY_RETRIEVABLE_HINT, GL_TRUE);
1648 }
1649 #if defined(CORE_USE_SEPARATE_SHADER_OBJECTS) && (CORE_USE_SEPARATE_SHADER_OBJECTS == 1)
1650 // enable separable programs.
1651 glProgramParameteri(program, GL_PROGRAM_SEPARABLE, GL_TRUE);
1652 #endif
1653 // Attach and link
1654 if (vEntry.shader) {
1655 glAttachShader(program, vEntry.shader);
1656 }
1657 if (fEntry.shader) {
1658 glAttachShader(program, fEntry.shader);
1659 }
1660 if (cEntry.shader) {
1661 glAttachShader(program, cEntry.shader);
1662 }
1663 glLinkProgram(program);
1664 if (vEntry.shader) {
1665 glDetachShader(program, vEntry.shader);
1666 }
1667 if (fEntry.shader) {
1668 glDetachShader(program, fEntry.shader);
1669 }
1670 if (cEntry.shader) {
1671 glDetachShader(program, cEntry.shader);
1672 }
1673 GLint result = GL_FALSE;
1674 glGetProgramiv(program, GL_LINK_STATUS, &result);
1675 if (result == GL_FALSE) {
1676 #if (RENDER_VALIDATION_ENABLED == 1)
1677 GLint logLength = 0;
1678 glGetProgramiv(program, GL_INFO_LOG_LENGTH, &logLength);
1679 string messages;
1680 messages.resize(static_cast<size_t>(logLength));
1681 glGetProgramInfoLog(program, logLength, 0, messages.data());
1682 PLUGIN_LOG_ONCE_E("gl_shader_linking_error_" + to_string(program),
1683 "RENDER_VALIDATION: Shader linking error: %s", messages.c_str());
1684 #endif
1685 glDeleteProgram(program);
1686 return 0U;
1687 }
1688 // Add the program to cache
1689 programs_.push_back(
1690 { program, vEntry.shader, fEntry.shader, cEntry.shader, vEntry.hash, fEntry.hash, cEntry.hash, 1 });
1691 return program;
1692 }
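// Usage sketch (hedged, not part of the original source): a graphics program passes vertex
// and fragment sources with an empty compute source, a compute program the opposite;
// 'device', 'vertGlsl', 'fragGlsl' and 'compGlsl' are hypothetical names.
//
//   const uint32_t gfx  = device.CacheProgram(vertGlsl, fragGlsl, "");  // graphics pipeline
//   const uint32_t comp = device.CacheProgram("", "", compGlsl);        // compute pipeline
//   ...
//   device.ReleaseProgram(gfx);   // decrements the cache reference count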
1693
1694 void DeviceGLES::UseProgram(uint32_t program)
1695 {
1696 if (boundProgram_ != program) {
1697 boundProgram_ = program;
1698 glUseProgram(static_cast<GLuint>(program));
1699 }
1700 }
1701
1702 void DeviceGLES::BindBuffer(uint32_t target, uint32_t buffer)
1703 {
1704 const uint32_t targetId = GenericTargetToTargetId(target);
1705 auto& state = bufferBound_[targetId];
1706 if ((!state.bound) || (state.buffer != buffer)) {
1707 state.bound = true;
1708 state.buffer = buffer;
1709 glBindBuffer(target, static_cast<GLuint>(buffer));
1710 }
1711 }
1712
1713 void DeviceGLES::BindBufferRange(uint32_t target, uint32_t binding, uint32_t buffer, uint64_t offset, uint64_t size)
1714 {
1715 const uint32_t targetId = IndexedTargetToTargetId(target);
1716 auto& slot = boundBuffers_[targetId][binding];
1717
1718 if ((slot.cached == false) || (slot.buffer != buffer) || (slot.offset != offset) || (slot.size != size)) {
1719 slot.cached = true;
1720 slot.buffer = buffer;
1721 slot.offset = offset;
1722 slot.size = size;
1723 glBindBufferRange(target, binding, buffer, static_cast<GLintptr>(offset), static_cast<GLsizeiptr>(size));
1724 // BindBufferRange also sets the "generic" binding, so make sure the cache state is correct.
1725 const uint32_t targetId2 = GenericTargetToTargetId(target);
1726 auto& state = bufferBound_[targetId2];
1727 state.bound = true;
1728 state.buffer = buffer;
1729 }
1730 // NOTE: we are not forcing the generic bind point here. use BindBuffer to set the generic one if needed!
1731 }
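// Illustrative example (added for clarity, not part of the original source): binding a
// uniform buffer range to binding point 2 also updates the cached generic GL_UNIFORM_BUFFER
// binding; 'device' and 'ubo' are hypothetical names.
//
//   device.BindBufferRange(GL_UNIFORM_BUFFER, 2U, ubo, 0U, 256U);
//   // BoundBuffer(GL_UNIFORM_BUFFER)      -> ubo (generic binding, set as a side effect)
//   // BoundBuffer(GL_UNIFORM_BUFFER, 2U)  -> ubo (indexed binding)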
1732
1733 void DeviceGLES::BindSampler(uint32_t textureUnit, uint32_t sampler)
1734 {
1735 if ((sampler + 1) != boundSampler_[textureUnit]) {
1736 boundSampler_[textureUnit] = sampler + 1;
1737 glBindSampler(textureUnit, sampler);
1738 }
1739 }
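// Note (added for clarity, not part of the original source): the binding caches store
// "value + 1" so that 0 can mean "nothing cached yet". For example, BindSampler(3U, 7U)
// stores 8 in boundSampler_[3], and BoundSampler(3U) maps it back to 7.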
1740
1741 uint32_t DeviceGLES::BoundReadFrameBuffer() const
1742 {
1743 return boundReadFbo_;
1744 }
1745
1746 uint32_t DeviceGLES::BoundWriteFrameBuffer() const
1747 {
1748 return boundWriteFbo_;
1749 }
1750
1751 uint32_t DeviceGLES::BoundProgram() const
1752 {
1753 return boundProgram_;
1754 }
1755
1756 uint32_t DeviceGLES::BoundBuffer(uint32_t target) const
1757 {
1758 const uint32_t targetId = GenericTargetToTargetId(target);
1759 if (targetId >= MAX_BUFFER_BIND_ID) {
1760 return 0;
1761 }
1762 const auto& slot = bufferBound_[targetId];
1763 if (!slot.bound) {
1764 return 0;
1765 }
1766 return slot.buffer;
1767 }
1768
1769 uint32_t DeviceGLES::BoundBuffer(uint32_t target, uint32_t binding) const
1770 {
1771 const uint32_t targetId = IndexedTargetToTargetId(target);
1772 if (targetId >= MAX_BUFFER_BIND_ID || binding >= MAX_BINDING_VALUE) {
1773 return 0;
1774 }
1775 const auto& slot = boundBuffers_[targetId][binding];
1776 if (!slot.cached) {
1777 return 0;
1778 }
1779 return slot.buffer;
1780 }
1781
1782 uint32_t DeviceGLES::BoundSampler(uint32_t textureUnit) const
1783 {
1784 if (textureUnit >= MAX_SAMPLERS) {
1785 return 0;
1786 }
1787 const uint32_t bound = boundSampler_[textureUnit];
1788 return bound ? (bound - 1) : bound;
1789 }
1790
1791 uint32_t DeviceGLES::BoundTexture(uint32_t textureUnit, uint32_t target) const
1792 {
1793 const uint32_t targetId = TextureTargetToTargetId(target);
1794 if (textureUnit >= MAX_TEXTURE_UNITS || targetId >= MAX_TEXTURE_TARGET_ID) {
1795 return 0;
1796 }
1797 const uint32_t bound = boundTexture_[textureUnit][targetId];
1798 if (bound == 0) {
1799 return 0; // bound 0 == nothing has been bound via cache yet.
1800 }
1801 return bound - 1;
1802 }
1803
1804 void DeviceGLES::BindImageTexture(
1805 uint32_t unit, uint32_t texture, uint32_t level, bool layered, uint32_t layer, uint32_t access, uint32_t format)
1806 {
1807 auto& image = boundImage_[unit];
1808 if ((!image.bound) || (image.texture != texture) || (image.level != level) || (image.layered != layered) ||
1809 (image.access != access) || (image.format != format)) {
1810 image.bound = true;
1811 image.texture = texture;
1812 image.level = level;
1813 image.layered = layered;
1814 image.access = access;
1815 image.format = format;
1816 glBindImageTexture(static_cast<GLuint>(unit), static_cast<GLuint>(texture), static_cast<GLint>(level),
1817 static_cast<GLboolean>(layered), static_cast<GLint>(layer), static_cast<GLenum>(access),
1818 static_cast<GLenum>(format));
1819 }
1820 }
1821
1822 void DeviceGLES::SetActiveTextureUnit(uint32_t textureUnit)
1823 {
1824 if ((textureUnit + 1) != activeTextureUnit_) {
1825 activeTextureUnit_ = textureUnit + 1;
1826 glActiveTexture(GL_TEXTURE0 + textureUnit);
1827 }
1828 }
1829
1830 void DeviceGLES::BindTexture(uint32_t textureUnit, uint32_t target, uint32_t texture)
1831 {
1832 const uint32_t targetId = TextureTargetToTargetId(target);
1833 #if RENDER_HAS_GLES_BACKEND
1834 if (target == GL_TEXTURE_EXTERNAL_OES) {
1835 // Workaround for OES textures, which need a bind to zero in order to update.
1836 SetActiveTextureUnit(textureUnit);
1837 glBindTexture(GL_TEXTURE_EXTERNAL_OES, 0);
1838 boundTexture_[textureUnit][targetId] = 0;
1839 // Force the default sampler for OES textures.
1840 BindSampler(textureUnit, 0);
1841 }
1842 #endif
1843 if ((texture + 1) != boundTexture_[textureUnit][targetId]) {
1844 SetActiveTextureUnit(textureUnit);
1845 boundTexture_[textureUnit][targetId] = texture + 1;
1846 // remap cube map face targets to the cube map binding point...
1847 switch (target) {
1848 case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
1849 case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
1850 case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
1851 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
1852 case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
1853 case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
1854 glBindTexture(GL_TEXTURE_CUBE_MAP, texture);
1855 break;
1856 default:
1857 glBindTexture(target, texture);
1858 break;
1859 }
1860 }
1861 }
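// Illustrative note (added for clarity, not part of the original source): cube map face
// targets share the GL_TEXTURE_CUBE_MAP binding point, so e.g.
//
//   device.BindTexture(0U, GL_TEXTURE_CUBE_MAP_POSITIVE_X, tex);   // 'tex' is hypothetical
//
// issues glBindTexture(GL_TEXTURE_CUBE_MAP, tex), and the cache entry is keyed by the
// cube map target id rather than the individual face.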
1862
1863 void DeviceGLES::TexSwizzle(uint32_t image, uint32_t target, const Math::UVec4& swizzle)
1864 {
1865 // set only if not default..
1866 if ((swizzle.x != GL_RED) || (swizzle.y != GL_GREEN) || (swizzle.z != GL_BLUE) || (swizzle.w != GL_ALPHA)) {
1867 SetActiveTextureUnit(TEMP_BIND_UNIT);
1868 BindTexture(TEMP_BIND_UNIT, target, image);
1869 glTexParameteri(static_cast<GLenum>(target), GL_TEXTURE_SWIZZLE_R, static_cast<GLint>(swizzle.x));
1870 glTexParameteri(static_cast<GLenum>(target), GL_TEXTURE_SWIZZLE_G, static_cast<GLint>(swizzle.y));
1871 glTexParameteri(static_cast<GLenum>(target), GL_TEXTURE_SWIZZLE_B, static_cast<GLint>(swizzle.z));
1872 glTexParameteri(static_cast<GLenum>(target), GL_TEXTURE_SWIZZLE_A, static_cast<GLint>(swizzle.w));
1873 }
1874 }
1875
1876 void DeviceGLES::TexStorage2D(
1877 uint32_t image, uint32_t target, uint32_t levels, uint32_t internalformat, const Math::UVec2& extent)
1878 {
1879 SetActiveTextureUnit(TEMP_BIND_UNIT);
1880 BindTexture(TEMP_BIND_UNIT, target, image);
1881 glTexStorage2D(static_cast<GLenum>(target), static_cast<GLsizei>(levels), static_cast<GLenum>(internalformat),
1882 static_cast<GLsizei>(extent.x), static_cast<GLsizei>(extent.y));
1883 }
1884
1885 void DeviceGLES::TexStorage2DMultisample(uint32_t image, uint32_t target, uint32_t samples, uint32_t internalformat,
1886 const Math::UVec2& extent, bool fixedsamplelocations)
1887 {
1888 SetActiveTextureUnit(TEMP_BIND_UNIT);
1889 BindTexture(TEMP_BIND_UNIT, target, image);
1890 glTexStorage2DMultisample(static_cast<GLenum>(target), static_cast<GLsizei>(samples),
1891 static_cast<GLenum>(internalformat), static_cast<GLsizei>(extent.x), static_cast<GLsizei>(extent.y),
1892 fixedsamplelocations);
1893 }
1894
1895 void DeviceGLES::TexStorage3D(
1896 uint32_t image, uint32_t target, uint32_t levels, uint32_t internalformat, const Math::UVec3& extent)
1897 {
1898 SetActiveTextureUnit(TEMP_BIND_UNIT);
1899 BindTexture(TEMP_BIND_UNIT, target, image);
1900 glTexStorage3D((GLenum)target, (GLsizei)levels, (GLenum)internalformat, (GLsizei)extent.x, (GLsizei)extent.y,
1901 (GLsizei)extent.z);
1902 }
1903
1904 void DeviceGLES::TexStorage3DMultisample(uint32_t image, uint32_t target, uint32_t samples, uint32_t internalformat,
1905 const BASE_NS::Math::UVec3& extent, bool fixedsamplelocations)
1906 {
1907 SetActiveTextureUnit(TEMP_BIND_UNIT);
1908 BindTexture(TEMP_BIND_UNIT, target, image);
1909 glTexStorage3DMultisample(static_cast<GLenum>(target), static_cast<GLsizei>(samples),
1910 static_cast<GLenum>(internalformat), static_cast<GLsizei>(extent.x), static_cast<GLsizei>(extent.y),
1911 static_cast<GLsizei>(extent.z), fixedsamplelocations);
1912 }
1913
1914 void DeviceGLES::TexSubImage2D(uint32_t image, uint32_t target, uint32_t level, const Math::UVec2& offset,
1915 const Math::UVec2& extent, uint32_t format, uint32_t type, const void* pixels)
1916 {
1917 SetActiveTextureUnit(TEMP_BIND_UNIT);
1918 BindTexture(TEMP_BIND_UNIT, target, image);
1919 glTexSubImage2D((GLenum)target, (GLint)level, (GLint)offset.x, (GLint)offset.y, (GLsizei)extent.x,
1920 (GLsizei)extent.y, (GLenum)format, (GLenum)type, pixels);
1921 }
1922
1923 void DeviceGLES::TexSubImage3D(uint32_t image, uint32_t target, uint32_t level, const Math::UVec3& offset,
1924 const Math::UVec3& extent, uint32_t format, uint32_t type, const void* pixels)
1925 {
1926 SetActiveTextureUnit(TEMP_BIND_UNIT);
1927 BindTexture(TEMP_BIND_UNIT, target, image);
1928 glTexSubImage3D((GLenum)target, (GLint)level, (GLint)offset.x, (GLint)offset.y, (GLint)offset.z, (GLsizei)extent.x,
1929 (GLsizei)extent.y, (GLsizei)extent.z, (GLenum)format, (GLenum)type, pixels);
1930 }
1931
1932 void DeviceGLES::CompressedTexSubImage2D(uint32_t image, uint32_t target, uint32_t level, const Math::UVec2& offset,
1933 const Math::UVec2& extent, uint32_t format, uint32_t imageSize, const void* data)
1934 {
1935 SetActiveTextureUnit(TEMP_BIND_UNIT);
1936 BindTexture(TEMP_BIND_UNIT, target, image);
1937 glCompressedTexSubImage2D((GLenum)target, (GLint)level, (GLint)offset.x, (GLint)offset.y, (GLsizei)extent.x,
1938 (GLsizei)extent.y, (GLenum)format, (GLint)imageSize, data);
1939 }
1940
1941 void DeviceGLES::CompressedTexSubImage3D(uint32_t image, uint32_t target, uint32_t level, const Math::UVec3& offset,
1942 const Math::UVec3& extent, uint32_t format, uint32_t imageSize, const void* data)
1943 {
1944 SetActiveTextureUnit(TEMP_BIND_UNIT);
1945 BindTexture(TEMP_BIND_UNIT, target, image);
1946 glCompressedTexSubImage3D((GLenum)target, (GLint)level, (GLint)offset.x, (GLint)offset.y, (GLint)offset.z,
1947 (GLsizei)extent.x, (GLsizei)extent.y, (GLsizei)extent.z, (GLenum)format, (GLint)imageSize, data);
1948 }
1949
1950 const DeviceGLES::ImageFormat& DeviceGLES::GetGlImageFormat(const Format format) const
1951 {
1952 if (const auto pos =
1953 std::lower_bound(supportedFormats_.begin(), supportedFormats_.end(), format,
1954 [](const ImageFormat &element, const Format value) { return element.coreFormat < value; });
1955 (pos != supportedFormats_.end()) && (pos->coreFormat == format)) {
1956 return *pos;
1957 }
1958 if (const auto pos =
1959 std::lower_bound(std::begin(IMAGE_FORMATS_FALLBACK), std::end(IMAGE_FORMATS_FALLBACK), format,
1960 [](const ImageFormat &element, const Format value) { return element.coreFormat < value; });
1961 (pos != std::end(IMAGE_FORMATS_FALLBACK)) && (pos->coreFormat == format)) {
1962 PLUGIN_LOG_I("using fallback for format %u", format);
1963 return *pos;
1964 }
1965 PLUGIN_LOG_I("asking for unsupported format %u", format);
1966 return supportedFormats_[0];
1967 }
1968
1969 void DeviceGLES::DeleteTexture(uint32_t texture)
1970 {
1971 UnBindTexture(texture);
1972 glDeleteTextures(1, &texture);
1973 }
1974
1975 void DeviceGLES::DeleteBuffer(uint32_t buffer)
1976 {
1977 UnBindBuffer(buffer);
1978 glDeleteBuffers(1, &buffer);
1979 }
1980
1981 void DeviceGLES::DeleteSampler(uint32_t sampler)
1982 {
1983 UnBindSampler(sampler);
1984 glDeleteSamplers(1, &sampler);
1985 }
1986
1987 uint32_t DeviceGLES::CreateVertexArray()
1988 {
1989 GLuint vao;
1990 glGenVertexArrays(1, &vao);
1991 if (vaoStatesInUse_ < vaoStates_.size()) { // there is at least one free slot to reuse
1992 for (auto it = vaoStates_.begin(); it != vaoStates_.end(); it++) {
1993 if (it->vao == 0) {
1994 // re-use old "object"
1995 it->vao = vao;
1996 vaoStatesInUse_++;
1997 return static_cast<uint32_t>(1 + (it - vaoStates_.begin()));
1998 }
1999 }
2000 }
2001 VAOState v;
2002 v.vao = vao;
2003 vaoStates_.push_back(v);
2004 vaoStatesInUse_++;
2005 return static_cast<uint32_t>(vaoStates_.size());
2006 }
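// Note (added for clarity, not part of the original source): the returned handle is a
// 1-based index into vaoStates_, not the GL object name; 0 is reserved for "no VAO".
// DeleteVertexArray() clears the slot so a later CreateVertexArray() can reuse it.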
2007
2008 void DeviceGLES::DeleteVertexArray(uint32_t vao)
2009 {
2010 PLUGIN_ASSERT(!vaoStates_.empty());
2011 if (vao > 0 && vao <= vaoStates_.size()) {
2012 UnBindVertexArray(vao);
2013 auto& state = vaoStates_[vao - 1];
2014 glDeleteVertexArrays(1, &state.vao);
2015 state = {}; // clear the object.
2016 vaoStatesInUse_--;
2017 }
2018 }
2019
2020 void DeviceGLES::DeleteFrameBuffer(uint32_t fbo)
2021 {
2022 PLUGIN_ASSERT(IsActive());
2023 UnBindFrameBuffer(fbo);
2024 glDeleteFramebuffers(1, &fbo);
2025 #if (RENDER_DEBUG_GPU_RESOURCE_IDS == 1)
2026 PLUGIN_LOG_D("fbo id <: %u", fbo);
2027 #endif
2028 }
2029
2030 void DeviceGLES::UnBindTexture(uint32_t texture)
2031 {
2032 uint32_t unit = 0;
2033 for (auto& textureUnit : boundTexture_) {
2034 uint32_t targetId = 0;
2035 for (uint32_t& typeBinding : textureUnit) {
2036 if (typeBinding == texture + 1) {
2037 SetActiveTextureUnit(unit);
2038 const uint32_t target = TextureTargetIdToTarget(TextureTargetId { targetId });
2039 glBindTexture(target, 0);
2040 typeBinding = 0;
2041 }
2042 targetId++;
2043 }
2044 unit++;
2045 }
2046
2047 unit = 0;
2048 for (auto& image : boundImage_) {
2049 if ((image.bound) && (image.texture == texture)) {
2050 glBindImageTexture(static_cast<GLuint>(unit), 0, 0, false, 0, GL_READ_ONLY, GL_R32UI);
2051 // set default state...
2052 image.bound = false;
2053 image.texture = 0;
2054 image.level = 0;
2055 image.layered = false;
2056 image.access = GL_READ_ONLY;
2057 image.format = GL_R32UI;
2058 }
2059 unit++;
2060 }
2061 }
2062
2063 void DeviceGLES::UnBindBuffer(uint32_t buffer)
2064 {
2065 uint32_t tid = 0;
2066 for (auto& buffers : boundBuffers_) {
2067 const uint32_t targetId = IndexedTargetIdToTarget(BufferBindId { tid });
2068 uint32_t bid = 0;
2069 for (auto& slot : buffers) {
2070 if (slot.buffer == buffer) {
2071 glBindBufferRange(targetId, bid, 0, 0, 0);
2072 // nothing bound
2073 slot.cached = false;
2074 slot.buffer = 0;
2075 slot.offset = 0;
2076 slot.size = 0;
2077 }
2078 bid++;
2079 }
2080 tid++;
2081 }
2082
2083 tid = 0;
2084 for (auto& slot : bufferBound_) {
2085 if (slot.buffer == buffer) {
2086 const uint32_t targetId = GenericTargetIdToTarget(BufferTargetId { tid });
2087 glBindBuffer(targetId, 0);
2088 // nothing bound
2089 slot.bound = false;
2090 slot.buffer = 0;
2091 }
2092 tid++;
2093 }
2094
2095 // scan VAOs also..
2096 UnBindBufferFromVertexArray(buffer);
2097 }
2098
2099 void DeviceGLES::UnBindBufferFromVertexArray(uint32_t buffer)
2100 {
2101 uint32_t vao = 1;
2102 uint32_t wasbound = BoundVertexArray();
2103 for (auto& state : vaoStates_) {
2104 if (state.vao > 0) {
2105 auto& elementBuffer = state.elementBuffer;
2106 auto& vertexBufferBinds = state.vertexBufferBinds;
2107 int slot = 0;
2108 for (auto& t : vertexBufferBinds) {
2109 if ((t.bound) && (t.buffer == buffer)) {
2110 // detach the buffer from vao.
2111 BindVertexArray(vao);
2112 glBindVertexBuffer((GLuint)slot, 0, 0, 0);
2113 t.bound = false;
2114 t.buffer = 0;
2115 t.offset = 0;
2116 t.stride = 0;
2117 }
2118 slot++;
2119 }
2120 if ((elementBuffer.bound) && (elementBuffer.buffer == buffer)) {
2121 // detach the buffer from vao.
2122 BindVertexArray(vao);
2123 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
2124 elementBuffer.bound = false;
2125 elementBuffer.buffer = 0;
2126 }
2127 }
2128 vao++;
2129 }
2130 BindVertexArray(wasbound);
2131 }
2132
2133 void DeviceGLES::UnBindSampler(uint32_t sampler)
2134 {
2135 for (uint32_t unit = 0U; unit < MAX_SAMPLERS; ++unit) {
2136 if ((sampler + 1) == boundSampler_[unit]) {
2137 glBindSampler(unit, 0);
2138 boundSampler_[unit] = 0;
2139 }
2140 }
2141 }
2142
2143 void DeviceGLES::UnBindVertexArray(uint32_t vao)
2144 {
2145 if (boundVao_ == vao) {
2146 glBindVertexArray(0);
2147 boundVao_ = 0;
2148 }
2149 }
2150
2151 void DeviceGLES::UnBindFrameBuffer(uint32_t fbo)
2152 {
2153 if ((fbo == boundReadFbo_) && (fbo == boundWriteFbo_)) {
2154 boundReadFbo_ = 0;
2155 boundWriteFbo_ = 0;
2156 glBindFramebuffer(GL_FRAMEBUFFER, 0);
2157 } else if (boundWriteFbo_ == fbo) {
2158 boundWriteFbo_ = 0;
2159 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
2160 } else if (boundReadFbo_ == fbo) {
2161 boundReadFbo_ = 0;
2162 glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
2163 }
2164 }
2165
2166 uint32_t DeviceGLES::BoundVertexArray() const
2167 {
2168 return boundVao_;
2169 }
2170
2171 void DeviceGLES::BindFrameBuffer(uint32_t fbo)
2172 {
2173 if ((boundReadFbo_ != fbo) && (boundWriteFbo_ != fbo)) {
2174 glBindFramebuffer(GL_FRAMEBUFFER, fbo);
2175 boundReadFbo_ = boundWriteFbo_ = fbo;
2176 } else if (boundWriteFbo_ != fbo) {
2177 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
2178 boundWriteFbo_ = fbo;
2179 } else if (boundReadFbo_ != fbo) {
2180 glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
2181 boundReadFbo_ = fbo;
2182 }
2183 }
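// Illustrative note (added for clarity, not part of the original source): the read and draw
// framebuffer bindings are cached separately. BindFrameBuffer() ends up caching the fbo for
// both, while BindReadFrameBuffer() / BindWriteFrameBuffer() below touch only
// GL_READ_FRAMEBUFFER or GL_DRAW_FRAMEBUFFER respectively.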
2184
2185 void DeviceGLES::BindReadFrameBuffer(uint32_t fbo)
2186 {
2187 if (boundReadFbo_ != fbo) {
2188 glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo);
2189 boundReadFbo_ = fbo;
2190 }
2191 }
2192
2193 void DeviceGLES::BindWriteFrameBuffer(uint32_t fbo)
2194 {
2195 if (boundWriteFbo_ != fbo) {
2196 glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo);
2197 boundWriteFbo_ = fbo;
2198 }
2199 }
2200
2201 void DeviceGLES::BindVertexArray(uint32_t vao)
2202 {
2203 if (boundVao_ != vao) {
2204 PLUGIN_ASSERT(vao <= vaoStates_.size());
2205 if (vao > 0) {
2206 glBindVertexArray(vaoStates_[vao - 1].vao);
2207 } else {
2208 glBindVertexArray(0);
2209 }
2210 boundVao_ = vao;
2211 }
2212 }
2213
2214 void DeviceGLES::BindVertexBuffer(uint32_t slot, uint32_t buffer, intptr_t offset, intptr_t stride)
2215 {
2216 PLUGIN_ASSERT(boundVao_ > 0);
2217 PLUGIN_ASSERT(boundVao_ <= vaoStates_.size());
2218 auto& vertexBufferBinds = vaoStates_[boundVao_ - 1].vertexBufferBinds;
2219 bool bind = true;
2220 if (vertexBufferBinds[slot].bound) {
2221 bind = (vertexBufferBinds[slot].buffer != buffer) || (vertexBufferBinds[slot].offset != offset) ||
2222 (vertexBufferBinds[slot].stride != stride);
2223 }
2224 if (bind) {
2225 vertexBufferBinds[slot].bound = true;
2226 vertexBufferBinds[slot].buffer = buffer;
2227 vertexBufferBinds[slot].offset = offset;
2228 vertexBufferBinds[slot].stride = stride;
2229 glBindVertexBuffer((GLuint)slot, (GLuint)buffer, (GLintptr)offset, (GLsizei)stride);
2230 }
2231 }
2232
2233 void DeviceGLES::VertexBindingDivisor(uint32_t slot, uint32_t divisor)
2234 {
2235 PLUGIN_ASSERT(boundVao_ > 0);
2236 PLUGIN_ASSERT(boundVao_ <= vaoStates_.size());
2237 auto& vertexBufferBinds = vaoStates_[boundVao_ - 1].vertexBufferBinds;
2238 if (vertexBufferBinds[slot].divisor != divisor) {
2239 vertexBufferBinds[slot].divisor = divisor;
2240 glVertexBindingDivisor(slot, divisor);
2241 }
2242 }
2243
2244 void DeviceGLES::BindElementBuffer(uint32_t buffer)
2245 {
2246 PLUGIN_ASSERT(boundVao_ > 0);
2247 PLUGIN_ASSERT(boundVao_ <= vaoStates_.size());
2248 auto& elementBuffer = vaoStates_[boundVao_ - 1].elementBuffer;
2249 bool bind = true;
2250 if (elementBuffer.bound) {
2251 bind = (buffer != elementBuffer.buffer);
2252 }
2253 if (bind) {
2254 elementBuffer.bound = true;
2255 elementBuffer.buffer = buffer;
2256 glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, buffer);
2257 }
2258 }
2259
2260 bool DeviceGLES::AllowThreadedProcessing() const
2261 {
2262 return HasExtension(EXT_BUFFER_STORAGE);
2263 }
2264
2265 unique_ptr<GpuBuffer> DeviceGLES::CreateGpuBuffer(const GpuBufferDesc& desc)
2266 {
2267 return make_unique<GpuBufferGLES>(*this, desc);
2268 }
2269
2270 unique_ptr<GpuBuffer> DeviceGLES::CreateGpuBuffer(const GpuAccelerationStructureDesc& desc)
2271 {
2272 return make_unique<GpuBufferGLES>(*this, desc.bufferDesc);
2273 }
2274
2275 unique_ptr<GpuImage> DeviceGLES::CreateGpuImage(const GpuImageDesc& desc)
2276 {
2277 return make_unique<GpuImageGLES>(*this, desc);
2278 }
2279
2280 unique_ptr<GpuImage> DeviceGLES::CreateGpuImageView(const GpuImageDesc& desc, const GpuImagePlatformData& platformData)
2281 {
2282 PLUGIN_ASSERT(IsActive());
2283 return make_unique<GpuImageGLES>(*this, desc, platformData);
2284 }
2285
2286 vector<unique_ptr<GpuImage>> DeviceGLES::CreateGpuImageViews(const Swapchain& platformSwapchain)
2287 {
2288 vector<unique_ptr<GpuImage>> gpuImages;
2289 const SwapchainGLES& swapchain = static_cast<const SwapchainGLES&>(platformSwapchain);
2290 const GpuImageDesc& desc = swapchain.GetDesc();
2291 const auto& swapchainPlat = swapchain.GetPlatformData();
2292
2293 gpuImages.resize(swapchainPlat.swapchainImages.images.size());
2294
2295 PLUGIN_ASSERT(IsActive());
2296 for (size_t idx = 0; idx < gpuImages.size(); ++idx) {
2297 GpuImagePlatformDataGL gpuImagePlat {};
2298 gpuImagePlat.image = swapchainPlat.swapchainImages.images[idx];
2299 gpuImagePlat.swizzle = { GL_RED, GL_GREEN, GL_BLUE, GL_ALPHA };
2300 gpuImages[idx] = CreateGpuImageView(desc, gpuImagePlat);
2301 }
2302 return gpuImages;
2303 }
2304
2305 unique_ptr<GpuSampler> DeviceGLES::CreateGpuSampler(const GpuSamplerDesc& desc)
2306 {
2307 return make_unique<GpuSamplerGLES>(*this, desc);
2308 }
2309
2310 unique_ptr<RenderFrameSync> DeviceGLES::CreateRenderFrameSync()
2311 {
2312 return make_unique<RenderFrameSyncGLES>(*this);
2313 }
2314
2315 unique_ptr<RenderBackend> DeviceGLES::CreateRenderBackend(
2316 GpuResourceManager& gpuResourceMgr, CORE_NS::ITaskQueue* const)
2317 {
2318 return make_unique<RenderBackendGLES>(*this, gpuResourceMgr);
2319 }
2320
2321 unique_ptr<ShaderModule> DeviceGLES::CreateShaderModule(const ShaderModuleCreateInfo& data)
2322 {
2323 return make_unique<ShaderModuleGLES>(*this, data);
2324 }
2325
2326 unique_ptr<ShaderModule> DeviceGLES::CreateComputeShaderModule(const ShaderModuleCreateInfo& data)
2327 {
2328 return make_unique<ShaderModuleGLES>(*this, data);
2329 }
2330
2331 unique_ptr<GpuShaderProgram> DeviceGLES::CreateGpuShaderProgram(const GpuShaderProgramCreateData& data)
2332 {
2333 return make_unique<GpuShaderProgramGLES>(*this, data);
2334 }
2335
2336 unique_ptr<GpuComputeProgram> DeviceGLES::CreateGpuComputeProgram(const GpuComputeProgramCreateData& data)
2337 {
2338 return make_unique<GpuComputeProgramGLES>(*this, data);
2339 }
2340
2341 unique_ptr<NodeContextDescriptorSetManager> DeviceGLES::CreateNodeContextDescriptorSetManager()
2342 {
2343 return make_unique<NodeContextDescriptorSetManagerGles>(*this);
2344 }
2345
2346 unique_ptr<NodeContextPoolManager> DeviceGLES::CreateNodeContextPoolManager(
2347 GpuResourceManager& gpuResourceMgr, const GpuQueue& gpuQueue)
2348 {
2349 return make_unique<NodeContextPoolManagerGLES>(*this, gpuResourceMgr);
2350 }
2351
2352 unique_ptr<GraphicsPipelineStateObject> DeviceGLES::CreateGraphicsPipelineStateObject(
2353 const GpuShaderProgram& gpuProgram, const GraphicsState& graphicsState, const PipelineLayout& pipelineLayout,
2354 const VertexInputDeclarationView& vertexInputDeclaration,
2355 const ShaderSpecializationConstantDataView& specializationConstants,
2356 const array_view<const DynamicStateEnum> dynamicStates, const RenderPassDesc& renderPassDesc,
2357 const array_view<const RenderPassSubpassDesc>& renderPassSubpassDescs, const uint32_t subpassIndex,
2358 const LowLevelRenderPassData* renderPassData, const LowLevelPipelineLayoutData* pipelineLayoutData)
2359 {
2360 PLUGIN_ASSERT(!renderPassData);
2361 PLUGIN_ASSERT(!pipelineLayoutData);
2362 auto pipeline = make_unique<GraphicsPipelineStateObjectGLES>(*this, gpuProgram, graphicsState, pipelineLayout,
2363 vertexInputDeclaration, specializationConstants, dynamicStates, renderPassDesc, renderPassSubpassDescs);
2364 return unique_ptr<GraphicsPipelineStateObject> { pipeline->GetPlatformData().graphicsShader ? pipeline.release()
2365 : nullptr };
2366 }
2367
2368 unique_ptr<ComputePipelineStateObject> DeviceGLES::CreateComputePipelineStateObject(const GpuComputeProgram& gpuProgram,
2369 const PipelineLayout& pipelineLayout, const ShaderSpecializationConstantDataView& specializationConstants,
2370 const LowLevelPipelineLayoutData* pipelineLayoutData)
2371 {
2372 PLUGIN_ASSERT(!pipelineLayoutData);
2373 auto pipeline =
2374 make_unique<ComputePipelineStateObjectGLES>(*this, gpuProgram, pipelineLayout, specializationConstants);
2375 return unique_ptr<ComputePipelineStateObject> { pipeline->GetPlatformData().computeShader ? pipeline.release()
2376 : nullptr };
2377 }
2378
2379 unique_ptr<GpuSemaphore> DeviceGLES::CreateGpuSemaphore()
2380 {
2381 return make_unique<GpuSemaphoreGles>(*this);
2382 }
2383
2384 unique_ptr<GpuSemaphore> DeviceGLES::CreateGpuSemaphoreView(const uint64_t handle)
2385 {
2386 return make_unique<GpuSemaphoreGles>(*this, handle);
2387 }
2388
2389 void DeviceGLES::SetBackendConfig(const BackendConfig& config)
2390 {
2391 #if RENDER_HAS_GLES_BACKEND
2392 backendConfig_.allowDepthResolve = static_cast<const BackendConfigGLES&>(config).allowDepthResolve &&
2393 HasExtension("GL_EXT_multisampled_render_to_texture2");
2394 #endif
2395 }
2396
2397 LowLevelDeviceGLES::LowLevelDeviceGLES(DeviceGLES& deviceGLES)
2398 : deviceGLES_(deviceGLES), gpuResourceMgr_(static_cast<GpuResourceManager&>(deviceGLES.GetGpuResourceManager()))
2399 {}
2400
2401 DeviceBackendType LowLevelDeviceGLES::GetBackendType() const
2402 {
2403 return deviceGLES_.GetBackendType();
2404 }
2405
2406 #if RENDER_HAS_EXPERIMENTAL
2407 void LowLevelDeviceGLES::Activate()
2408 {
2409 deviceGLES_.Activate();
2410 }
2411
2412 void LowLevelDeviceGLES::Deactivate()
2413 {
2414 deviceGLES_.Deactivate();
2415 }
2416
2417 void LowLevelDeviceGLES::SwapBuffers()
2418 {
2419 if (deviceGLES_.IsActive() && deviceGLES_.HasSwapchain()) {
2420 RenderHandle defaultSwapChain {};
2421 auto sc = static_cast<const SwapchainGLES*>(deviceGLES_.GetSwapchain(defaultSwapChain));
2422 deviceGLES_.SwapBuffers(*sc);
2423 }
2424 }
2425 #endif
2426
2427 RENDER_END_NAMESPACE()
2428