1 use crate::api::icd::*;
2 use crate::api::types::*;
3 use crate::core::context::*;
4 use crate::core::device::*;
5 use crate::core::format::*;
6 use crate::core::memory::*;
7 use crate::core::queue::*;
8 use crate::core::util::*;
9
10 use libc_rust_gen::{close, dlsym};
11 use mesa_rust::pipe::context::RWFlags;
12 use rusticl_opencl_gen::*;
13
14 use mesa_rust::pipe::fence::*;
15 use mesa_rust::pipe::resource::*;
16 use mesa_rust::pipe::screen::*;
17
18 use std::collections::HashMap;
19 use std::ffi::CStr;
20 use std::ffi::CString;
21 use std::mem;
22 use std::os::raw::c_void;
23 use std::ptr;
24 use std::sync::Arc;
25
/// Optional per-object mapping from a CL-side resource to its GL-side shadow
/// resource; used by the cube-map copy helpers below (see `copy_cube_to_slice`).
type CLGLMappings = Option<HashMap<Arc<PipeResource>, Arc<PipeResource>>>;
27
/// Holds the platform (GLX/EGL) `GetProcAddress` entry points, resolved once
/// via `dlsym` so MESA interop functions can be looked up on demand.
pub struct XPlatManager {
    #[cfg(glx)]
    glx_get_proc_addr: PFNGLXGETPROCADDRESSPROC,
    egl_get_proc_addr: PFNEGLGETPROCADDRESSPROC,
}
33
34 impl Default for XPlatManager {
default() -> Self35 fn default() -> Self {
36 Self::new()
37 }
38 }
39
impl XPlatManager {
    /// Resolves the GLX (only when built with glx support) and EGL
    /// `GetProcAddress` entry points from the already-loaded process image.
    pub fn new() -> Self {
        Self {
            #[cfg(glx)]
            glx_get_proc_addr: Self::get_proc_address_func(c"glXGetProcAddress"),
            egl_get_proc_addr: Self::get_proc_address_func(c"eglGetProcAddress"),
        }
    }

    /// Looks `name` up in the global symbol scope and reinterprets the raw
    /// pointer as `T`.
    ///
    /// NOTE(review): this relies on `T` being an `Option<unsafe extern "C" fn …>`
    /// bindgen pointer type so that a NULL `dlsym` result maps to `None` —
    /// confirm all call sites keep passing such types.
    fn get_proc_address_func<T>(name: &CStr) -> T {
        unsafe {
            // dlsym(NULL, …) searches the whole process; the symbol is only
            // present when the corresponding GL library is loaded.
            let pfn = dlsym(ptr::null_mut(), name.as_ptr());
            mem::transmute_copy(&pfn)
        }
    }

    /// Resolves `cname` through `glXGetProcAddress`, erroring when the GLX
    /// loader entry point itself wasn't found.
    #[cfg(glx)]
    unsafe fn get_func_glx(&self, cname: &CStr) -> CLResult<__GLXextFuncPtr> {
        unsafe {
            Ok(self
                .glx_get_proc_addr
                .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?(
                cname.as_ptr().cast(),
            ))
        }
    }

    // in theory it should return CLResult<__GLXextFuncPtr> but luckily it's identical
    #[cfg(not(glx))]
    unsafe fn get_func_glx(&self, _: &CStr) -> CLResult<__eglMustCastToProperFunctionPointerType> {
        Err(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)
    }

    /// Dispatches a proc-address lookup to GLX or EGL based on the `glX`/`egl`
    /// name prefix and transmutes the result to the requested pointer type `T`.
    ///
    /// Panics on any other prefix — callers below only ever pass MESA interop
    /// names with those two prefixes.
    fn get_func<T>(&self, name: &str) -> CLResult<T> {
        let cname = CString::new(name).unwrap();
        unsafe {
            let raw_func = if name.starts_with("glX") {
                self.get_func_glx(&cname)?
            } else if name.starts_with("egl") {
                self.egl_get_proc_addr
                    .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?(
                    cname.as_ptr().cast()
                )
            } else {
                panic!();
            };

            Ok(mem::transmute_copy(&raw_func))
        }
    }

    // The getters below resolve the individual MESA_GLINTEROP entry points;
    // they return the (possibly None) function pointer on success and an error
    // only when the platform loader itself is unavailable.

    #[allow(non_snake_case)]
    pub fn MesaGLInteropEGLQueryDeviceInfo(
        &self,
    ) -> CLResult<PFNMESAGLINTEROPEGLQUERYDEVICEINFOPROC> {
        self.get_func::<PFNMESAGLINTEROPEGLQUERYDEVICEINFOPROC>("eglGLInteropQueryDeviceInfoMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropGLXQueryDeviceInfo(
        &self,
    ) -> CLResult<PFNMESAGLINTEROPGLXQUERYDEVICEINFOPROC> {
        self.get_func::<PFNMESAGLINTEROPGLXQUERYDEVICEINFOPROC>("glXGLInteropQueryDeviceInfoMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropEGLExportObject(&self) -> CLResult<PFNMESAGLINTEROPEGLEXPORTOBJECTPROC> {
        self.get_func::<PFNMESAGLINTEROPEGLEXPORTOBJECTPROC>("eglGLInteropExportObjectMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropGLXExportObject(&self) -> CLResult<PFNMESAGLINTEROPGLXEXPORTOBJECTPROC> {
        self.get_func::<PFNMESAGLINTEROPGLXEXPORTOBJECTPROC>("glXGLInteropExportObjectMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropEGLFlushObjects(&self) -> CLResult<PFNMESAGLINTEROPEGLFLUSHOBJECTSPROC> {
        self.get_func::<PFNMESAGLINTEROPEGLFLUSHOBJECTSPROC>("eglGLInteropFlushObjectsMESA")
    }

    #[allow(non_snake_case)]
    pub fn MesaGLInteropGLXFlushObjects(&self) -> CLResult<PFNMESAGLINTEROPGLXFLUSHOBJECTSPROC> {
        self.get_func::<PFNMESAGLINTEROPGLXFLUSHOBJECTSPROC>("glXGLInteropFlushObjectsMESA")
    }
}
125
/// The GL context (plus its display) this CL context shares objects with.
#[allow(clippy::upper_case_acronyms)]
#[derive(PartialEq, Eq)]
enum GLCtx {
    // EGLDisplay + EGLContext as passed in through the context properties
    EGL(EGLDisplay, EGLContext),
    // X11 display + GLX context
    GLX(*mut _XDisplay, *mut __GLXcontextRec),
}
132
/// Per-CL-context state for GL sharing: the queried interop device info, the
/// proc-address manager, and the GL context handle itself.
pub struct GLCtxManager {
    pub interop_dev_info: mesa_glinterop_device_info,
    pub xplat_manager: XPlatManager,
    gl_ctx: GLCtx,
}
138
// SAFETY: We do have a few pointers inside [GLCtxManager], but nothing really relevant here:
// * pointers of the GLX/EGL context and _XDisplay/EGLDisplay, but we don't do much with them
//   except calling into our mesa internal GL sharing extension, which properly locks data.
// * pointer to the _XDisplay/EGLDisplay
unsafe impl Send for GLCtxManager {}
unsafe impl Sync for GLCtxManager {}
145
impl GLCtxManager {
    /// Builds a context manager from the GL sharing properties handed to
    /// context creation.
    ///
    /// Returns `Ok(None)` when no GL context was given, an error when the
    /// property combination is invalid or the MESA interop device query
    /// fails, and `Ok(Some(..))` with the filled-in device info otherwise.
    pub fn new(
        gl_context: *mut c_void,
        glx_display: *mut _XDisplay,
        egl_display: EGLDisplay,
    ) -> CLResult<Option<Self>> {
        let mut info = mesa_glinterop_device_info {
            // version of the mesa_glinterop_device_info struct we speak
            version: 4,
            ..Default::default()
        };
        let xplat_manager = XPlatManager::new();

        // More than one of the attributes CL_CGL_SHAREGROUP_KHR, CL_EGL_DISPLAY_KHR,
        // CL_GLX_DISPLAY_KHR, and CL_WGL_HDC_KHR is set to a non-default value.
        if !egl_display.is_null() && !glx_display.is_null() {
            return Err(CL_INVALID_OPERATION);
        }

        // no GL context given -> no sharing, not an error
        if gl_context.is_null() {
            return Ok(None);
        }

        if !egl_display.is_null() {
            let egl_query_device_info_func = xplat_manager
                .MesaGLInteropEGLQueryDeviceInfo()?
                .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

            // SAFETY: display/context handles come straight from the app; the
            // MESA interop entry point is expected to validate them.
            let err = unsafe {
                egl_query_device_info_func(egl_display.cast(), gl_context.cast(), &mut info)
            };

            if err != MESA_GLINTEROP_SUCCESS as i32 {
                return Err(interop_to_cl_error(err));
            }

            Ok(Some(GLCtxManager {
                gl_ctx: GLCtx::EGL(egl_display.cast(), gl_context),
                interop_dev_info: info,
                xplat_manager: xplat_manager,
            }))
        } else if !glx_display.is_null() && cfg!(glx) {
            let glx_query_device_info_func = xplat_manager
                .MesaGLInteropGLXQueryDeviceInfo()?
                .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

            let err = unsafe {
                glx_query_device_info_func(glx_display.cast(), gl_context.cast(), &mut info)
            };

            if err != MESA_GLINTEROP_SUCCESS as i32 {
                return Err(interop_to_cl_error(err));
            }

            Ok(Some(GLCtxManager {
                gl_ctx: GLCtx::GLX(glx_display.cast(), gl_context.cast()),
                interop_dev_info: info,
                xplat_manager: xplat_manager,
            }))
        } else {
            // GLX requested but not compiled in, or no display at all
            Err(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)
        }
    }

    /// Exports the GL object `texture` (of GL `target`) as a dma-buf via the
    /// MESA interop extension.
    ///
    /// First flushes the GL side and waits on the returned fence fd on every
    /// device's helper context, then performs the actual export. Returns the
    /// filled-in export in/out structs wrapped in a [`GLExportManager`].
    pub fn export_object(
        &self,
        cl_ctx: &Arc<Context>,
        target: cl_GLenum,
        flags: u32,
        miplevel: cl_GLint,
        texture: cl_GLuint,
    ) -> CLResult<GLExportManager> {
        let xplat_manager = &self.xplat_manager;
        let mut export_in = mesa_glinterop_export_in {
            version: 2,
            target: target,
            obj: texture,
            miplevel: miplevel as u32,
            access: cl_to_interop_flags(flags),
            ..Default::default()
        };

        let mut export_out = mesa_glinterop_export_out {
            version: 2,
            ..Default::default()
        };

        // fence fd filled in by the flush call below
        let mut fd = -1;

        let mut flush_out = mesa_glinterop_flush_out {
            version: 1,
            fence_fd: &mut fd,
            ..Default::default()
        };

        // Both arms do the same flush -> fence-wait -> export dance, just
        // through the EGL or GLX entry points respectively.
        let err = unsafe {
            match &self.gl_ctx {
                GLCtx::EGL(disp, ctx) => {
                    let egl_export_object_func = xplat_manager
                        .MesaGLInteropEGLExportObject()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let egl_flush_objects_func = xplat_manager
                        .MesaGLInteropEGLFlushObjects()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let err_flush = egl_flush_objects_func(
                        disp.cast(),
                        ctx.cast(),
                        1,
                        &mut export_in,
                        &mut flush_out,
                    );
                    // TODO: use fence_server_sync in ctx inside the queue thread
                    // NOTE(review): the fence is imported and waited on even if
                    // the flush failed and `fd` stayed -1 — verify import_fence
                    // copes with that.
                    let fence_fd = FenceFd { fd };
                    cl_ctx.devs.iter().for_each(|dev| {
                        let fence = dev.helper_ctx().import_fence(&fence_fd);
                        fence.wait();
                    });

                    if err_flush != 0 {
                        err_flush
                    } else {
                        egl_export_object_func(
                            disp.cast(),
                            ctx.cast(),
                            &mut export_in,
                            &mut export_out,
                        )
                    }
                }
                GLCtx::GLX(disp, ctx) => {
                    let glx_export_object_func = xplat_manager
                        .MesaGLInteropGLXExportObject()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let glx_flush_objects_func = xplat_manager
                        .MesaGLInteropGLXFlushObjects()?
                        .ok_or(CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR)?;

                    let err_flush = glx_flush_objects_func(
                        disp.cast(),
                        ctx.cast(),
                        1,
                        &mut export_in,
                        &mut flush_out,
                    );
                    // TODO: use fence_server_sync in ctx inside the queue thread
                    let fence_fd = FenceFd { fd };
                    cl_ctx.devs.iter().for_each(|dev| {
                        let fence = dev.helper_ctx().import_fence(&fence_fd);
                        fence.wait();
                    });

                    if err_flush != 0 {
                        err_flush
                    } else {
                        glx_export_object_func(
                            disp.cast(),
                            ctx.cast(),
                            &mut export_in,
                            &mut export_out,
                        )
                    }
                }
            }
        };

        if err != MESA_GLINTEROP_SUCCESS as i32 {
            return Err(interop_to_cl_error(err));
        }

        // CL_INVALID_GL_OBJECT if bufobj is not a GL buffer object or is a GL buffer
        // object but does not have an existing data store or the size of the buffer is 0.
        if [GL_ARRAY_BUFFER, GL_TEXTURE_BUFFER].contains(&target) && export_out.buf_size == 0 {
            return Err(CL_INVALID_GL_OBJECT);
        }

        Ok(GLExportManager {
            export_in: export_in,
            export_out: export_out,
        })
    }
}
329
/// Memory layout properties derived from a GL export: extent, array slices,
/// per-pixel byte size, and the byte offset/stride inside the dma-buf.
#[derive(Clone)]
pub struct GLMemProps {
    pub height: u16,
    pub depth: u16,
    pub width: u32,
    pub offset: u32,
    pub array_size: u16,
    pub pixel_size: u8,
    pub stride: u32,
}

impl GLMemProps {
    /// Total size in bytes: width * height * depth * array_size * pixel_size.
    pub fn size(&self) -> usize {
        [
            self.height as usize,
            self.depth as usize,
            self.array_size as usize,
            self.width as usize,
            self.pixel_size as usize,
        ]
        .into_iter()
        .product()
    }
}
350
/// Owns the result of a MESA interop export; the dma-buf fd inside
/// `export_out` is closed when this is dropped.
pub struct GLExportManager {
    pub export_in: mesa_glinterop_export_in,
    pub export_out: mesa_glinterop_export_out,
}
355
impl GLExportManager {
    /// Derives the [`GLMemProps`] (extent, slice count, pixel size, buffer
    /// offset, stride) rusticl needs from the raw export info, fixing up the
    /// dimensions for array textures, buffers and cube-map faces.
    pub fn get_gl_mem_props(&self) -> CLResult<GLMemProps> {
        // buffers carry no format; treat them as tightly packed bytes
        let pixel_size = if self.is_gl_buffer() {
            1
        } else {
            format_from_gl(self.export_out.internal_format)
                .ok_or(CL_OUT_OF_HOST_MEMORY)?
                .pixel_size()
                .unwrap()
        };

        // NOTE(review): height/depth are narrowed to u16 here — confirm GL
        // texture dimension limits keep these in range.
        let mut height = self.export_out.height as u16;
        let mut depth = self.export_out.depth as u16;
        let mut width = self.export_out.width;
        let mut array_size = 1;
        let mut offset = 0;

        // some fixups
        match self.export_in.target {
            GL_TEXTURE_1D_ARRAY => {
                // GL stores the layer count in height for 1D arrays
                array_size = height;
                height = 1;
                depth = 1;
            }
            GL_TEXTURE_2D_ARRAY => {
                // GL stores the layer count in depth for 2D arrays
                array_size = depth;
                depth = 1;
            }
            GL_ARRAY_BUFFER | GL_TEXTURE_BUFFER => {
                // buffers are 1D byte ranges inside the dma-buf
                array_size = 1;
                width = self.export_out.buf_size as u32;
                offset = self.export_out.buf_offset as u32;
                height = 1;
                depth = 1;
            }
            _ => {}
        }
        // a cube map always exposes all 6 faces as array slices
        if is_cube_map_face(self.export_in.target) {
            array_size = 6;
        }

        Ok(GLMemProps {
            height: height,
            depth: depth,
            width: width,
            offset: offset,
            array_size: array_size,
            pixel_size: pixel_size,
            stride: self.export_out.stride,
        })
    }

    /// True when the exported object is a GL buffer (buffers report no
    /// internal format, i.e. GL_NONE).
    pub fn is_gl_buffer(&self) -> bool {
        self.export_out.internal_format == GL_NONE
    }
}
412
impl Drop for GLExportManager {
    fn drop(&mut self) {
        // We own the dma-buf fd handed out by the export; close it so it
        // doesn't leak.
        unsafe {
            close(self.export_out.dmabuf_fd);
        }
    }
}
420
/// Book-keeping attached to a cl_mem that was created from a GL object.
pub struct GLObject {
    // original GL target (e.g. GL_TEXTURE_2D, GL_ARRAY_BUFFER, a cube face)
    pub gl_object_target: cl_GLenum,
    // CL_GL_OBJECT_* classification reported back to the application
    pub gl_object_type: cl_gl_object_type,
    // GL object name (id) that was shared
    pub gl_object_name: cl_GLuint,
    // CL resource -> GL shadow resource mapping, if a shadow copy is needed
    pub shadow_map: CLGLMappings,
}
427
create_shadow_slice( cube_map: &HashMap<&'static Device, Arc<PipeResource>>, image_format: cl_image_format, ) -> CLResult<HashMap<&'static Device, Arc<PipeResource>>>428 pub fn create_shadow_slice(
429 cube_map: &HashMap<&'static Device, Arc<PipeResource>>,
430 image_format: cl_image_format,
431 ) -> CLResult<HashMap<&'static Device, Arc<PipeResource>>> {
432 let mut slice = HashMap::new();
433
434 for (dev, imported_gl_res) in cube_map {
435 let width = imported_gl_res.width();
436 let height = imported_gl_res.height();
437
438 let shadow = dev
439 .screen()
440 .resource_create_texture(
441 width,
442 height,
443 1,
444 1,
445 cl_mem_type_to_texture_target(CL_MEM_OBJECT_IMAGE2D),
446 image_format.to_pipe_format().unwrap(),
447 ResourceType::Normal,
448 false,
449 )
450 .ok_or(CL_OUT_OF_HOST_MEMORY)?;
451
452 slice.insert(*dev, Arc::new(shadow));
453 }
454
455 Ok(slice)
456 }
457
copy_cube_to_slice(ctx: &QueueContext, mem_objects: &[Mem]) -> CLResult<()>458 pub fn copy_cube_to_slice(ctx: &QueueContext, mem_objects: &[Mem]) -> CLResult<()> {
459 for mem in mem_objects {
460 let Mem::Image(image) = mem else {
461 continue;
462 };
463 let gl_obj = image.gl_obj.as_ref().unwrap();
464 if !is_cube_map_face(gl_obj.gl_object_target) {
465 continue;
466 }
467 let width = image.image_desc.image_width;
468 let height = image.image_desc.image_height;
469
470 // Fill in values for doing the copy
471 let idx = get_array_slice_idx(gl_obj.gl_object_target);
472 let src_origin = CLVec::<usize>::new([0, 0, idx]);
473 let dst_offset: [u32; 3] = [0, 0, 0];
474 let region = CLVec::<usize>::new([width, height, 1]);
475 let src_bx = create_pipe_box(src_origin, region, CL_MEM_OBJECT_IMAGE2D_ARRAY)?;
476
477 let cl_res = image.get_res_for_access(ctx, RWFlags::WR)?;
478 let gl_res = gl_obj.shadow_map.as_ref().unwrap().get(cl_res).unwrap();
479
480 ctx.resource_copy_region(gl_res.as_ref(), cl_res.as_ref(), &dst_offset, &src_bx);
481 }
482
483 Ok(())
484 }
485
copy_slice_to_cube(ctx: &QueueContext, mem_objects: &[Mem]) -> CLResult<()>486 pub fn copy_slice_to_cube(ctx: &QueueContext, mem_objects: &[Mem]) -> CLResult<()> {
487 for mem in mem_objects {
488 let Mem::Image(image) = mem else {
489 continue;
490 };
491 let gl_obj = image.gl_obj.as_ref().unwrap();
492 if !is_cube_map_face(gl_obj.gl_object_target) {
493 continue;
494 }
495 let width = image.image_desc.image_width;
496 let height = image.image_desc.image_height;
497
498 // Fill in values for doing the copy
499 let idx = get_array_slice_idx(gl_obj.gl_object_target) as u32;
500 let src_origin = CLVec::<usize>::new([0, 0, 0]);
501 let dst_offset: [u32; 3] = [0, 0, idx];
502 let region = CLVec::<usize>::new([width, height, 1]);
503 let src_bx = create_pipe_box(src_origin, region, CL_MEM_OBJECT_IMAGE2D_ARRAY)?;
504
505 let cl_res = image.get_res_for_access(ctx, RWFlags::WR)?;
506 let gl_res = gl_obj.shadow_map.as_ref().unwrap().get(cl_res).unwrap();
507
508 ctx.resource_copy_region(cl_res.as_ref(), gl_res.as_ref(), &dst_offset, &src_bx);
509 }
510
511 Ok(())
512 }
513
interop_to_cl_error(error: i32) -> CLError514 pub fn interop_to_cl_error(error: i32) -> CLError {
515 match error.try_into().unwrap() {
516 MESA_GLINTEROP_OUT_OF_RESOURCES => CL_OUT_OF_RESOURCES,
517 MESA_GLINTEROP_OUT_OF_HOST_MEMORY => CL_OUT_OF_HOST_MEMORY,
518 MESA_GLINTEROP_INVALID_OPERATION => CL_INVALID_OPERATION,
519 MESA_GLINTEROP_INVALID_CONTEXT | MESA_GLINTEROP_INVALID_DISPLAY => {
520 CL_INVALID_GL_SHAREGROUP_REFERENCE_KHR
521 }
522 MESA_GLINTEROP_INVALID_TARGET | MESA_GLINTEROP_INVALID_OBJECT => CL_INVALID_GL_OBJECT,
523 MESA_GLINTEROP_INVALID_MIP_LEVEL => CL_INVALID_MIP_LEVEL,
524 _ => CL_OUT_OF_HOST_MEMORY,
525 }
526 }
527
cl_to_interop_flags(flags: u32) -> u32528 pub fn cl_to_interop_flags(flags: u32) -> u32 {
529 match flags {
530 CL_MEM_READ_WRITE => MESA_GLINTEROP_ACCESS_READ_WRITE,
531 CL_MEM_READ_ONLY => MESA_GLINTEROP_ACCESS_READ_ONLY,
532 CL_MEM_WRITE_ONLY => MESA_GLINTEROP_ACCESS_WRITE_ONLY,
533 _ => 0,
534 }
535 }
536
target_from_gl(target: u32) -> CLResult<(u32, u32)>537 pub fn target_from_gl(target: u32) -> CLResult<(u32, u32)> {
538 // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if the OpenGL texture
539 // internal format does not map to a supported OpenCL image format.
540 Ok(match target {
541 GL_ARRAY_BUFFER => (CL_MEM_OBJECT_BUFFER, CL_GL_OBJECT_BUFFER),
542 GL_TEXTURE_BUFFER => (CL_MEM_OBJECT_IMAGE1D_BUFFER, CL_GL_OBJECT_TEXTURE_BUFFER),
543 GL_RENDERBUFFER => (CL_MEM_OBJECT_IMAGE2D, CL_GL_OBJECT_RENDERBUFFER),
544 GL_TEXTURE_1D => (CL_MEM_OBJECT_IMAGE1D, CL_GL_OBJECT_TEXTURE1D),
545 GL_TEXTURE_1D_ARRAY => (CL_MEM_OBJECT_IMAGE1D_ARRAY, CL_GL_OBJECT_TEXTURE1D_ARRAY),
546 GL_TEXTURE_CUBE_MAP_NEGATIVE_X
547 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
548 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
549 | GL_TEXTURE_CUBE_MAP_POSITIVE_X
550 | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
551 | GL_TEXTURE_CUBE_MAP_POSITIVE_Z
552 | GL_TEXTURE_2D
553 | GL_TEXTURE_RECTANGLE => (CL_MEM_OBJECT_IMAGE2D, CL_GL_OBJECT_TEXTURE2D),
554 GL_TEXTURE_2D_ARRAY => (CL_MEM_OBJECT_IMAGE2D_ARRAY, CL_GL_OBJECT_TEXTURE2D_ARRAY),
555 GL_TEXTURE_3D => (CL_MEM_OBJECT_IMAGE3D, CL_GL_OBJECT_TEXTURE3D),
556 _ => return Err(CL_INVALID_VALUE),
557 })
558 }
559
is_valid_gl_texture(target: u32) -> bool560 pub fn is_valid_gl_texture(target: u32) -> bool {
561 matches!(
562 target,
563 GL_TEXTURE_1D
564 | GL_TEXTURE_1D_ARRAY
565 | GL_TEXTURE_BUFFER
566 | GL_TEXTURE_2D_ARRAY
567 | GL_TEXTURE_3D
568 ) || is_valid_gl_texture_2d(target)
569 }
570
is_valid_gl_texture_2d(target: u32) -> bool571 pub fn is_valid_gl_texture_2d(target: u32) -> bool {
572 matches!(
573 target,
574 GL_TEXTURE_2D
575 | GL_TEXTURE_RECTANGLE
576 | GL_TEXTURE_CUBE_MAP_NEGATIVE_X
577 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
578 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
579 | GL_TEXTURE_CUBE_MAP_POSITIVE_X
580 | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
581 | GL_TEXTURE_CUBE_MAP_POSITIVE_Z
582 )
583 }
584
get_array_slice_idx(target: u32) -> usize585 pub fn get_array_slice_idx(target: u32) -> usize {
586 match target {
587 GL_TEXTURE_CUBE_MAP_NEGATIVE_X
588 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
589 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
590 | GL_TEXTURE_CUBE_MAP_POSITIVE_X
591 | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
592 | GL_TEXTURE_CUBE_MAP_POSITIVE_Z => (target - GL_TEXTURE_CUBE_MAP_POSITIVE_X) as usize,
593 _ => 0,
594 }
595 }
596
is_cube_map_face(target: u32) -> bool597 pub fn is_cube_map_face(target: u32) -> bool {
598 matches!(
599 target,
600 GL_TEXTURE_CUBE_MAP_NEGATIVE_X
601 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Y
602 | GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
603 | GL_TEXTURE_CUBE_MAP_POSITIVE_X
604 | GL_TEXTURE_CUBE_MAP_POSITIVE_Y
605 | GL_TEXTURE_CUBE_MAP_POSITIVE_Z
606 )
607 }
608