#![allow(non_upper_case_globals)]

use crate::api::event::create_and_queue;
use crate::api::icd::*;
use crate::api::types::*;
use crate::api::util::*;
use crate::core::context::Context;
use crate::core::device::*;
use crate::core::event::EventSig;
use crate::core::format::*;
use crate::core::gl::*;
use crate::core::memory::*;
use crate::core::queue::*;

use mesa_rust_util::properties::Properties;
use mesa_rust_util::ptr::*;
use mesa_rust_util::static_assert;
use rusticl_opencl_gen::*;
use rusticl_proc_macros::cl_entrypoint;
use rusticl_proc_macros::cl_info_entrypoint;

use std::alloc;
use std::alloc::Layout;
use std::cmp;
use std::cmp::Ordering;
use std::mem::{self, MaybeUninit};
use std::os::raw::c_void;
use std::ptr;
use std::slice;
use std::sync::Arc;

fn validate_mem_flags(flags: cl_mem_flags, images: bool) -> CLResult<()> {
    let mut valid_flags = cl_bitfield::from(
        CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY | CL_MEM_KERNEL_READ_AND_WRITE,
    );

    if !images {
        valid_flags |= cl_bitfield::from(
            CL_MEM_USE_HOST_PTR
                | CL_MEM_ALLOC_HOST_PTR
                | CL_MEM_COPY_HOST_PTR
                | CL_MEM_HOST_WRITE_ONLY
                | CL_MEM_HOST_READ_ONLY
                | CL_MEM_HOST_NO_ACCESS,
        );
    }

    let read_write_group =
        cl_bitfield::from(CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY);

    let alloc_host_group = cl_bitfield::from(CL_MEM_ALLOC_HOST_PTR | CL_MEM_USE_HOST_PTR);

    let copy_host_group = cl_bitfield::from(CL_MEM_COPY_HOST_PTR | CL_MEM_USE_HOST_PTR);

    let host_read_write_group =
        cl_bitfield::from(CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS);

    if (flags & !valid_flags != 0)
        || (flags & read_write_group).count_ones() > 1
        || (flags & alloc_host_group).count_ones() > 1
        || (flags & copy_host_group).count_ones() > 1
        || (flags & host_read_write_group).count_ones() > 1
    {
        return Err(CL_INVALID_VALUE);
    }
    Ok(())
}
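
// Illustrative sketch (not part of the original file): a minimal unit test
// exercising validate_mem_flags above. It assumes the rusticl_opencl_gen
// constants are in scope, as they are for the rest of this module.
#[cfg(test)]
mod validate_mem_flags_sketch {
    use super::*;

    #[test]
    fn rejects_conflicting_and_image_invalid_flags() {
        // CL_MEM_READ_WRITE and CL_MEM_READ_ONLY are mutually exclusive.
        let conflicting = cl_mem_flags::from(CL_MEM_READ_WRITE | CL_MEM_READ_ONLY);
        assert_eq!(validate_mem_flags(conflicting, false), Err(CL_INVALID_VALUE));

        // A single access qualifier is accepted.
        assert!(validate_mem_flags(CL_MEM_READ_WRITE.into(), false).is_ok());

        // Host-pointer flags are not in the valid set when images == true.
        assert_eq!(
            validate_mem_flags(CL_MEM_USE_HOST_PTR.into(), true),
            Err(CL_INVALID_VALUE)
        );
    }
}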

fn validate_map_flags_common(map_flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_VALUE ... if values specified in map_flags are not valid.
    let valid_flags =
        cl_bitfield::from(CL_MAP_READ | CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION);
    let read_write_group = cl_bitfield::from(CL_MAP_READ | CL_MAP_WRITE);
    let invalidate_group = cl_bitfield::from(CL_MAP_WRITE_INVALIDATE_REGION);

    if (map_flags & !valid_flags != 0)
        || ((map_flags & read_write_group != 0) && (map_flags & invalidate_group != 0))
    {
        return Err(CL_INVALID_VALUE);
    }

    Ok(())
}
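
// Illustrative sketch (not part of the original file):
// CL_MAP_WRITE_INVALIDATE_REGION is exclusive with CL_MAP_READ/CL_MAP_WRITE,
// as the check above enforces.
#[cfg(test)]
mod map_flags_sketch {
    use super::*;

    #[test]
    fn invalidate_region_is_exclusive() {
        assert!(validate_map_flags_common((CL_MAP_READ | CL_MAP_WRITE).into()).is_ok());
        assert_eq!(
            validate_map_flags_common((CL_MAP_READ | CL_MAP_WRITE_INVALIDATE_REGION).into()),
            Err(CL_INVALID_VALUE)
        );
    }
}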

fn validate_map_flags(m: &MemBase, map_flags: cl_mem_flags) -> CLResult<()> {
    validate_map_flags_common(map_flags)?;

    // CL_INVALID_OPERATION if buffer has been created with CL_MEM_HOST_WRITE_ONLY or
    // CL_MEM_HOST_NO_ACCESS and CL_MAP_READ is set in map_flags
    if bit_check(m.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) &&
      bit_check(map_flags, CL_MAP_READ) ||
      // or if buffer has been created with CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS and
      // CL_MAP_WRITE or CL_MAP_WRITE_INVALIDATE_REGION is set in map_flags.
      bit_check(m.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) &&
      bit_check(map_flags, CL_MAP_WRITE | CL_MAP_WRITE_INVALIDATE_REGION)
    {
        return Err(CL_INVALID_OPERATION);
    }

    Ok(())
}

fn filter_image_access_flags(flags: cl_mem_flags) -> cl_mem_flags {
    flags
        & (CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY | CL_MEM_KERNEL_READ_AND_WRITE)
            as cl_mem_flags
}

fn inherit_mem_flags(mut flags: cl_mem_flags, mem: &MemBase) -> cl_mem_flags {
    let read_write_mask = cl_bitfield::from(
        CL_MEM_READ_WRITE |
      CL_MEM_WRITE_ONLY |
      CL_MEM_READ_ONLY |
      // not in spec, but...
      CL_MEM_KERNEL_READ_AND_WRITE,
    );
    let host_ptr_mask =
        cl_bitfield::from(CL_MEM_USE_HOST_PTR | CL_MEM_ALLOC_HOST_PTR | CL_MEM_COPY_HOST_PTR);
    let host_mask =
        cl_bitfield::from(CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS);

    // For CL_MEM_OBJECT_IMAGE1D_BUFFER image type, or an image created from another memory object
    // (image or buffer)...
    //
    // ... if the CL_MEM_READ_WRITE, CL_MEM_READ_ONLY or CL_MEM_WRITE_ONLY values are not
    // specified in flags, they are inherited from the corresponding memory access qualifiers
    // associated with mem_object. ...
    if flags & read_write_mask == 0 {
        flags |= mem.flags & read_write_mask;
    }

    // ... The CL_MEM_USE_HOST_PTR, CL_MEM_ALLOC_HOST_PTR and CL_MEM_COPY_HOST_PTR values cannot
    // be specified in flags but are inherited from the corresponding memory access qualifiers
    // associated with mem_object. ...
    flags &= !host_ptr_mask;
    flags |= mem.flags & host_ptr_mask;

    // ... If the CL_MEM_HOST_WRITE_ONLY, CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS values
    // are not specified in flags, they are inherited from the corresponding memory access
    // qualifiers associated with mem_object.
    if flags & host_mask == 0 {
        flags |= mem.flags & host_mask;
    }

    flags
}
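
// Illustrative sketch (not part of the original file): inherit_mem_flags above
// needs a MemBase, so this sketch restates the same three inheritance rules
// over raw flag words to make them checkable in isolation. inherit_raw is a
// hypothetical helper, not rusticl API.
#[cfg(test)]
mod inherit_mem_flags_sketch {
    use super::*;

    fn inherit_raw(mut flags: cl_mem_flags, parent: cl_mem_flags) -> cl_mem_flags {
        let rw = cl_bitfield::from(
            CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY | CL_MEM_READ_ONLY | CL_MEM_KERNEL_READ_AND_WRITE,
        );
        let host_ptr =
            cl_bitfield::from(CL_MEM_USE_HOST_PTR | CL_MEM_ALLOC_HOST_PTR | CL_MEM_COPY_HOST_PTR);
        let host =
            cl_bitfield::from(CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS);

        // access qualifiers are only inherited when none are given explicitly
        if flags & rw == 0 {
            flags |= parent & rw;
        }
        // host-pointer flags always come from the parent
        flags = (flags & !host_ptr) | (parent & host_ptr);
        // host access qualifiers are only inherited when none are given explicitly
        if flags & host == 0 {
            flags |= parent & host;
        }
        flags
    }

    #[test]
    fn host_ptr_flags_come_from_the_parent() {
        let parent = cl_mem_flags::from(CL_MEM_READ_ONLY | CL_MEM_USE_HOST_PTR);
        let child = inherit_raw(CL_MEM_WRITE_ONLY.into(), parent);
        assert_ne!(child & cl_mem_flags::from(CL_MEM_USE_HOST_PTR), 0);
        // the explicit access qualifier wins over the parent's
        assert_eq!(child & cl_mem_flags::from(CL_MEM_READ_ONLY), 0);
    }
}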

fn image_type_valid(image_type: cl_mem_object_type) -> bool {
    CL_IMAGE_TYPES.contains(&image_type)
}

fn validate_addressing_mode(addressing_mode: cl_addressing_mode) -> CLResult<()> {
    match addressing_mode {
        CL_ADDRESS_NONE
        | CL_ADDRESS_CLAMP_TO_EDGE
        | CL_ADDRESS_CLAMP
        | CL_ADDRESS_REPEAT
        | CL_ADDRESS_MIRRORED_REPEAT => Ok(()),
        _ => Err(CL_INVALID_VALUE),
    }
}

fn validate_filter_mode(filter_mode: cl_filter_mode) -> CLResult<()> {
    match filter_mode {
        CL_FILTER_NEAREST | CL_FILTER_LINEAR => Ok(()),
        _ => Err(CL_INVALID_VALUE),
    }
}
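
// Illustrative sketch (not part of the original file): the two validators
// above accept exactly the enumerants listed in their match arms; anything
// else maps to CL_INVALID_VALUE.
#[cfg(test)]
mod sampler_mode_sketch {
    use super::*;

    #[test]
    fn unknown_modes_are_rejected() {
        assert!(validate_addressing_mode(CL_ADDRESS_REPEAT).is_ok());
        assert_eq!(validate_addressing_mode(!0), Err(CL_INVALID_VALUE));
        assert!(validate_filter_mode(CL_FILTER_LINEAR).is_ok());
        assert_eq!(validate_filter_mode(!0), Err(CL_INVALID_VALUE));
    }
}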

fn validate_host_ptr(host_ptr: *mut ::std::os::raw::c_void, flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_HOST_PTR if host_ptr is NULL and CL_MEM_USE_HOST_PTR or CL_MEM_COPY_HOST_PTR are
    // set in flags
    if host_ptr.is_null()
        && flags & (cl_mem_flags::from(CL_MEM_USE_HOST_PTR | CL_MEM_COPY_HOST_PTR)) != 0
    {
        return Err(CL_INVALID_HOST_PTR);
    }

    // or if host_ptr is not NULL but CL_MEM_COPY_HOST_PTR or CL_MEM_USE_HOST_PTR are not set in
    // flags.
    if !host_ptr.is_null()
        && flags & (cl_mem_flags::from(CL_MEM_USE_HOST_PTR | CL_MEM_COPY_HOST_PTR)) == 0
    {
        return Err(CL_INVALID_HOST_PTR);
    }

    Ok(())
}
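
// Illustrative sketch (not part of the original file): validate_host_ptr above
// pairs the nullness of host_ptr with the *_HOST_PTR flags in both directions.
#[cfg(test)]
mod host_ptr_sketch {
    use super::*;

    #[test]
    fn host_ptr_must_match_flags() {
        let mut byte = 0u8;
        let p = &mut byte as *mut u8 as *mut c_void;

        // NULL host_ptr but CL_MEM_COPY_HOST_PTR set: invalid.
        assert_eq!(
            validate_host_ptr(ptr::null_mut(), CL_MEM_COPY_HOST_PTR.into()),
            Err(CL_INVALID_HOST_PTR)
        );
        // non-NULL host_ptr without a *_HOST_PTR flag: invalid.
        assert_eq!(validate_host_ptr(p, 0), Err(CL_INVALID_HOST_PTR));
        // matching combination: ok.
        assert!(validate_host_ptr(p, CL_MEM_USE_HOST_PTR.into()).is_ok());
    }
}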

fn validate_matching_buffer_flags(mem: &MemBase, flags: cl_mem_flags) -> CLResult<()> {
    // CL_INVALID_VALUE if an image is being created from another memory object (buffer or image)
    // under one of the following circumstances:
    //
    // 1) mem_object was created with CL_MEM_WRITE_ONLY and
    //    flags specifies CL_MEM_READ_WRITE or CL_MEM_READ_ONLY,
    if bit_check(mem.flags, CL_MEM_WRITE_ONLY) && bit_check(flags, CL_MEM_READ_WRITE | CL_MEM_READ_ONLY) ||
      // 2) mem_object was created with CL_MEM_READ_ONLY and
      //    flags specifies CL_MEM_READ_WRITE or CL_MEM_WRITE_ONLY,
      bit_check(mem.flags, CL_MEM_READ_ONLY) && bit_check(flags, CL_MEM_READ_WRITE | CL_MEM_WRITE_ONLY) ||
      // 3) flags specifies CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR or CL_MEM_COPY_HOST_PTR.
      bit_check(flags, CL_MEM_USE_HOST_PTR | CL_MEM_ALLOC_HOST_PTR | CL_MEM_COPY_HOST_PTR) ||
      // CL_INVALID_VALUE if an image is being created from another memory object (buffer or image)
      // and mem_object was created with CL_MEM_HOST_WRITE_ONLY and flags specifies CL_MEM_HOST_READ_ONLY
      bit_check(mem.flags, CL_MEM_HOST_WRITE_ONLY) && bit_check(flags, CL_MEM_HOST_READ_ONLY) ||
      // or if mem_object was created with CL_MEM_HOST_READ_ONLY and flags specifies CL_MEM_HOST_WRITE_ONLY
      bit_check(mem.flags, CL_MEM_HOST_READ_ONLY) && bit_check(flags, CL_MEM_HOST_WRITE_ONLY) ||
      // or if mem_object was created with CL_MEM_HOST_NO_ACCESS and flags specifies CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_WRITE_ONLY.
      bit_check(mem.flags, CL_MEM_HOST_NO_ACCESS) && bit_check(flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_WRITE_ONLY)
    {
        return Err(CL_INVALID_VALUE);
    }

    Ok(())
}

#[cl_info_entrypoint(clGetMemObjectInfo)]
unsafe impl CLInfo<cl_mem_info> for cl_mem {
    fn query(&self, q: cl_mem_info, v: CLInfoValue) -> CLResult<CLInfoRes> {
        let mem = MemBase::ref_from_raw(*self)?;
        match q {
            CL_MEM_ASSOCIATED_MEMOBJECT => {
                let ptr = match mem.parent() {
                    // Note we use as_ptr here which doesn't increase the reference count.
                    Some(Mem::Buffer(buffer)) => cl_mem::from_ptr(Arc::as_ptr(buffer)),
                    Some(Mem::Image(image)) => cl_mem::from_ptr(Arc::as_ptr(image)),
                    None => ptr::null_mut(),
                };
                v.write::<cl_mem>(ptr.cast())
            }
            CL_MEM_CONTEXT => {
                // Note we use as_ptr here which doesn't increase the reference count.
                let ptr = Arc::as_ptr(&mem.context);
                v.write::<cl_context>(cl_context::from_ptr(ptr))
            }
            CL_MEM_FLAGS => v.write::<cl_mem_flags>(mem.flags),
            // TODO debugging feature
            CL_MEM_MAP_COUNT => v.write::<cl_uint>(0),
            CL_MEM_HOST_PTR => v.write::<*mut c_void>(mem.host_ptr()),
            CL_MEM_OFFSET => v.write::<usize>(if mem.is_buffer() {
                Buffer::ref_from_raw(*self)?.offset()
            } else {
                0
            }),
            CL_MEM_PROPERTIES => v.write::<&Properties<cl_mem_properties>>(&mem.props),
            CL_MEM_REFERENCE_COUNT => v.write::<cl_uint>(if mem.is_buffer() {
                Buffer::refcnt(*self)?
            } else {
                Image::refcnt(*self)?
            }),
            CL_MEM_SIZE => v.write::<usize>(mem.size),
            CL_MEM_TYPE => v.write::<cl_mem_object_type>(mem.mem_type),
            CL_MEM_USES_SVM_POINTER | CL_MEM_USES_SVM_POINTER_ARM => {
                v.write::<cl_bool>(mem.is_svm().into())
            }
            _ => Err(CL_INVALID_VALUE),
        }
    }
}

#[cl_entrypoint(clCreateBufferWithProperties)]
fn create_buffer_with_properties(
    context: cl_context,
    properties: *const cl_mem_properties,
    flags: cl_mem_flags,
    size: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let c = Context::arc_from_raw(context)?;

    // CL_INVALID_VALUE if values specified in flags are not valid as defined in the Memory Flags table.
    validate_mem_flags(flags, false)?;

    // CL_INVALID_BUFFER_SIZE if size is 0
    if size == 0 {
        return Err(CL_INVALID_BUFFER_SIZE);
    }

    // ... or if size is greater than CL_DEVICE_MAX_MEM_ALLOC_SIZE for all devices in context,
    if checked_compare(size, Ordering::Greater, c.max_mem_alloc()) {
        return Err(CL_INVALID_BUFFER_SIZE);
    }

    validate_host_ptr(host_ptr, flags)?;

    // or if CL_MEM_USE_HOST_PTR is set in flags and host_ptr is a pointer returned by clSVMAlloc
    // and size is greater than the size passed to clSVMAlloc.
    if let Some((svm_ptr, svm_layout)) = c.find_svm_alloc(host_ptr as usize) {
        // SAFETY: they are part of the same allocation, and because host_ptr >= svm_ptr we can cast
        // to usize.
        let diff = unsafe { host_ptr.byte_offset_from(svm_ptr) } as usize;

        // technically we don't have to account for the offset, but it's almost for free.
        if size > svm_layout - diff {
            return Err(CL_INVALID_BUFFER_SIZE);
        }
    }

    // CL_INVALID_PROPERTY [...] if the same property name is specified more than once.
    let props = unsafe { Properties::new(properties) }.ok_or(CL_INVALID_PROPERTY)?;

    // CL_INVALID_PROPERTY if a property name in properties is not a supported property name, if
    // the value specified for a supported property name is not valid, or if the same property name
    // is specified more than once.
    if !props.is_empty() {
        // we don't support any properties
        return Err(CL_INVALID_PROPERTY);
    }

    Ok(MemBase::new_buffer(c, flags, size, host_ptr, props)?.into_cl())
}

#[cl_entrypoint(clCreateBuffer)]
fn create_buffer(
    context: cl_context,
    flags: cl_mem_flags,
    size: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    create_buffer_with_properties(context, ptr::null(), flags, size, host_ptr)
}

#[cl_entrypoint(clCreateSubBuffer)]
fn create_sub_buffer(
    buffer: cl_mem,
    mut flags: cl_mem_flags,
    buffer_create_type: cl_buffer_create_type,
    buffer_create_info: *const ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let b = Buffer::arc_from_raw(buffer)?;

    // CL_INVALID_MEM_OBJECT if buffer ... is a sub-buffer object.
    if b.parent().is_some() {
        return Err(CL_INVALID_MEM_OBJECT);
    }

    validate_matching_buffer_flags(&b, flags)?;

    flags = inherit_mem_flags(flags, &b);
    validate_mem_flags(flags, false)?;

    let (offset, size) = match buffer_create_type {
        CL_BUFFER_CREATE_TYPE_REGION => {
            // buffer_create_info is a pointer to a cl_buffer_region structure specifying a region of
            // the buffer.
            // CL_INVALID_VALUE if value(s) specified in buffer_create_info (for a given
            // buffer_create_type) is not valid or if buffer_create_info is NULL.
            let region = unsafe { buffer_create_info.cast::<cl_buffer_region>().as_ref() }
                .ok_or(CL_INVALID_VALUE)?;

            // CL_INVALID_BUFFER_SIZE if the size field of the cl_buffer_region structure passed in
            // buffer_create_info is 0.
            if region.size == 0 {
                return Err(CL_INVALID_BUFFER_SIZE);
            }

            // CL_INVALID_VALUE if the region specified by the cl_buffer_region structure passed in
            // buffer_create_info is out of bounds in buffer.
            if region.origin >= b.size || region.size > b.size - region.origin {
                return Err(CL_INVALID_VALUE);
            }

            (region.origin, region.size)
        }
        // CL_INVALID_VALUE if the value specified in buffer_create_type is not valid.
        _ => return Err(CL_INVALID_VALUE),
    };

    Ok(MemBase::new_sub_buffer(b, flags, offset, size).into_cl())

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if there are no devices in context associated with buffer for which the origin field of the cl_buffer_region structure passed in buffer_create_info is aligned to the CL_DEVICE_MEM_BASE_ADDR_ALIGN value.
}

#[cl_entrypoint(clSetMemObjectDestructorCallback)]
fn set_mem_object_destructor_callback(
    memobj: cl_mem,
    pfn_notify: Option<FuncMemCB>,
    user_data: *mut ::std::os::raw::c_void,
) -> CLResult<()> {
    let m = MemBase::ref_from_raw(memobj)?;

    // SAFETY: The requirements on `MemCB::new` match the requirements
    // imposed by the OpenCL specification. It is the caller's duty to uphold them.
    let cb = unsafe { MemCB::new(pfn_notify, user_data)? };

    m.cbs.lock().unwrap().push(cb);
    Ok(())
}

fn validate_image_format<'a>(
    image_format: *const cl_image_format,
) -> CLResult<(&'a cl_image_format, u8)> {
    // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR ... if image_format is NULL.
    let format = unsafe { image_format.as_ref() }.ok_or(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)?;
    let pixel_size = format
        .pixel_size()
        .ok_or(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)?;

    // Depth images with an image channel order of CL_DEPTH_STENCIL can only be created using the
    // clCreateFromGLTexture API
    if format.image_channel_order == CL_DEPTH_STENCIL {
        return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR);
    }

    // special validation
    let valid_combination = match format.image_channel_data_type {
        CL_UNORM_SHORT_565 | CL_UNORM_SHORT_555 | CL_UNORM_INT_101010 => {
            [CL_RGB, CL_RGBx].contains(&format.image_channel_order)
        }
        CL_UNORM_INT_101010_2 => format.image_channel_order == CL_RGBA,
        _ => true,
    };
    if !valid_combination {
        return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR);
    }

    Ok((format, pixel_size))
}
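
// Illustrative sketch (not part of the original file): CL_DEPTH_STENCIL is
// reserved for clCreateFromGLTexture, so plain image creation rejects it.
// CL_UNORM_INT24 is just a plausible data type for the example; whether the
// NULL check or the pixel_size check fires first, the error code is the same.
#[cfg(test)]
mod image_format_sketch {
    use super::*;

    #[test]
    fn depth_stencil_is_rejected() {
        let format = cl_image_format {
            image_channel_order: CL_DEPTH_STENCIL,
            image_channel_data_type: CL_UNORM_INT24,
        };
        assert_eq!(
            validate_image_format(&format).err(),
            Some(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)
        );
        // A NULL format pointer fails with the same error.
        assert_eq!(
            validate_image_format(ptr::null()).err(),
            Some(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR)
        );
    }
}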

fn validate_image_desc(
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
    elem_size: usize,
    devs: &[&Device],
) -> CLResult<(cl_image_desc, Option<Mem>)> {
    // CL_INVALID_IMAGE_DESCRIPTOR if values specified in image_desc are not valid
    const err: cl_int = CL_INVALID_IMAGE_DESCRIPTOR;

    // CL_INVALID_IMAGE_DESCRIPTOR ... if image_desc is NULL.
    let mut desc = *unsafe { image_desc.as_ref() }.ok_or(err)?;

    // image_type describes the image type and must be either CL_MEM_OBJECT_IMAGE1D,
    // CL_MEM_OBJECT_IMAGE1D_BUFFER, CL_MEM_OBJECT_IMAGE1D_ARRAY, CL_MEM_OBJECT_IMAGE2D,
    // CL_MEM_OBJECT_IMAGE2D_ARRAY, or CL_MEM_OBJECT_IMAGE3D.
    if !CL_IMAGE_TYPES.contains(&desc.image_type) {
        return Err(err);
    }

    let (dims, array) = desc.type_info();

    // image_width is the width of the image in pixels. For a 2D image and image array, the image
    // width must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_WIDTH. For a 3D image, the image width
    // must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_WIDTH. For a 1D image buffer, the image width
    // must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE_MAX_BUFFER_SIZE. For a 1D image and 1D image array,
    // the image width must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_WIDTH.
    //
    // image_height is the height of the image in pixels. This is only used if the image is a 2D or
    // 3D image, or a 2D image array. For a 2D image or image array, the image height must be a
    // value ≥ 1 and ≤ CL_DEVICE_IMAGE2D_MAX_HEIGHT. For a 3D image, the image height must be a
    // value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_HEIGHT.
    //
    // image_depth is the depth of the image in pixels. This is only used if the image is a 3D image
    // and must be a value ≥ 1 and ≤ CL_DEVICE_IMAGE3D_MAX_DEPTH.
    if desc.image_width < 1
        || desc.image_height < 1 && dims >= 2
        || desc.image_depth < 1 && dims >= 3
        || desc.image_array_size < 1 && array
    {
        return Err(err);
    }

    let max_size = if dims == 3 {
        devs.iter().map(|d| d.image_3d_size()).min()
    } else if desc.image_type == CL_MEM_OBJECT_IMAGE1D_BUFFER {
        devs.iter().map(|d| d.image_buffer_max_size_pixels()).min()
    } else {
        devs.iter().map(|d| d.caps.image_2d_size as usize).min()
    }
    .unwrap();
    let max_array = devs.iter().map(|d| d.image_array_size()).min().unwrap();

    // CL_INVALID_IMAGE_SIZE if image dimensions specified in image_desc exceed the maximum image
    // dimensions described in the Device Queries table for all devices in context.
    if desc.image_width > max_size
        || desc.image_height > max_size && dims >= 2
        || desc.image_depth > max_size && dims >= 3
        || desc.image_array_size > max_array && array
    {
        return Err(CL_INVALID_IMAGE_SIZE);
    }

    // num_mip_levels and num_samples must be 0.
    if desc.num_mip_levels != 0 || desc.num_samples != 0 {
        return Err(err);
    }

    // mem_object may refer to a valid buffer or image memory object. mem_object can be a buffer
    // memory object if image_type is CL_MEM_OBJECT_IMAGE1D_BUFFER or CL_MEM_OBJECT_IMAGE2D.
    // mem_object can be an image object if image_type is CL_MEM_OBJECT_IMAGE2D. Otherwise it must
    // be NULL.
    //
    // TODO: cl_khr_image2d_from_buffer is an optional feature
    let p = unsafe { &desc.anon_1.mem_object };
    let parent = if !p.is_null() {
        let p = MemBase::arc_from_raw(*p)?;
        if !match desc.image_type {
            CL_MEM_OBJECT_IMAGE1D_BUFFER => p.is_buffer(),
            CL_MEM_OBJECT_IMAGE2D => {
                (p.is_buffer() && devs.iter().any(|d| d.image2d_from_buffer_supported()))
                    || p.mem_type == CL_MEM_OBJECT_IMAGE2D
            }
            _ => false,
        } {
            return Err(CL_INVALID_OPERATION);
        }
        Some(p)
    } else {
        None
    };

    // image_row_pitch is the scan-line pitch in bytes. This must be 0 if host_ptr is NULL and can
    // be either 0 or ≥ image_width × size of element in bytes if host_ptr is not NULL. If host_ptr
    // is not NULL and image_row_pitch = 0, image_row_pitch is calculated as image_width × size of
    // element in bytes. If image_row_pitch is not 0, it must be a multiple of the image element
    // size in bytes. For a 2D image created from a buffer, the pitch specified (or computed if
    // pitch specified is 0) must be a multiple of the maximum of the
    // CL_DEVICE_IMAGE_PITCH_ALIGNMENT value for all devices in the context associated with the
    // buffer specified by mem_object that support images.
    //
    // image_slice_pitch is the size in bytes of each 2D slice in the 3D image or the size in bytes
    // of each image in a 1D or 2D image array. This must be 0 if host_ptr is NULL. If host_ptr is
    // not NULL, image_slice_pitch can be either 0 or ≥ image_row_pitch × image_height for a 2D
    // image array or 3D image and can be either 0 or ≥ image_row_pitch for a 1D image array. If
    // host_ptr is not NULL and image_slice_pitch = 0, image_slice_pitch is calculated as
    // image_row_pitch × image_height for a 2D image array or 3D image and image_row_pitch for a 1D
    // image array. If image_slice_pitch is not 0, it must be a multiple of the image_row_pitch.
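    //
    // Worked example (illustrative): a 64×64 2D image with 4-byte texels (e.g.
    // CL_RGBA + CL_UNORM_INT8) and host_ptr == NULL gets image_row_pitch
    // computed as 64 * 4 = 256 bytes and image_slice_pitch as 256 * 64 =
    // 16384 bytes by the code below.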
    let has_buf_parent = parent.as_ref().map_or(false, |p| p.is_buffer());
    if host_ptr.is_null() {
        if (desc.image_row_pitch != 0 || desc.image_slice_pitch != 0) && !has_buf_parent {
            return Err(err);
        }

        if desc.image_row_pitch == 0 {
            desc.image_row_pitch = desc.image_width * elem_size;
        }
        if desc.image_slice_pitch == 0 {
            desc.image_slice_pitch = desc.image_row_pitch * cmp::max(1, desc.image_height);
        }

        if has_buf_parent && desc.image_type != CL_MEM_OBJECT_IMAGE1D_BUFFER {
            let pitch_alignment = devs
                .iter()
                .map(|d| d.image_pitch_alignment())
                .max()
                .unwrap() as usize;
            if desc.image_row_pitch % (pitch_alignment * elem_size) != 0 {
                return Err(err);
            }
        }
    } else {
        if desc.image_row_pitch == 0 {
            desc.image_row_pitch = desc.image_width * elem_size;
        } else if desc.image_row_pitch % elem_size != 0 {
            return Err(err);
        }

        if dims == 3 || array {
            let valid_slice_pitch = desc.image_row_pitch * cmp::max(1, desc.image_height);
            if desc.image_slice_pitch == 0 {
                desc.image_slice_pitch = valid_slice_pitch;
            } else if desc.image_slice_pitch < valid_slice_pitch
                || desc.image_slice_pitch % desc.image_row_pitch != 0
            {
                return Err(err);
            }
        }
    }

    Ok((desc, parent))
}

fn validate_image_bounds(i: &Image, origin: CLVec<usize>, region: CLVec<usize>) -> CLResult<()> {
    let dims = i.image_desc.dims_with_array();
    let bound = region + origin;
    if bound > i.image_desc.size() {
        return Err(CL_INVALID_VALUE);
    }

    // If image is a 2D image object, origin[2] must be 0. If image is a 1D image or 1D image buffer
    // object, origin[1] and origin[2] must be 0. If image is a 1D image array object, origin[2]
    // must be 0.
    if dims < 3 && origin[2] != 0 || dims < 2 && origin[1] != 0 {
        return Err(CL_INVALID_VALUE);
    }

    // If image is a 2D image object, region[2] must be 1. If image is a 1D image or 1D image buffer
    // object, region[1] and region[2] must be 1. If image is a 1D image array object, region[2]
    // must be 1. The values in region cannot be 0.
    if dims < 3 && region[2] != 1 || dims < 2 && region[1] != 1 || region.contains(&0) {
        return Err(CL_INVALID_VALUE);
    }

    Ok(())
}
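
// Worked example (illustrative) for validate_image_bounds above: for a 2D
// image of size (64, 64, 1), origin (32, 32, 0) with region (32, 32, 1) is
// accepted since origin + region stays within the size, while region
// (33, 32, 1) or a non-zero origin[2] fails with CL_INVALID_VALUE.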

fn desc_eq_no_buffer(a: &cl_image_desc, b: &cl_image_desc) -> bool {
    a.image_type == b.image_type
        && a.image_width == b.image_width
        && a.image_height == b.image_height
        && a.image_depth == b.image_depth
        && a.image_array_size == b.image_array_size
        && a.image_row_pitch == b.image_row_pitch
        && a.image_slice_pitch == b.image_slice_pitch
        && a.num_mip_levels == b.num_mip_levels
        && a.num_samples == b.num_samples
}
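
// Illustrative sketch (not part of the original file): desc_eq_no_buffer above
// compares every descriptor field except the mem_object union. Assumes
// cl_image_desc implements Default, as its use with ..Default::default()
// elsewhere in this file suggests.
#[cfg(test)]
mod desc_eq_no_buffer_sketch {
    use super::*;

    #[test]
    fn compares_everything_but_mem_object() {
        let mk = |w: usize| cl_image_desc {
            image_type: CL_MEM_OBJECT_IMAGE2D,
            image_width: w,
            image_height: 64,
            ..Default::default()
        };
        assert!(desc_eq_no_buffer(&mk(64), &mk(64)));
        assert!(!desc_eq_no_buffer(&mk(64), &mk(128)));
    }
}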

fn validate_buffer(
    desc: &cl_image_desc,
    mut flags: cl_mem_flags,
    format: &cl_image_format,
    host_ptr: *mut ::std::os::raw::c_void,
    elem_size: usize,
) -> CLResult<cl_mem_flags> {
    // CL_INVALID_IMAGE_DESCRIPTOR if values specified in image_desc are not valid
    const err: cl_int = CL_INVALID_IMAGE_DESCRIPTOR;
    let mem_object = unsafe { desc.anon_1.mem_object };

    // mem_object may refer to a valid buffer or image memory object. mem_object can be a buffer
    // memory object if image_type is CL_MEM_OBJECT_IMAGE1D_BUFFER or CL_MEM_OBJECT_IMAGE2D.
    // mem_object can be an image object if image_type is CL_MEM_OBJECT_IMAGE2D. Otherwise it must
    // be NULL. The image pixels are taken from the memory object's data store. When the contents of
    // the specified memory object's data store are modified, those changes are reflected in the
    // contents of the image object and vice-versa at corresponding synchronization points.
    if !mem_object.is_null() {
        let mem = MemBase::ref_from_raw(mem_object)?;

        match mem.mem_type {
            CL_MEM_OBJECT_BUFFER => {
                match desc.image_type {
                    // For a 1D image buffer created from a buffer object, the image_width × size of
                    // element in bytes must be ≤ size of the buffer object.
                    CL_MEM_OBJECT_IMAGE1D_BUFFER => {
                        if desc.image_width * elem_size > mem.size {
                            return Err(err);
                        }
                    }
                    // For a 2D image created from a buffer object, the image_row_pitch × image_height
                    // must be ≤ size of the buffer object specified by mem_object.
                    CL_MEM_OBJECT_IMAGE2D => {
                        // TODO
                        // • CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if a 2D image is created from a buffer and the row pitch and base address alignment does not follow the rules described for creating a 2D image from a buffer.
                        if desc.image_row_pitch * desc.image_height > mem.size {
                            return Err(err);
                        }

                        // If the buffer object specified by mem_object was created with
                        // CL_MEM_USE_HOST_PTR, the host_ptr specified to clCreateBuffer or
                        // clCreateBufferWithProperties must be aligned to the maximum of the
                        // CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT value for all devices in the
                        // context associated with the buffer specified by mem_object that support
                        // images.
                        if mem.flags & CL_MEM_USE_HOST_PTR as cl_mem_flags != 0 {
                            for dev in &mem.context.devs {
                                // CL_DEVICE_IMAGE_BASE_ADDRESS_ALIGNMENT is only relevant for 2D
                                // images created from a buffer object.
                                let addr_alignment = dev.image_base_address_alignment();
                                if addr_alignment == 0 {
                                    return Err(CL_INVALID_OPERATION);
                                } else if !is_alligned(host_ptr, addr_alignment as usize) {
                                    return Err(err);
                                }
                            }
                        }
                    }
                    _ => return Err(err),
                }
            }
            // For an image object created from another image object, the values specified in the
            // image descriptor except for mem_object must match the image descriptor information
            // associated with mem_object.
            CL_MEM_OBJECT_IMAGE2D => {
                let image = Image::ref_from_raw(mem_object).unwrap();
                if desc.image_type != mem.mem_type || !desc_eq_no_buffer(desc, &image.image_desc) {
                    return Err(err);
                }

                // CL_INVALID_IMAGE_FORMAT_DESCRIPTOR if a 2D image is created from a 2D image object
                // and the rules described above are not followed.

                // Creating a 2D image object from another 2D image object creates a new 2D image
                // object that shares the image data store with mem_object but views the pixels in the
                // image with a different image channel order. Restrictions are:
                //
                // The image channel data type specified in image_format must match the image channel
                // data type associated with mem_object.
                if format.image_channel_data_type != image.image_format.image_channel_data_type {
                    return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR);
                }

                // The image channel order specified in image_format must be compatible with the image
                // channel order associated with mem_object. Compatible image channel orders are:
                if format.image_channel_order != image.image_format.image_channel_order {
                    // in image_format | in mem_object:
                    // CL_sBGRA | CL_BGRA
                    // CL_BGRA  | CL_sBGRA
                    // CL_sRGBA | CL_RGBA
                    // CL_RGBA  | CL_sRGBA
                    // CL_sRGB  | CL_RGB
                    // CL_RGB   | CL_sRGB
                    // CL_sRGBx | CL_RGBx
                    // CL_RGBx  | CL_sRGBx
                    // CL_DEPTH | CL_R
                    match (
                        format.image_channel_order,
                        image.image_format.image_channel_order,
                    ) {
                        (CL_sBGRA, CL_BGRA)
                        | (CL_BGRA, CL_sBGRA)
                        | (CL_sRGBA, CL_RGBA)
                        | (CL_RGBA, CL_sRGBA)
                        | (CL_sRGB, CL_RGB)
                        | (CL_RGB, CL_sRGB)
                        | (CL_sRGBx, CL_RGBx)
                        | (CL_RGBx, CL_sRGBx)
                        | (CL_DEPTH, CL_R) => (),
                        _ => return Err(CL_INVALID_IMAGE_FORMAT_DESCRIPTOR),
                    }
                }
            }
            _ => return Err(err),
        }

        validate_matching_buffer_flags(mem, flags)?;

        flags = inherit_mem_flags(flags, mem);
    // implied by spec
    } else if desc.image_type == CL_MEM_OBJECT_IMAGE1D_BUFFER {
        return Err(err);
    }

    Ok(flags)
}

#[cl_info_entrypoint(clGetImageInfo)]
unsafe impl CLInfo<cl_image_info> for cl_mem {
    fn query(&self, q: cl_image_info, v: CLInfoValue) -> CLResult<CLInfoRes> {
        let mem = Image::ref_from_raw(*self)?;
        match q {
            CL_IMAGE_ARRAY_SIZE => v.write::<usize>(mem.image_desc.image_array_size),
            CL_IMAGE_BUFFER => v.write::<cl_mem>(unsafe { mem.image_desc.anon_1.buffer }),
            CL_IMAGE_DEPTH => v.write::<usize>(mem.image_desc.image_depth),
            CL_IMAGE_ELEMENT_SIZE => v.write::<usize>(mem.image_elem_size.into()),
            CL_IMAGE_FORMAT => v.write::<cl_image_format>(mem.image_format),
            CL_IMAGE_HEIGHT => v.write::<usize>(mem.image_desc.image_height),
            CL_IMAGE_NUM_MIP_LEVELS => v.write::<cl_uint>(mem.image_desc.num_mip_levels),
            CL_IMAGE_NUM_SAMPLES => v.write::<cl_uint>(mem.image_desc.num_samples),
            CL_IMAGE_ROW_PITCH => v.write::<usize>(mem.image_desc.image_row_pitch),
            CL_IMAGE_SLICE_PITCH => v.write::<usize>(if mem.image_desc.dims() == 1 {
                0
            } else {
                mem.image_desc.image_slice_pitch
            }),
            CL_IMAGE_WIDTH => v.write::<usize>(mem.image_desc.image_width),
            _ => Err(CL_INVALID_VALUE),
        }
    }
}

#[cl_entrypoint(clCreateImageWithProperties)]
fn create_image_with_properties(
    context: cl_context,
    properties: *const cl_mem_properties,
    mut flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let c = Context::arc_from_raw(context)?;

    // CL_INVALID_OPERATION if there are no devices in context that support images (i.e.
    // CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
    c.devs
        .iter()
        .find(|d| d.caps.has_images)
        .ok_or(CL_INVALID_OPERATION)?;

    let (format, elem_size) = validate_image_format(image_format)?;
    let (desc, parent) = validate_image_desc(image_desc, host_ptr, elem_size.into(), &c.devs)?;

    // validate host_ptr before merging flags
    validate_host_ptr(host_ptr, flags)?;

    flags = validate_buffer(&desc, flags, format, host_ptr, elem_size.into())?;

    // For all image types except CL_MEM_OBJECT_IMAGE1D_BUFFER, if the value specified for flags is 0, the
    // default is used which is CL_MEM_READ_WRITE.
    if flags == 0 && desc.image_type != CL_MEM_OBJECT_IMAGE1D_BUFFER {
        flags = CL_MEM_READ_WRITE.into();
    }

    validate_mem_flags(flags, false)?;

    let filtered_flags = filter_image_access_flags(flags);
    // CL_IMAGE_FORMAT_NOT_SUPPORTED if there are no devices in context that support image_format.
    c.devs
        .iter()
        .filter_map(|d| d.formats.get(format))
        .filter_map(|f| f.get(&desc.image_type))
        .find(|f| *f & filtered_flags == filtered_flags)
        .ok_or(CL_IMAGE_FORMAT_NOT_SUPPORTED)?;

    // CL_INVALID_PROPERTY [...] if the same property name is specified more than once.
    let props = unsafe { Properties::new(properties) }.ok_or(CL_INVALID_PROPERTY)?;

    // CL_INVALID_PROPERTY if a property name in properties is not a supported property name, if
    // the value specified for a supported property name is not valid, or if the same property name
    // is specified more than once.
    if !props.is_empty() {
        // we don't support any properties
        return Err(CL_INVALID_PROPERTY);
    }

    Ok(MemBase::new_image(c, parent, flags, format, desc, elem_size, host_ptr, props)?.into_cl())
}

#[cl_entrypoint(clCreateImage)]
fn create_image(
    context: cl_context,
    flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_desc: *const cl_image_desc,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    create_image_with_properties(
        context,
        ptr::null(),
        flags,
        image_format,
        image_desc,
        host_ptr,
    )
}

#[cl_entrypoint(clCreateImage2D)]
fn create_image_2d(
    context: cl_context,
    flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_width: usize,
    image_height: usize,
    image_row_pitch: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let image_desc = cl_image_desc {
        image_type: CL_MEM_OBJECT_IMAGE2D,
        image_width: image_width,
        image_height: image_height,
        image_row_pitch: image_row_pitch,
        ..Default::default()
    };

    create_image(context, flags, image_format, &image_desc, host_ptr)
}

#[cl_entrypoint(clCreateImage3D)]
fn create_image_3d(
    context: cl_context,
    flags: cl_mem_flags,
    image_format: *const cl_image_format,
    image_width: usize,
    image_height: usize,
    image_depth: usize,
    image_row_pitch: usize,
    image_slice_pitch: usize,
    host_ptr: *mut ::std::os::raw::c_void,
) -> CLResult<cl_mem> {
    let image_desc = cl_image_desc {
        image_type: CL_MEM_OBJECT_IMAGE3D,
        image_width: image_width,
        image_height: image_height,
        image_depth: image_depth,
        image_row_pitch: image_row_pitch,
        image_slice_pitch: image_slice_pitch,
        ..Default::default()
    };

    create_image(context, flags, image_format, &image_desc, host_ptr)
}

#[cl_entrypoint(clGetSupportedImageFormats)]
fn get_supported_image_formats(
    context: cl_context,
    flags: cl_mem_flags,
    image_type: cl_mem_object_type,
    num_entries: cl_uint,
    image_formats: *mut cl_image_format,
    num_image_formats: *mut cl_uint,
) -> CLResult<()> {
    let c = Context::ref_from_raw(context)?;

    // CL_INVALID_VALUE if flags
    validate_mem_flags(flags, true)?;

    // or image_type are not valid
    if !image_type_valid(image_type) {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE ... if num_entries is 0 and image_formats is not NULL.
    if num_entries == 0 && !image_formats.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let mut res = Vec::<cl_image_format>::new();
    let filtered_flags = filter_image_access_flags(flags);
    for dev in &c.devs {
        for f in &dev.formats {
            let s = f.1.get(&image_type).unwrap_or(&0);

            if filtered_flags & s == filtered_flags {
                res.push(*f.0);
            }
        }
    }

    res.sort();
    res.dedup();

    num_image_formats.write_checked(res.len() as cl_uint);
    unsafe { image_formats.copy_checked(res.as_ptr(), res.len()) };

    Ok(())
}

#[cl_info_entrypoint(clGetSamplerInfo)]
unsafe impl CLInfo<cl_sampler_info> for cl_sampler {
    fn query(&self, q: cl_sampler_info, v: CLInfoValue) -> CLResult<CLInfoRes> {
        let sampler = Sampler::ref_from_raw(*self)?;
        match q {
            CL_SAMPLER_ADDRESSING_MODE => v.write::<cl_addressing_mode>(sampler.addressing_mode),
            CL_SAMPLER_CONTEXT => {
                // Note we use as_ptr here which doesn't increase the reference count.
                let ptr = Arc::as_ptr(&sampler.context);
                v.write::<cl_context>(cl_context::from_ptr(ptr))
            }
            CL_SAMPLER_FILTER_MODE => v.write::<cl_filter_mode>(sampler.filter_mode),
            CL_SAMPLER_NORMALIZED_COORDS => v.write::<bool>(sampler.normalized_coords),
            CL_SAMPLER_REFERENCE_COUNT => v.write::<cl_uint>(Sampler::refcnt(*self)?),
            CL_SAMPLER_PROPERTIES => v.write::<&Properties<cl_sampler_properties>>(&sampler.props),
            // CL_INVALID_VALUE if param_name is not one of the supported values
            _ => Err(CL_INVALID_VALUE),
        }
    }
}

fn create_sampler_impl(
    context: cl_context,
    normalized_coords: cl_bool,
    addressing_mode: cl_addressing_mode,
    filter_mode: cl_filter_mode,
    props: Properties<cl_sampler_properties>,
) -> CLResult<cl_sampler> {
    let c = Context::arc_from_raw(context)?;

    // CL_INVALID_OPERATION if images are not supported by any device associated with context (i.e.
    // CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
    c.devs
        .iter()
        .find(|d| d.caps.has_images)
        .ok_or(CL_INVALID_OPERATION)?;

    // CL_INVALID_VALUE if addressing_mode, filter_mode, normalized_coords or a combination of these
    // arguments are not valid.
    validate_addressing_mode(addressing_mode)?;
    validate_filter_mode(filter_mode)?;

    let sampler = Sampler::new(
        c,
        check_cl_bool(normalized_coords).ok_or(CL_INVALID_VALUE)?,
        addressing_mode,
        filter_mode,
        props,
    );
    Ok(sampler.into_cl())
}

#[cl_entrypoint(clCreateSampler)]
fn create_sampler(
    context: cl_context,
    normalized_coords: cl_bool,
    addressing_mode: cl_addressing_mode,
    filter_mode: cl_filter_mode,
) -> CLResult<cl_sampler> {
    create_sampler_impl(
        context,
        normalized_coords,
        addressing_mode,
        filter_mode,
        Properties::default(),
    )
}

#[cl_entrypoint(clCreateSamplerWithProperties)]
fn create_sampler_with_properties(
    context: cl_context,
    sampler_properties: *const cl_sampler_properties,
) -> CLResult<cl_sampler> {
    let mut normalized_coords = CL_TRUE;
    let mut addressing_mode = CL_ADDRESS_CLAMP;
    let mut filter_mode = CL_FILTER_NEAREST;

    // CL_INVALID_VALUE if the same property name is specified more than once.
    // SAFETY: sampler_properties is a 0 terminated array by spec.
    let sampler_properties =
        unsafe { Properties::new(sampler_properties) }.ok_or(CL_INVALID_VALUE)?;
    for (&key, &val) in sampler_properties.iter() {
        match key as u32 {
            CL_SAMPLER_ADDRESSING_MODE => addressing_mode = val as u32,
            CL_SAMPLER_FILTER_MODE => filter_mode = val as u32,
            CL_SAMPLER_NORMALIZED_COORDS => normalized_coords = val as u32,
            // CL_INVALID_VALUE if the property name in sampler_properties is not a supported
            // property name
            _ => return Err(CL_INVALID_VALUE),
        }
    }

    create_sampler_impl(
        context,
        normalized_coords,
        addressing_mode,
        filter_mode,
        sampler_properties,
    )
}
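
// Illustrative note (not part of the original file): the sampler_properties
// array parsed above is the usual zero-terminated OpenCL property list of
// key/value pairs, each entry widened to cl_sampler_properties, e.g.
// [CL_SAMPLER_NORMALIZED_COORDS, CL_FALSE, CL_SAMPLER_FILTER_MODE,
//  CL_FILTER_LINEAR, 0].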

#[cl_entrypoint(clRetainSampler)]
fn retain_sampler(sampler: cl_sampler) -> CLResult<()> {
    Sampler::retain(sampler)
}

#[cl_entrypoint(clReleaseSampler)]
fn release_sampler(sampler: cl_sampler) -> CLResult<()> {
    Sampler::release(sampler)
}

#[cl_entrypoint(clEnqueueReadBuffer)]
fn enqueue_read_buffer(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    blocking_read: cl_bool,
    offset: usize,
    cb: usize,
    ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let b = Buffer::arc_from_raw(buffer)?;
    let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE if the region being read or written specified by (offset, size) is out of
    // bounds or if ptr is a NULL value.
    if offset + cb > b.size || ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
    if b.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_OPERATION if clEnqueueReadBuffer is called on buffer which has been created with
    // CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(b.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { MutMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_READ_BUFFER,
        evs,
        event,
        block,
        Box::new(move |_, ctx| b.read(ctx, offset, ptr, cb)),
    )

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
}

#[cl_entrypoint(clEnqueueWriteBuffer)]
fn enqueue_write_buffer(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    blocking_write: cl_bool,
    offset: usize,
    cb: usize,
    ptr: *const ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let b = Buffer::arc_from_raw(buffer)?;
    let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE if the region being read or written specified by (offset, size) is out of
    // bounds or if ptr is a NULL value.
    if offset + cb > b.size || ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
    if b.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_OPERATION if clEnqueueWriteBuffer is called on buffer which has been created with
    // CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(b.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { ConstMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_WRITE_BUFFER,
        evs,
        event,
        block,
        Box::new(move |_, ctx| b.write(ctx, offset, ptr, cb)),
    )

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
}

#[cl_entrypoint(clEnqueueCopyBuffer)]
fn enqueue_copy_buffer(
    command_queue: cl_command_queue,
    src_buffer: cl_mem,
    dst_buffer: cl_mem,
    src_offset: usize,
    dst_offset: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let src = Buffer::arc_from_raw(src_buffer)?;
    let dst = Buffer::arc_from_raw(dst_buffer)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_buffer
    // are not the same
    if q.context != src.context || q.context != dst.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_VALUE if src_offset, dst_offset, size, src_offset + size or dst_offset + size
    // require accessing elements outside the src_buffer and dst_buffer buffer objects respectively.
    if src_offset + size > src.size || dst_offset + size > dst.size {
        return Err(CL_INVALID_VALUE);
    }

    // CL_MEM_COPY_OVERLAP if src_buffer and dst_buffer are the same buffer or sub-buffer object
    // and the source and destination regions overlap or if src_buffer and dst_buffer are different
    // sub-buffers of the same associated buffer object and they overlap. The regions overlap if
    // src_offset ≤ dst_offset ≤ src_offset + size - 1 or if dst_offset ≤ src_offset ≤ dst_offset + size - 1.
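    //
    // Worked example (illustrative): with size = 16, src_offset = 0 and
    // dst_offset = 8, the source occupies bytes [0, 15] and the destination
    // bytes [8, 23]; the ranges intersect, so the check below returns
    // CL_MEM_COPY_OVERLAP.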
    if src.backing_memory_eq(&dst) {
        let src_offset = src_offset + src.offset();
        let dst_offset = dst_offset + dst.offset();

        if (src_offset <= dst_offset && dst_offset < src_offset + size)
            || (dst_offset <= src_offset && src_offset < dst_offset + size)
        {
            return Err(CL_MEM_COPY_OVERLAP);
        }
    }

    create_and_queue(
        q,
        CL_COMMAND_COPY_BUFFER,
        evs,
        event,
        false,
        Box::new(move |_, ctx| src.copy_to_buffer(ctx, &dst, src_offset, dst_offset, size)),
    )

    // TODO
    // • CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
    // • CL_MISALIGNED_SUB_BUFFER_OFFSET if dst_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
    // • CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_buffer or dst_buffer.
}
1193 
#[cl_entrypoint(clEnqueueReadBufferRect)]
fn enqueue_read_buffer_rect(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    blocking_read: cl_bool,
    buffer_origin: *const usize,
    host_origin: *const usize,
    region: *const usize,
    mut buffer_row_pitch: usize,
    mut buffer_slice_pitch: usize,
    mut host_row_pitch: usize,
    mut host_slice_pitch: usize,
    ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
    let q = Queue::arc_from_raw(command_queue)?;
    let buf = Buffer::arc_from_raw(buffer)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if clEnqueueReadBufferRect is called on buffer which has been created
    // with CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(buf.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if buffer_origin, host_origin, or region is NULL.
    if buffer_origin.is_null() ||
      host_origin.is_null() ||
      region.is_null() ||
      // CL_INVALID_VALUE if ptr is NULL.
      ptr.is_null()
    {
        return Err(CL_INVALID_VALUE);
    }

    let r = unsafe { CLVec::from_raw(region) };
    let buf_ori = unsafe { CLVec::from_raw(buffer_origin) };
    let host_ori = unsafe { CLVec::from_raw(host_origin) };

    // CL_INVALID_VALUE if any region array element is 0.
    if r.contains(&0) ||
      // CL_INVALID_VALUE if buffer_row_pitch is not 0 and is less than region[0].
      buffer_row_pitch != 0 && buffer_row_pitch < r[0] ||
      // CL_INVALID_VALUE if host_row_pitch is not 0 and is less than region[0].
      host_row_pitch != 0 && host_row_pitch < r[0]
    {
        return Err(CL_INVALID_VALUE);
    }

    // If buffer_row_pitch is 0, buffer_row_pitch is computed as region[0].
    if buffer_row_pitch == 0 {
        buffer_row_pitch = r[0];
    }

    // If host_row_pitch is 0, host_row_pitch is computed as region[0].
    if host_row_pitch == 0 {
        host_row_pitch = r[0];
    }

    // CL_INVALID_VALUE if buffer_slice_pitch is not 0 and is less than region[1] × buffer_row_pitch and not a multiple of buffer_row_pitch.
    if buffer_slice_pitch != 0 && buffer_slice_pitch < r[1] * buffer_row_pitch && buffer_slice_pitch % buffer_row_pitch != 0 ||
      // CL_INVALID_VALUE if host_slice_pitch is not 0 and is less than region[1] × host_row_pitch and not a multiple of host_row_pitch.
      host_slice_pitch != 0 && host_slice_pitch < r[1] * host_row_pitch && host_slice_pitch % host_row_pitch != 0
    {
        return Err(CL_INVALID_VALUE);
    }

    // If buffer_slice_pitch is 0, buffer_slice_pitch is computed as region[1] × buffer_row_pitch.
    if buffer_slice_pitch == 0 {
        buffer_slice_pitch = r[1] * buffer_row_pitch;
    }

    // If host_slice_pitch is 0, host_slice_pitch is computed as region[1] × host_row_pitch.
    if host_slice_pitch == 0 {
        host_slice_pitch = r[1] * host_row_pitch;
    }
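
    // Example of the defaulting above (hypothetical numbers): for
    // region = [4, 2, 3] with all four pitches passed as 0, both row pitches
    // default to 4 and both slice pitches to 2 * 4 = 8, i.e. a tightly packed
    // layout with no padding between rows or slices.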

    // CL_INVALID_VALUE if the region being read or written specified by (buffer_origin, region,
    // buffer_row_pitch, buffer_slice_pitch) is out of bounds.
    if CLVec::calc_size(r + buf_ori, [1, buffer_row_pitch, buffer_slice_pitch]) > buf.size {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
    if q.context != buf.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { MutMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_READ_BUFFER_RECT,
        evs,
        event,
        block,
        Box::new(move |_, ctx| {
            buf.read_rect(
                ptr,
                ctx,
                &r,
                &buf_ori,
                buffer_row_pitch,
                buffer_slice_pitch,
                &host_ori,
                host_row_pitch,
                host_slice_pitch,
            )
        }),
    )

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
}

#[cl_entrypoint(clEnqueueWriteBufferRect)]
fn enqueue_write_buffer_rect(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    blocking_write: cl_bool,
    buffer_origin: *const usize,
    host_origin: *const usize,
    region: *const usize,
    mut buffer_row_pitch: usize,
    mut buffer_slice_pitch: usize,
    mut host_row_pitch: usize,
    mut host_slice_pitch: usize,
    ptr: *const ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
    let q = Queue::arc_from_raw(command_queue)?;
    let buf = Buffer::arc_from_raw(buffer)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if clEnqueueWriteBufferRect is called on buffer which has been created
    // with CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(buf.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if buffer_origin, host_origin, or region is NULL.
    if buffer_origin.is_null() ||
      host_origin.is_null() ||
      region.is_null() ||
      // CL_INVALID_VALUE if ptr is NULL.
      ptr.is_null()
    {
        return Err(CL_INVALID_VALUE);
    }

    let r = unsafe { CLVec::from_raw(region) };
    let buf_ori = unsafe { CLVec::from_raw(buffer_origin) };
    let host_ori = unsafe { CLVec::from_raw(host_origin) };

    // CL_INVALID_VALUE if any region array element is 0.
    if r.contains(&0) ||
      // CL_INVALID_VALUE if buffer_row_pitch is not 0 and is less than region[0].
      buffer_row_pitch != 0 && buffer_row_pitch < r[0] ||
      // CL_INVALID_VALUE if host_row_pitch is not 0 and is less than region[0].
      host_row_pitch != 0 && host_row_pitch < r[0]
    {
        return Err(CL_INVALID_VALUE);
    }

    // If buffer_row_pitch is 0, buffer_row_pitch is computed as region[0].
    if buffer_row_pitch == 0 {
        buffer_row_pitch = r[0];
    }

    // If host_row_pitch is 0, host_row_pitch is computed as region[0].
    if host_row_pitch == 0 {
        host_row_pitch = r[0];
    }

    // CL_INVALID_VALUE if buffer_slice_pitch is not 0 and is less than region[1] × buffer_row_pitch and not a multiple of buffer_row_pitch.
    if buffer_slice_pitch != 0 && buffer_slice_pitch < r[1] * buffer_row_pitch && buffer_slice_pitch % buffer_row_pitch != 0 ||
      // CL_INVALID_VALUE if host_slice_pitch is not 0 and is less than region[1] × host_row_pitch and not a multiple of host_row_pitch.
      host_slice_pitch != 0 && host_slice_pitch < r[1] * host_row_pitch && host_slice_pitch % host_row_pitch != 0
    {
        return Err(CL_INVALID_VALUE);
    }

    // If buffer_slice_pitch is 0, buffer_slice_pitch is computed as region[1] × buffer_row_pitch.
    if buffer_slice_pitch == 0 {
        buffer_slice_pitch = r[1] * buffer_row_pitch;
    }

    // If host_slice_pitch is 0, host_slice_pitch is computed as region[1] × host_row_pitch.
    if host_slice_pitch == 0 {
        host_slice_pitch = r[1] * host_row_pitch;
    }

    // CL_INVALID_VALUE if the region being read or written specified by (buffer_origin, region,
    // buffer_row_pitch, buffer_slice_pitch) is out of bounds.
    if CLVec::calc_size(r + buf_ori, [1, buffer_row_pitch, buffer_slice_pitch]) > buf.size {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
    if q.context != buf.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { ConstMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_WRITE_BUFFER_RECT,
        evs,
        event,
        block,
        Box::new(move |_, ctx| {
            buf.write_rect(
                ptr,
                ctx,
                &r,
                &host_ori,
                host_row_pitch,
                host_slice_pitch,
                &buf_ori,
                buffer_row_pitch,
                buffer_slice_pitch,
            )
        }),
    )

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
}

#[cl_entrypoint(clEnqueueCopyBufferRect)]
fn enqueue_copy_buffer_rect(
    command_queue: cl_command_queue,
    src_buffer: cl_mem,
    dst_buffer: cl_mem,
    src_origin: *const usize,
    dst_origin: *const usize,
    region: *const usize,
    mut src_row_pitch: usize,
    mut src_slice_pitch: usize,
    mut dst_row_pitch: usize,
    mut dst_slice_pitch: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let src = Buffer::arc_from_raw(src_buffer)?;
    let dst = Buffer::arc_from_raw(dst_buffer)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE if src_origin, dst_origin, or region is NULL.
    if src_origin.is_null() || dst_origin.is_null() || region.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let r = unsafe { CLVec::from_raw(region) };
    let src_ori = unsafe { CLVec::from_raw(src_origin) };
    let dst_ori = unsafe { CLVec::from_raw(dst_origin) };

    // CL_INVALID_VALUE if any region array element is 0.
    if r.contains(&0) ||
      // CL_INVALID_VALUE if src_row_pitch is not 0 and is less than region[0].
      src_row_pitch != 0 && src_row_pitch < r[0] ||
      // CL_INVALID_VALUE if dst_row_pitch is not 0 and is less than region[0].
      dst_row_pitch != 0 && dst_row_pitch < r[0]
    {
        return Err(CL_INVALID_VALUE);
    }

    // If src_row_pitch is 0, src_row_pitch is computed as region[0].
    if src_row_pitch == 0 {
        src_row_pitch = r[0];
    }

    // If dst_row_pitch is 0, dst_row_pitch is computed as region[0].
    if dst_row_pitch == 0 {
        dst_row_pitch = r[0];
    }

    // CL_INVALID_VALUE if src_slice_pitch is not 0 and is less than region[1] × src_row_pitch
    if src_slice_pitch != 0 && src_slice_pitch < r[1] * src_row_pitch ||
      // CL_INVALID_VALUE if dst_slice_pitch is not 0 and is less than region[1] × dst_row_pitch
      dst_slice_pitch != 0 && dst_slice_pitch < r[1] * dst_row_pitch ||
      // if src_slice_pitch is not 0 and is not a multiple of src_row_pitch.
      src_slice_pitch != 0 && src_slice_pitch % src_row_pitch != 0 ||
      // if dst_slice_pitch is not 0 and is not a multiple of dst_row_pitch.
      dst_slice_pitch != 0 && dst_slice_pitch % dst_row_pitch != 0
    {
        return Err(CL_INVALID_VALUE);
    }

    // If src_slice_pitch is 0, src_slice_pitch is computed as region[1] × src_row_pitch.
    if src_slice_pitch == 0 {
        src_slice_pitch = r[1] * src_row_pitch;
    }

    // If dst_slice_pitch is 0, dst_slice_pitch is computed as region[1] × dst_row_pitch.
    if dst_slice_pitch == 0 {
        dst_slice_pitch = r[1] * dst_row_pitch;
    }

    // CL_INVALID_VALUE if src_buffer and dst_buffer are the same buffer object and src_slice_pitch
    // is not equal to dst_slice_pitch and src_row_pitch is not equal to dst_row_pitch.
    if src_buffer == dst_buffer
        && src_slice_pitch != dst_slice_pitch
        && src_row_pitch != dst_row_pitch
    {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if (src_origin, region, src_row_pitch, src_slice_pitch) or (dst_origin,
    // region, dst_row_pitch, dst_slice_pitch) require accessing elements outside the src_buffer
    // and dst_buffer buffer objects respectively.
    if CLVec::calc_size(r + src_ori, [1, src_row_pitch, src_slice_pitch]) > src.size
        || CLVec::calc_size(r + dst_ori, [1, dst_row_pitch, dst_slice_pitch]) > dst.size
    {
        return Err(CL_INVALID_VALUE);
    }

    // CL_MEM_COPY_OVERLAP if src_buffer and dst_buffer are the same buffer or sub-buffer object and
    // the source and destination regions overlap or if src_buffer and dst_buffer are different
    // sub-buffers of the same associated buffer object and they overlap.
    if src.backing_memory_eq(&dst)
        && check_copy_overlap(
            &src_ori,
            src.offset(),
            &dst_ori,
            dst.offset(),
            &r,
            src_row_pitch,
            src_slice_pitch,
        )
    {
        return Err(CL_MEM_COPY_OVERLAP);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_buffer
    // are not the same
    if src.context != q.context || dst.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    create_and_queue(
        q,
        CL_COMMAND_COPY_BUFFER_RECT,
        evs,
        event,
        false,
        Box::new(move |_, ctx| {
            src.copy_rect(
                &dst,
                ctx,
                &r,
                &src_ori,
                src_row_pitch,
                src_slice_pitch,
                &dst_ori,
                dst_row_pitch,
                dst_slice_pitch,
            )
        }),
    )

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
}

#[cl_entrypoint(clEnqueueFillBuffer)]
fn enqueue_fill_buffer(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    pattern: *const ::std::os::raw::c_void,
    pattern_size: usize,
    offset: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let b = Buffer::arc_from_raw(buffer)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE if offset or offset + size require accessing elements outside the buffer
    // buffer object respectively.
    if offset + size > b.size {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if pattern is NULL or if pattern_size is 0 or if pattern_size is not one of
    // { 1, 2, 4, 8, 16, 32, 64, 128 }.
    if pattern.is_null() || pattern_size.count_ones() != 1 || pattern_size > 128 {
        return Err(CL_INVALID_VALUE);
    }
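
    // Note on the check above: pattern_size.count_ones() == 1 holds exactly
    // for the powers of two (and rules out 0), so together with
    // pattern_size <= 128 it accepts precisely { 1, 2, 4, 8, 16, 32, 64, 128 };
    // e.g. 16usize.count_ones() == 1, while 24usize.count_ones() == 2.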

    // CL_INVALID_VALUE if offset and size are not a multiple of pattern_size.
    if offset % pattern_size != 0 || size % pattern_size != 0 {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and buffer are not the same
    if b.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // The application may reuse or free the pattern once this call returns, so we have to copy
    // the memory.
    let pattern = unsafe { slice::from_raw_parts(pattern.cast(), pattern_size).to_vec() };
    create_and_queue(
        q,
        CL_COMMAND_FILL_BUFFER,
        evs,
        event,
        false,
        Box::new(move |_, ctx| b.fill(ctx, &pattern, offset, size)),
    )

    // TODO
    //• CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
    //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with buffer.
}

#[cl_entrypoint(clEnqueueMapBuffer)]
fn enqueue_map_buffer(
    command_queue: cl_command_queue,
    buffer: cl_mem,
    blocking_map: cl_bool,
    map_flags: cl_map_flags,
    offset: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<*mut c_void> {
    let q = Queue::arc_from_raw(command_queue)?;
    let b = Buffer::arc_from_raw(buffer)?;
    let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    validate_map_flags(&b, map_flags)?;

    // CL_INVALID_VALUE if region being mapped given by (offset, size) is out of bounds or if size
    // is 0
    if offset >= b.size || size > b.size - offset || size == 0 {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if context associated with command_queue and buffer are not the same
    if b.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    let ptr = b.map(size, offset, map_flags != CL_MAP_READ.into())?;
    create_and_queue(
        q,
        CL_COMMAND_MAP_BUFFER,
        evs,
        event,
        block,
        Box::new(move |_, ctx| {
            if map_flags != CL_MAP_WRITE_INVALIDATE_REGION.into() {
                b.sync_map(ctx, ptr)
            } else {
                Ok(())
            }
        }),
    )?;

    Ok(ptr.as_ptr())

    // TODO
    // CL_MISALIGNED_SUB_BUFFER_OFFSET if buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for the device associated with queue. This error code is missing before version 1.1.
    // CL_MAP_FAILURE if there is a failure to map the requested region into the host address space. This error cannot occur for buffer objects created with CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR.
    // CL_INVALID_OPERATION if mapping would lead to overlapping regions being mapped for writing.
}
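
// Illustrative host-side sketch of the map/unmap pairing served by the
// entrypoint above and by enqueue_unmap_mem_object below (hypothetical
// handles; error handling omitted):
//
//     let ptr = clEnqueueMapBuffer(queue, buf, CL_TRUE, CL_MAP_WRITE, 0, 64,
//                                  0, ptr::null(), ptr::null_mut(), &mut err);
//     // ... the host writes through ptr ...
//     clEnqueueUnmapMemObject(queue, buf, ptr, 0, ptr::null(), ptr::null_mut());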

#[cl_entrypoint(clEnqueueReadImage)]
fn enqueue_read_image(
    command_queue: cl_command_queue,
    image: cl_mem,
    blocking_read: cl_bool,
    origin: *const usize,
    region: *const usize,
    mut row_pitch: usize,
    mut slice_pitch: usize,
    ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let i = Image::arc_from_raw(image)?;
    let block = check_cl_bool(blocking_read).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let pixel_size = i.image_format.pixel_size().unwrap() as usize;

    // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
    if i.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_OPERATION if clEnqueueReadImage is called on image which has been created with
    // CL_MEM_HOST_WRITE_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(i.flags, CL_MEM_HOST_WRITE_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // Not supported with depth stencil or msaa images.
    if i.image_format.image_channel_order == CL_DEPTH_STENCIL || i.image_desc.num_samples > 0 {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if origin or region is NULL.
    // CL_INVALID_VALUE if ptr is NULL.
    if origin.is_null() || region.is_null() || ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if image is a 1D or 2D image and slice_pitch or input_slice_pitch is not 0.
    if !i.image_desc.has_slice() && slice_pitch != 0 {
        return Err(CL_INVALID_VALUE);
    }

    let r = unsafe { CLVec::from_raw(region) };
    let o = unsafe { CLVec::from_raw(origin) };

    // CL_INVALID_VALUE if the region being read or written specified by origin and region is out of
    // bounds.
    // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
    // description for origin and region.
    validate_image_bounds(&i, o, r)?;

    // If row_pitch (or input_row_pitch) is set to 0, the appropriate row pitch is calculated based
    // on the size of each element in bytes multiplied by width.
    if row_pitch == 0 {
        row_pitch = r[0] * pixel_size;
    }

    // If slice_pitch (or input_slice_pitch) is set to 0, the appropriate slice pitch is calculated
    // based on the row_pitch × height.
    if slice_pitch == 0 {
        slice_pitch = row_pitch * r[1];
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { MutMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_READ_IMAGE,
        evs,
        event,
        block,
        Box::new(move |_, ctx| i.read(ptr, ctx, &r, &o, row_pitch, slice_pitch)),
    )

    // TODO
    //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
    //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
}

#[cl_entrypoint(clEnqueueWriteImage)]
fn enqueue_write_image(
    command_queue: cl_command_queue,
    image: cl_mem,
    blocking_write: cl_bool,
    origin: *const usize,
    region: *const usize,
    mut row_pitch: usize,
    mut slice_pitch: usize,
    ptr: *const ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let i = Image::arc_from_raw(image)?;
    let block = check_cl_bool(blocking_write).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let pixel_size = i.image_format.pixel_size().unwrap() as usize;

    // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
    if i.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_OPERATION if clEnqueueWriteImage is called on image which has been created with
    // CL_MEM_HOST_READ_ONLY or CL_MEM_HOST_NO_ACCESS.
    if bit_check(i.flags, CL_MEM_HOST_READ_ONLY | CL_MEM_HOST_NO_ACCESS) {
        return Err(CL_INVALID_OPERATION);
    }

    // Not supported with depth stencil or msaa images.
    if i.image_format.image_channel_order == CL_DEPTH_STENCIL || i.image_desc.num_samples > 0 {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if origin or region is NULL.
    // CL_INVALID_VALUE if ptr is NULL.
    if origin.is_null() || region.is_null() || ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if image is a 1D or 2D image and slice_pitch or input_slice_pitch is not 0.
    if !i.image_desc.has_slice() && slice_pitch != 0 {
        return Err(CL_INVALID_VALUE);
    }

    let r = unsafe { CLVec::from_raw(region) };
    let o = unsafe { CLVec::from_raw(origin) };

    // CL_INVALID_VALUE if the region being read or written specified by origin and region is out of
    // bounds.
    // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
    // description for origin and region.
    validate_image_bounds(&i, o, r)?;

    // If row_pitch (or input_row_pitch) is set to 0, the appropriate row pitch is calculated based
    // on the size of each element in bytes multiplied by width.
    if row_pitch == 0 {
        row_pitch = r[0] * pixel_size;
    }

    // If slice_pitch (or input_slice_pitch) is set to 0, the appropriate slice pitch is calculated
    // based on the row_pitch × height.
    if slice_pitch == 0 {
        slice_pitch = row_pitch * r[1];
    }

    // SAFETY: it's required that applications do not cause data races
    let ptr = unsafe { ConstMemoryPtr::from_ptr(ptr) };
    create_and_queue(
        q,
        CL_COMMAND_WRITE_IMAGE,
        evs,
        event,
        block,
        Box::new(move |_, ctx| i.write(ptr, ctx, &r, row_pitch, slice_pitch, &o)),
    )

    // TODO
    //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
    //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
}

#[cl_entrypoint(clEnqueueCopyImage)]
fn enqueue_copy_image(
    command_queue: cl_command_queue,
    src_image: cl_mem,
    dst_image: cl_mem,
    src_origin: *const usize,
    dst_origin: *const usize,
    region: *const usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let src_image = Image::arc_from_raw(src_image)?;
    let dst_image = Image::arc_from_raw(dst_image)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if the context associated with command_queue, src_image and dst_image are not the same
    if src_image.context != q.context || dst_image.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_IMAGE_FORMAT_MISMATCH if src_image and dst_image do not use the same image format.
    if src_image.image_format != dst_image.image_format {
        return Err(CL_IMAGE_FORMAT_MISMATCH);
    }

    // Not supported with depth stencil or msaa images.
    if src_image.image_format.image_channel_order == CL_DEPTH_STENCIL
        || dst_image.image_format.image_channel_order == CL_DEPTH_STENCIL
        || src_image.image_desc.num_samples > 0
        || dst_image.image_desc.num_samples > 0
    {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if src_origin, dst_origin, or region is NULL.
    if src_origin.is_null() || dst_origin.is_null() || region.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let region = unsafe { CLVec::from_raw(region) };
    let dst_origin = unsafe { CLVec::from_raw(dst_origin) };
    let src_origin = unsafe { CLVec::from_raw(src_origin) };

    // CL_INVALID_VALUE if the 2D or 3D rectangular region specified by src_origin and
    // src_origin + region refers to a region outside src_image, or if the 2D or 3D rectangular
    // region specified by dst_origin and dst_origin + region refers to a region outside dst_image.
    // CL_INVALID_VALUE if values in src_origin, dst_origin and region do not follow rules described
    // in the argument description for src_origin, dst_origin and region.
    validate_image_bounds(&src_image, src_origin, region)?;
    validate_image_bounds(&dst_image, dst_origin, region)?;

    create_and_queue(
        q,
        CL_COMMAND_COPY_IMAGE,
        evs,
        event,
        false,
        Box::new(move |_, ctx| {
            src_image.copy_to_image(ctx, &dst_image, src_origin, dst_origin, &region)
        }),
    )

    // TODO
    //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for src_image or dst_image are not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for src_image or dst_image are not supported by device associated with queue.
    //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
    //• CL_MEM_COPY_OVERLAP if src_image and dst_image are the same image object and the source and destination regions overlap.
}

#[cl_entrypoint(clEnqueueFillImage)]
fn enqueue_fill_image(
    command_queue: cl_command_queue,
    image: cl_mem,
    fill_color: *const ::std::os::raw::c_void,
    origin: *const usize,
    region: *const usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let i = Image::arc_from_raw(image)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if the context associated with command_queue and image are not the same
    if i.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // Not supported with depth stencil or msaa images.
    if i.image_format.image_channel_order == CL_DEPTH_STENCIL || i.image_desc.num_samples > 0 {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if fill_color is NULL.
    // CL_INVALID_VALUE if origin or region is NULL.
    if fill_color.is_null() || origin.is_null() || region.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let region = unsafe { CLVec::from_raw(region.cast()) };
    let origin = unsafe { CLVec::from_raw(origin.cast()) };

    // CL_INVALID_VALUE if the region being filled as specified by origin and region is out of
    // bounds.
    // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
    // description for origin and region.
    validate_image_bounds(&i, origin, region)?;

    // The fill color is a single floating-point value if the channel order is CL_DEPTH. Otherwise,
    // the fill color is a four component RGBA floating-point color value if the image channel data
    // type is not an unnormalized signed or unsigned integer type, is a four component signed
    // integer value if the image channel data type is an unnormalized signed integer type and is a
    // four component unsigned integer value if the image channel data type is an unnormalized
    // unsigned integer type.
    let fill_color = if i.image_format.image_channel_order == CL_DEPTH {
        [unsafe { fill_color.cast::<u32>().read() }, 0, 0, 0]
    } else {
        unsafe { fill_color.cast::<[u32; 4]>().read() }
    };
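
    // E.g. (hypothetical value) filling a CL_DEPTH image with 1.0f: the read()
    // above captures the float's bit pattern (0x3f80_0000) in the first lane
    // and leaves the remaining three lanes unused.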

    create_and_queue(
        q,
        CL_COMMAND_FILL_IMAGE,
        evs,
        event,
        false,
        Box::new(move |_, ctx| i.fill(ctx, fill_color, &origin, &region)),
    )

    // TODO
    //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
}

#[cl_entrypoint(clEnqueueCopyBufferToImage)]
fn enqueue_copy_buffer_to_image(
    command_queue: cl_command_queue,
    src_buffer: cl_mem,
    dst_image: cl_mem,
    src_offset: usize,
    dst_origin: *const usize,
    region: *const usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let src = Buffer::arc_from_raw(src_buffer)?;
    let dst = Image::arc_from_raw(dst_image)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if the context associated with command_queue, src_buffer and dst_image
    // are not the same
    if q.context != src.context || q.context != dst.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // Not supported with depth stencil or msaa images.
    if dst.image_format.image_channel_order == CL_DEPTH_STENCIL || dst.image_desc.num_samples > 0 {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if dst_origin or region is NULL.
    if dst_origin.is_null() || region.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let region = unsafe { CLVec::from_raw(region) };
    let dst_origin = unsafe { CLVec::from_raw(dst_origin) };

    // CL_INVALID_VALUE if values in dst_origin and region do not follow rules described in the
    // argument description for dst_origin and region.
    // CL_INVALID_VALUE if the 1D, 2D or 3D rectangular region specified by dst_origin and
    // dst_origin + region refers to a region outside dst_image.
    validate_image_bounds(&dst, dst_origin, region)?;

    create_and_queue(
        q,
        CL_COMMAND_COPY_BUFFER_TO_IMAGE,
        evs,
        event,
        false,
        Box::new(move |_, ctx| src.copy_to_image(ctx, &dst, src_offset, dst_origin, &region)),
    )

    // TODO
    //• CL_INVALID_MEM_OBJECT if src_buffer is not a valid buffer object or dst_image is not a valid image object or if dst_image is a 1D image buffer object created from src_buffer.
    //• CL_INVALID_VALUE ... if the region specified by src_offset and src_offset + src_cb refer to a region outside src_buffer.
    //• CL_MISALIGNED_SUB_BUFFER_OFFSET if src_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue.
    //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for dst_image are not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for dst_image are not supported by device associated with queue.
    //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_buffer or dst_image.
    //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
}

#[cl_entrypoint(clEnqueueCopyImageToBuffer)]
fn enqueue_copy_image_to_buffer(
    command_queue: cl_command_queue,
    src_image: cl_mem,
    dst_buffer: cl_mem,
    src_origin: *const usize,
    region: *const usize,
    dst_offset: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let src = Image::arc_from_raw(src_image)?;
    let dst = Buffer::arc_from_raw(dst_buffer)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if the context associated with command_queue, src_image and dst_buffer
    // are not the same
    if q.context != src.context || q.context != dst.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // Not supported with depth stencil or msaa images.
    if src.image_format.image_channel_order == CL_DEPTH_STENCIL || src.image_desc.num_samples > 0 {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if src_origin or region is NULL.
    if src_origin.is_null() || region.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let region = unsafe { CLVec::from_raw(region) };
    let src_origin = unsafe { CLVec::from_raw(src_origin) };

    // CL_INVALID_VALUE if values in src_origin and region do not follow rules described in the
    // argument description for src_origin and region.
    // CL_INVALID_VALUE if the 1D, 2D or 3D rectangular region specified by src_origin and
    // src_origin + region refers to a region outside src_image, or if the region specified by
    // dst_offset and dst_offset + dst_cb refers to a region outside dst_buffer.
    validate_image_bounds(&src, src_origin, region)?;

    create_and_queue(
        q,
        CL_COMMAND_COPY_IMAGE_TO_BUFFER,
        evs,
        event,
        false,
        Box::new(move |_, ctx| src.copy_to_buffer(ctx, &dst, src_origin, dst_offset, &region)),
    )

    // TODO
    //• CL_INVALID_MEM_OBJECT if src_image is not a valid image object or dst_buffer is not a valid buffer object or if src_image is a 1D image buffer object created from dst_buffer.
    //• CL_INVALID_VALUE ... if the region specified by dst_offset and dst_offset + dst_cb refers to a region outside dst_buffer.
    //• CL_MISALIGNED_SUB_BUFFER_OFFSET if dst_buffer is a sub-buffer object and offset specified when the sub-buffer object is created is not aligned to CL_DEVICE_MEM_BASE_ADDR_ALIGN value for device associated with queue. This error code is missing before version 1.1.
    //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for src_image are not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for src_image are not supported by device associated with queue.
    //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for data store associated with src_image or dst_buffer.
    //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
}

#[cl_entrypoint(clEnqueueMapImage)]
fn enqueue_map_image(
    command_queue: cl_command_queue,
    image: cl_mem,
    blocking_map: cl_bool,
    map_flags: cl_map_flags,
    origin: *const usize,
    region: *const usize,
    image_row_pitch: *mut usize,
    image_slice_pitch: *mut usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<*mut ::std::os::raw::c_void> {
    let q = Queue::arc_from_raw(command_queue)?;
    let i = Image::arc_from_raw(image)?;
    let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE ... or if values specified in map_flags are not valid.
    validate_map_flags(&i, map_flags)?;

    // CL_INVALID_CONTEXT if context associated with command_queue and image are not the same
    if i.context != q.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // Not supported with depth stencil or msaa images.
    if i.image_format.image_channel_order == CL_DEPTH_STENCIL || i.image_desc.num_samples > 0 {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if origin or region is NULL.
    // CL_INVALID_VALUE if image_row_pitch is NULL.
    if origin.is_null() || region.is_null() || image_row_pitch.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    let region = unsafe { CLVec::from_raw(region) };
    let origin = unsafe { CLVec::from_raw(origin) };

    // CL_INVALID_VALUE if region being mapped given by (origin, origin + region) is out of bounds
    // CL_INVALID_VALUE if values in origin and region do not follow rules described in the argument
    // description for origin and region.
    validate_image_bounds(&i, origin, region)?;

    let mut dummy_slice_pitch: usize = 0;
    let image_slice_pitch = if image_slice_pitch.is_null() {
        // CL_INVALID_VALUE if image is a 3D image, 1D or 2D image array object and
        // image_slice_pitch is NULL.
        if i.image_desc.is_array() || i.image_desc.image_type == CL_MEM_OBJECT_IMAGE3D {
            return Err(CL_INVALID_VALUE);
        }
        &mut dummy_slice_pitch
    } else {
        unsafe { image_slice_pitch.as_mut().unwrap() }
    };
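
    // For plain 1D/2D images the caller may legitimately pass NULL for
    // image_slice_pitch; the dummy local above then gives i.map() below a
    // place to write.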

    let ptr = i.map(
        origin,
        region,
        unsafe { image_row_pitch.as_mut().unwrap() },
        image_slice_pitch,
        map_flags != CL_MAP_READ.into(),
    )?;

    create_and_queue(
        q,
        CL_COMMAND_MAP_IMAGE,
        evs,
        event,
        block,
        Box::new(move |_, ctx| {
            if map_flags != CL_MAP_WRITE_INVALIDATE_REGION.into() {
                i.sync_map(ctx, ptr)
            } else {
                Ok(())
            }
        }),
    )?;

    Ok(ptr.as_ptr())

    // TODO
    //• CL_INVALID_IMAGE_SIZE if image dimensions (image width, height, specified or compute row and/or slice pitch) for image are not supported by device associated with queue.
    //• CL_IMAGE_FORMAT_NOT_SUPPORTED if image format (image channel order and data type) for image are not supported by device associated with queue.
    //• CL_MAP_FAILURE if there is a failure to map the requested region into the host address space. This error cannot occur for image objects created with CL_MEM_USE_HOST_PTR or CL_MEM_ALLOC_HOST_PTR.
    //• CL_INVALID_OPERATION if the device associated with command_queue does not support images (i.e. CL_DEVICE_IMAGE_SUPPORT specified in the Device Queries table is CL_FALSE).
    //• CL_INVALID_OPERATION if mapping would lead to overlapping regions being mapped for writing.
}

#[cl_entrypoint(clRetainMemObject)]
fn retain_mem_object(mem: cl_mem) -> CLResult<()> {
    let m = MemBase::ref_from_raw(mem)?;
    match m.base.get_type()? {
        RusticlTypes::Buffer => Buffer::retain(mem),
        RusticlTypes::Image => Image::retain(mem),
        _ => Err(CL_INVALID_MEM_OBJECT),
    }
}

#[cl_entrypoint(clReleaseMemObject)]
fn release_mem_object(mem: cl_mem) -> CLResult<()> {
    let m = MemBase::ref_from_raw(mem)?;
    match m.base.get_type()? {
        RusticlTypes::Buffer => Buffer::release(mem),
        RusticlTypes::Image => Image::release(mem),
        _ => Err(CL_INVALID_MEM_OBJECT),
    }
}

#[cl_entrypoint(clEnqueueUnmapMemObject)]
fn enqueue_unmap_mem_object(
    command_queue: cl_command_queue,
    memobj: cl_mem,
    mapped_ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let m = MemBase::arc_from_raw(memobj)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_CONTEXT if context associated with command_queue and memobj are not the same
    if q.context != m.context {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_VALUE if mapped_ptr is not a valid pointer returned by clEnqueueMapBuffer or
    // clEnqueueMapImage for memobj.
    if !m.is_mapped_ptr(mapped_ptr) {
        return Err(CL_INVALID_VALUE);
    }

    // SAFETY: it's required that applications do not cause data races
    let mapped_ptr = unsafe { MutMemoryPtr::from_ptr(mapped_ptr) };
    let needs_sync = m.unmap(mapped_ptr)?;
    create_and_queue(
        q,
        CL_COMMAND_UNMAP_MEM_OBJECT,
        evs,
        event,
        false,
        Box::new(move |_, ctx| {
            if needs_sync {
                m.sync_unmap(ctx, mapped_ptr)
            } else {
                Ok(())
            }
        }),
    )
}

#[cl_entrypoint(clEnqueueMigrateMemObjects)]
fn enqueue_migrate_mem_objects(
    command_queue: cl_command_queue,
    num_mem_objects: cl_uint,
    mem_objects: *const cl_mem,
    flags: cl_mem_migration_flags,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let bufs = MemBase::refs_from_arr(mem_objects, num_mem_objects)?;

    // CL_INVALID_VALUE if num_mem_objects is zero or if mem_objects is NULL.
    if bufs.is_empty() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_CONTEXT if the context associated with command_queue and memory objects in
    // mem_objects are not the same
    if bufs.iter().any(|b| b.context != q.context) {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_VALUE if flags is not 0 or is not any of the values described in the table above.
    if flags != 0
        && bit_check(
            flags,
            !(CL_MIGRATE_MEM_OBJECT_HOST | CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED),
        )
    {
        return Err(CL_INVALID_VALUE);
    }

    // we should do something here eventually, but it's legal to not do anything at all
    create_and_queue(
        q,
        CL_COMMAND_MIGRATE_MEM_OBJECTS,
        evs,
        event,
        false,
        Box::new(|_, _| Ok(())),
    )

    // TODO
    //• CL_MEM_OBJECT_ALLOCATION_FAILURE if there is a failure to allocate memory for the specified set of memory objects in mem_objects.
}

#[cl_info_entrypoint(clGetPipeInfo)]
unsafe impl CLInfo<cl_pipe_info> for cl_mem {
    fn query(&self, _q: cl_pipe_info, _v: CLInfoValue) -> CLResult<CLInfoRes> {
        // CL_INVALID_MEM_OBJECT if pipe is not a valid pipe object.
        // rusticl does not support pipes, so no cl_mem ever is one.
        Err(CL_INVALID_MEM_OBJECT)
    }
}

pub fn svm_alloc(
    context: cl_context,
    flags: cl_svm_mem_flags,
    size: usize,
    mut alignment: cl_uint,
) -> CLResult<*mut c_void> {
    // clSVMAlloc will fail if

    // context is not a valid context
    let c = Context::ref_from_raw(context)?;

    // or no devices in context support SVM.
    if !c.has_svm_devs() {
        return Err(CL_INVALID_OPERATION);
    }

    // flags does not contain CL_MEM_SVM_FINE_GRAIN_BUFFER but does contain CL_MEM_SVM_ATOMICS.
    if !bit_check(flags, CL_MEM_SVM_FINE_GRAIN_BUFFER) && bit_check(flags, CL_MEM_SVM_ATOMICS) {
        return Err(CL_INVALID_VALUE);
    }

    // size is 0 or > CL_DEVICE_MAX_MEM_ALLOC_SIZE value for any device in context.
    if size == 0 || checked_compare(size, Ordering::Greater, c.max_mem_alloc()) {
        return Err(CL_INVALID_VALUE);
    }

    if alignment == 0 {
        alignment = mem::size_of::<[u64; 16]>() as cl_uint;
    }

    // alignment is not a power of two
    if !alignment.is_power_of_two() {
        return Err(CL_INVALID_VALUE);
    }
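
    // Note on the defaulting above: an alignment of 0 falls back to
    // size_of::<[u64; 16]>() == 128 bytes, matching the largest built-in
    // OpenCL C type (long16), and is itself a power of two.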
2344 
2345     let layout;
2346     let ptr;
2347 
2348     // SAFETY: we already verify the parameters to from_size_align above and layout is of non zero
2349     // size
2350     unsafe {
2351         layout = Layout::from_size_align_unchecked(size, alignment as usize);
2352         ptr = alloc::alloc(layout);
2353     }
2354 
2355     if ptr.is_null() {
2356         return Err(CL_OUT_OF_HOST_MEMORY);
2357     }
2358 
2359     c.add_svm_ptr(ptr as usize, layout);
2360     Ok(ptr.cast())
2361 
2362     // Values specified in flags do not follow rules described for supported values in the SVM Memory Flags table.
2363     // CL_MEM_SVM_FINE_GRAIN_BUFFER or CL_MEM_SVM_ATOMICS is specified in flags and these are not supported by at least one device in context.
2364     // The values specified in flags are not valid, i.e. don’t match those defined in the SVM Memory Flags table.
2365     // the OpenCL implementation cannot support the specified alignment for at least one device in context.
2366     // There was a failure to allocate resources.
2367 }
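
// Minimal sketch (hypothetical, safe variant of the allocation setup above): with alignment == 0
// the default is 128 bytes (size_of::<[u64; 16]>(), the size of the largest OpenCL vector type),
// and any explicit value must be a power of two:
//
//     use std::alloc::Layout;
//
//     fn svm_layout(size: usize, mut align: usize) -> Option<Layout> {
//         if align == 0 {
//             align = 128;
//         }
//         if size == 0 || !align.is_power_of_two() {
//             return None;
//         }
//         // The checked constructor additionally rejects sizes that would overflow when
//         // rounded up to `align`.
//         Layout::from_size_align(size, align).ok()
//     }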

fn svm_free_impl(c: &Context, svm_pointer: usize) {
    if let Some(layout) = c.remove_svm_ptr(svm_pointer) {
        // SAFETY: we make sure that svm_pointer is a valid allocation and reuse the same layout
        // from the allocation
        unsafe {
            alloc::dealloc(svm_pointer as *mut u8, layout);
        }
    }
}

pub fn svm_free(context: cl_context, svm_pointer: usize) -> CLResult<()> {
    let c = Context::ref_from_raw(context)?;
    svm_free_impl(c, svm_pointer);
    Ok(())
}
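
// Sketch (assumption: the Context-side bookkeeping behind add_svm_ptr / remove_svm_ptr amounts to
// a map from base address to Layout; the real implementation lives in core::context):
//
//     use std::collections::HashMap;
//
//     struct SvmAllocations(HashMap<usize, Layout>);
//
//     impl SvmAllocations {
//         fn add(&mut self, base: usize, layout: Layout) {
//             self.0.insert(base, layout);
//         }
//         // Returns the Layout only for pointers we actually handed out, so double frees and
//         // foreign pointers are silently ignored by svm_free_impl.
//         fn remove(&mut self, base: usize) -> Option<Layout> {
//             self.0.remove(&base)
//         }
//     }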

fn enqueue_svm_free_impl(
    command_queue: cl_command_queue,
    num_svm_pointers: cl_uint,
    svm_pointers: *mut *mut c_void,
    pfn_free_func: Option<FuncSVMFreeCb>,
    user_data: *mut c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_VALUE if num_svm_pointers is 0 and svm_pointers is non-NULL, or if svm_pointers
    // is NULL and num_svm_pointers is not 0.
    if num_svm_pointers == 0 && !svm_pointers.is_null()
        || num_svm_pointers != 0 && svm_pointers.is_null()
    {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // The application is allowed to reuse or free the memory referenced by `svm_pointers` after
    // this function returns, so we have to make a copy. Note that a NULL `svm_pointers` (with
    // num_svm_pointers == 0) is valid, and from_raw_parts requires a non-null pointer even for
    // empty slices.
    let mut svm_pointers = if svm_pointers.is_null() {
        Vec::new()
    } else {
        // SAFETY: num_svm_pointers specifies the number of elements in svm_pointers.
        unsafe { slice::from_raw_parts(svm_pointers.cast(), num_svm_pointers as usize) }.to_vec()
    };
    // SAFETY: The requirements on `SVMFreeCb::new` match the requirements
    // imposed by the OpenCL specification. It is the caller's duty to uphold them.
    let cb_opt = unsafe { SVMFreeCb::new(pfn_free_func, user_data) }.ok();

    create_and_queue(
        q,
        cmd_type,
        evs,
        event,
        false,
        Box::new(move |q, _| {
            if let Some(cb) = cb_opt {
                cb.call(q, &mut svm_pointers);
            } else {
                for ptr in svm_pointers {
                    svm_free_impl(&q.context, ptr);
                }
            }

            Ok(())
        }),
    )
}
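
// Sketch (hypothetical helper): the NULL/count consistency check above accepts exactly the states
// where "no pointers" and "no array" agree:
//
//     fn args_consistent(num_svm_pointers: u32, svm_pointers: *const *mut c_void) -> bool {
//         (num_svm_pointers == 0) == svm_pointers.is_null()
//     }
//
//     // args_consistent(0, std::ptr::null()) == true  (free nothing)
//     // args_consistent(3, std::ptr::null()) == false (CL_INVALID_VALUE)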

#[cl_entrypoint(clEnqueueSVMFree)]
fn enqueue_svm_free(
    command_queue: cl_command_queue,
    num_svm_pointers: cl_uint,
    svm_pointers: *mut *mut c_void,
    pfn_free_func: Option<FuncSVMFreeCb>,
    user_data: *mut c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_free_impl(
        command_queue,
        num_svm_pointers,
        svm_pointers,
        pfn_free_func,
        user_data,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_FREE,
    )
}

#[cl_entrypoint(clEnqueueSVMFreeARM)]
fn enqueue_svm_free_arm(
    command_queue: cl_command_queue,
    num_svm_pointers: cl_uint,
    svm_pointers: *mut *mut c_void,
    pfn_free_func: Option<FuncSVMFreeCb>,
    user_data: *mut c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_free_impl(
        command_queue,
        num_svm_pointers,
        svm_pointers,
        pfn_free_func,
        user_data,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_FREE_ARM,
    )
}

fn enqueue_svm_memcpy_impl(
    command_queue: cl_command_queue,
    blocking_copy: cl_bool,
    dst_ptr: *mut c_void,
    src_ptr: *const c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let block = check_cl_bool(blocking_copy).ok_or(CL_INVALID_VALUE)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_MEM_COPY_OVERLAP if the values specified for dst_ptr, src_ptr and size result in an
    // overlapping copy.
    let dst_ptr_addr = dst_ptr as usize;
    let src_ptr_addr = src_ptr as usize;
    if (src_ptr_addr <= dst_ptr_addr && dst_ptr_addr < src_ptr_addr + size)
        || (dst_ptr_addr <= src_ptr_addr && src_ptr_addr < dst_ptr_addr + size)
    {
        return Err(CL_MEM_COPY_OVERLAP);
    }

    // CAST: We have no idea about the type or initialization status of these bytes.
    // MaybeUninit<u8> is the safe bet.
    let src_ptr = src_ptr.cast::<MaybeUninit<u8>>();

    // CAST: We have no idea about the type or initialization status of these bytes.
    // MaybeUninit<u8> is the safe bet.
    let dst_ptr = dst_ptr.cast::<MaybeUninit<u8>>();

    // SAFETY: It is up to the application to ensure the memory is valid to read for `size` bytes
    // and that it doesn't modify it until the command has completed.
    let src = unsafe { cl_slice::from_raw_parts(src_ptr, size)? };

    // SAFETY: We've ensured there's no aliasing between src and dst. It is up to the application
    // to ensure the memory is valid to read and write for `size` bytes and that it doesn't modify
    // or read from it until the command has completed.
    let dst = unsafe { cl_slice::from_raw_parts_mut(dst_ptr, size)? };

    create_and_queue(
        q,
        cmd_type,
        evs,
        event,
        block,
        Box::new(move |_, _| {
            dst.copy_from_slice(src);
            Ok(())
        }),
    )
}
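
// Sketch (hypothetical helper, same arithmetic as the overlap check above): treating both buffers
// as half-open ranges [a, a + size) and [b, b + size), they overlap iff either start lies inside
// the other range:
//
//     fn ranges_overlap(a: usize, b: usize, size: usize) -> bool {
//         (a <= b && b < a + size) || (b <= a && a < b + size)
//     }
//
//     // ranges_overlap(0x1000, 0x1004, 8) == true  ([0x1000, 0x1008) overlaps [0x1004, 0x100c))
//     // ranges_overlap(0x1000, 0x1008, 8) == false (the ranges merely touch)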

#[cl_entrypoint(clEnqueueSVMMemcpy)]
fn enqueue_svm_memcpy(
    command_queue: cl_command_queue,
    blocking_copy: cl_bool,
    dst_ptr: *mut c_void,
    src_ptr: *const c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_memcpy_impl(
        command_queue,
        blocking_copy,
        dst_ptr,
        src_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMCPY,
    )
}

#[cl_entrypoint(clEnqueueSVMMemcpyARM)]
fn enqueue_svm_memcpy_arm(
    command_queue: cl_command_queue,
    blocking_copy: cl_bool,
    dst_ptr: *mut c_void,
    src_ptr: *const c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_memcpy_impl(
        command_queue,
        blocking_copy,
        dst_ptr,
        src_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMCPY_ARM,
    )
}

fn enqueue_svm_mem_fill_impl(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    pattern: *const ::std::os::raw::c_void,
    pattern_size: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if pattern is NULL [...]
    if pattern.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if pattern_size is 0 or if size is not a multiple of pattern_size.
    // pattern_size is checked first so the modulo below cannot divide by zero.
    if pattern_size == 0 || size % pattern_size != 0 {
        return Err(CL_INVALID_VALUE);
    }

    // The provided `$bytesize` must equal `pattern_size`.
    macro_rules! generate_fill_closure {
        ($bytesize:literal) => {{
            // We need the value of `$bytesize` at compile time, so we need to pass it in, but it
            // should always match `pattern_size`.
            assert!($bytesize == pattern_size);

            // Three reasons we define our own bag-of-bytes type here:
            //
            // We'd otherwise have to pass a type to this macro. Verifying that the type we passed
            // upholds all the properties we need or want is more trouble than defining our own.
            //
            // The primitive Rust types only go up to `u128` anyway and their alignments are
            // platform-defined. E.g. at the time of this writing `u128` only has an alignment of
            // 8 on x86-64, even though its size is 16. Defining our own type with an alignment of
            // 16 allows the compiler to generate better code.
            //
            // The alignment of OpenCL types is currently what we need on x86-64, but the spec
            // explicitly states that's just a recommendation and ultimately it's up to the
            // cl_platform.h header. The very descriptive names of the CL types don't make
            // verifying the match when calling this macro any easier at a glance.
            // "Was `cl_uint` 4 byte or 8 byte? Eh, I'm sure nobody got it wrong by accident."
            #[repr(C)]
            #[repr(align($bytesize))]
            #[derive(Copy, Clone)]
            struct Pattern([u8; $bytesize]);

            // Just to make sure the compiler didn't generate anything weird.
            static_assert!($bytesize == mem::size_of::<Pattern>());
            static_assert!($bytesize == mem::align_of::<Pattern>());

            // CAST: We don't know exactly which type `pattern` points to, but we know it's an
            // Application Scalar Data Type (cl_char, cl_ulong, etc.) or an Application Vector
            // Data Type (cl_double4, etc.). All of them are `Copy`, do not contain padding bytes,
            // and have no invalid bit patterns. AKA they are POD data types.
            // Since we only copy it around, we can cast to any POD type as long as its size
            // matches `pattern_size`.
            let pattern_ptr = pattern.cast::<Pattern>();

            // The application is allowed to reuse or free the memory referenced by `pattern_ptr`
            // after this function returns, so we need to create a copy.
            //
            // There's no explicit alignment guarantee and we don't rely on `Pattern` matching the
            // alignment of whichever Application Data Type we're actually presented with. Thus,
            // do an unaligned read.
            //
            // SAFETY: We've checked that `pattern_ptr` is not NULL above. It is otherwise the
            // calling application's responsibility to ensure that it is valid for reads of
            // `pattern_size` bytes and properly initialized.
            // Creating a bitwise copy can't create memory safety issues, since `Pattern` is
            // `Copy`.
            let pattern = unsafe { pattern_ptr.read_unaligned() };

            // CAST: Same as with `pattern`, we don't know the exact type of `svm_ptr`, but we do
            // know it's fine if we choose the same type here. The application might reasonably
            // give us uninitialized memory though, so cast to a `MaybeUninit<Pattern>`, which has
            // the same layout as `Pattern`.
            let svm_ptr = svm_ptr.cast::<MaybeUninit<Pattern>>();

            // SAFETY: It is the calling application's responsibility to ensure that `svm_ptr` is
            // valid for reads and writes up to `size` bytes.
            // Since `pattern_size == mem::size_of::<Pattern>()` and `MaybeUninit<Pattern>` has
            // the same layout as `Pattern`, we know that
            // `size / pattern_size * mem::size_of::<MaybeUninit<Pattern>>()` equals `size`.
            //
            // Since we're creating a `&[MaybeUninit<Pattern>]` the initialization status does not
            // matter.
            //
            // From here on out we only access the referenced memory through this slice. In
            // particular, since we've made a copy of `pattern`, it doesn't matter if the memory
            // region referenced by `pattern` aliases the one referenced by this slice. It is up
            // to the application not to access it at all until this command has been completed.
            let svm_slice = unsafe { cl_slice::from_raw_parts_mut(svm_ptr, size / pattern_size)? };

            Box::new(move |_, _| {
                for x in svm_slice {
                    x.write(pattern);
                }

                Ok(())
            })
        }};
    }

    // Generate optimized code paths for each of the possible pattern sizes.
    let work: EventSig = match pattern_size {
        1 => generate_fill_closure!(1),
        2 => generate_fill_closure!(2),
        4 => generate_fill_closure!(4),
        8 => generate_fill_closure!(8),
        16 => generate_fill_closure!(16),
        32 => generate_fill_closure!(32),
        64 => generate_fill_closure!(64),
        128 => generate_fill_closure!(128),
        _ => {
            // CL_INVALID_VALUE if [...] pattern_size is 0 or if pattern_size is not one of
            // {1, 2, 4, 8, 16, 32, 64, 128}.
            return Err(CL_INVALID_VALUE);
        }
    };

    create_and_queue(q, cmd_type, evs, event, false, work)
}
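
// Minimal sketch (standalone illustration of the trick used in generate_fill_closure!): a
// bag-of-bytes type whose alignment matches its size, unlike u128, which is only 8-byte aligned
// on some x86-64 targets:
//
//     #[repr(C)]
//     #[repr(align(16))]
//     #[derive(Copy, Clone)]
//     struct Pattern16([u8; 16]);
//
//     const _: () = assert!(std::mem::size_of::<Pattern16>() == 16);
//     const _: () = assert!(std::mem::align_of::<Pattern16>() == 16);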

#[cl_entrypoint(clEnqueueSVMMemFill)]
fn enqueue_svm_mem_fill(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    pattern: *const ::std::os::raw::c_void,
    pattern_size: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_mem_fill_impl(
        command_queue,
        svm_ptr,
        pattern,
        pattern_size,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMFILL,
    )
}

#[cl_entrypoint(clEnqueueSVMMemFillARM)]
fn enqueue_svm_mem_fill_arm(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    pattern: *const ::std::os::raw::c_void,
    pattern_size: usize,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_mem_fill_impl(
        command_queue,
        svm_ptr,
        pattern,
        pattern_size,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MEMFILL_ARM,
    )
}

fn enqueue_svm_map_impl(
    command_queue: cl_command_queue,
    blocking_map: cl_bool,
    flags: cl_map_flags,
    svm_ptr: *mut ::std::os::raw::c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let block = check_cl_bool(blocking_map).ok_or(CL_INVALID_VALUE)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if svm_ptr is NULL.
    if svm_ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    // CL_INVALID_VALUE if size is 0 ...
    if size == 0 {
        return Err(CL_INVALID_VALUE);
    }

    // ... or if values specified in map_flags are not valid.
    validate_map_flags_common(flags)?;

    create_and_queue(q, cmd_type, evs, event, block, Box::new(|_, _| Ok(())))
}
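
// Sketch (assumption about the util helper used above): check_cl_bool presumably maps
// CL_TRUE/CL_FALSE to Some(bool) and anything else to None, which `.ok_or(CL_INVALID_VALUE)`
// then turns into the spec-mandated error:
//
//     fn check_cl_bool_sketch(b: cl_bool) -> Option<bool> {
//         match b {
//             CL_FALSE => Some(false),
//             CL_TRUE => Some(true),
//             _ => None,
//         }
//     }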

#[cl_entrypoint(clEnqueueSVMMap)]
fn enqueue_svm_map(
    command_queue: cl_command_queue,
    blocking_map: cl_bool,
    flags: cl_map_flags,
    svm_ptr: *mut ::std::os::raw::c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_map_impl(
        command_queue,
        blocking_map,
        flags,
        svm_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MAP,
    )
}

#[cl_entrypoint(clEnqueueSVMMapARM)]
fn enqueue_svm_map_arm(
    command_queue: cl_command_queue,
    blocking_map: cl_bool,
    flags: cl_map_flags,
    svm_ptr: *mut ::std::os::raw::c_void,
    size: usize,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_map_impl(
        command_queue,
        blocking_map,
        flags,
        svm_ptr,
        size,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_MAP_ARM,
    )
}

fn enqueue_svm_unmap_impl(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
    cmd_type: cl_command_type,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if svm_ptr is NULL.
    if svm_ptr.is_null() {
        return Err(CL_INVALID_VALUE);
    }

    create_and_queue(q, cmd_type, evs, event, false, Box::new(|_, _| Ok(())))
}

#[cl_entrypoint(clEnqueueSVMUnmap)]
fn enqueue_svm_unmap(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_unmap_impl(
        command_queue,
        svm_ptr,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_UNMAP,
    )
}

#[cl_entrypoint(clEnqueueSVMUnmapARM)]
fn enqueue_svm_unmap_arm(
    command_queue: cl_command_queue,
    svm_ptr: *mut ::std::os::raw::c_void,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    enqueue_svm_unmap_impl(
        command_queue,
        svm_ptr,
        num_events_in_wait_list,
        event_wait_list,
        event,
        CL_COMMAND_SVM_UNMAP_ARM,
    )
}

#[cl_entrypoint(clEnqueueSVMMigrateMem)]
fn enqueue_svm_migrate_mem(
    command_queue: cl_command_queue,
    num_svm_pointers: cl_uint,
    svm_pointers: *mut *const ::std::os::raw::c_void,
    sizes: *const usize,
    flags: cl_mem_migration_flags,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;

    // CL_INVALID_OPERATION if the device associated with command queue does not support SVM.
    if !q.device.svm_supported() {
        return Err(CL_INVALID_OPERATION);
    }

    // CL_INVALID_VALUE if num_svm_pointers is zero.
    if num_svm_pointers == 0 {
        return Err(CL_INVALID_VALUE);
    }

    let num_svm_pointers = num_svm_pointers as usize;
    // SAFETY: It is the application's responsibility to ensure that `svm_pointers` is valid for
    // reads of `num_svm_pointers` pointers.
    let mut svm_pointers: Vec<usize> =
        unsafe { cl_slice::from_raw_parts(svm_pointers.cast(), num_svm_pointers)? }.to_owned();
    // if sizes is NULL, the whole allocation containing each pointer needs to be migrated
    let mut sizes = if sizes.is_null() {
        vec![0; num_svm_pointers]
    } else {
        // SAFETY: It is the application's responsibility to ensure that `sizes` is valid for
        // reads of `num_svm_pointers` elements.
        unsafe { cl_slice::from_raw_parts(sizes, num_svm_pointers)? }.to_owned()
    };

    // CL_INVALID_VALUE if sizes[i] is non-zero and the range
    // [svm_pointers[i], svm_pointers[i] + sizes[i]) is not contained within an existing
    // clSVMAlloc allocation.
    for (ptr, size) in svm_pointers.iter_mut().zip(&mut sizes) {
        if let Some((alloc, alloc_size)) = q.context.find_svm_alloc(*ptr) {
            let ptr_addr = *ptr;
            let alloc_addr = alloc as usize;

            // if the offset + size is bigger than the allocation, we are out of bounds
            if (ptr_addr - alloc_addr) + *size <= alloc_size {
                // if the size is 0, the entire allocation should be migrated
                if *size == 0 {
                    *ptr = alloc as usize;
                    *size = alloc_size;
                }
                continue;
            }
        }

        return Err(CL_INVALID_VALUE);
    }

    let to_device = !bit_check(flags, CL_MIGRATE_MEM_OBJECT_HOST);
    let content_undefined = bit_check(flags, CL_MIGRATE_MEM_OBJECT_CONTENT_UNDEFINED);

    create_and_queue(
        q,
        CL_COMMAND_SVM_MIGRATE_MEM,
        evs,
        event,
        false,
        Box::new(move |_, ctx| {
            ctx.svm_migrate(&svm_pointers, &sizes, to_device, content_undefined);
            Ok(())
        }),
    )
}
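
// Sketch (hypothetical helper): the containment test above checks that [ptr, ptr + size) stays
// inside [alloc, alloc + alloc_size). It relies on find_svm_alloc only returning allocations
// with alloc <= ptr, so the offset subtraction cannot underflow:
//
//     fn contained(ptr: usize, size: usize, alloc: usize, alloc_size: usize) -> bool {
//         debug_assert!(ptr >= alloc);
//         (ptr - alloc) + size <= alloc_size
//     }
//
//     // contained(0x1010, 0x10, 0x1000, 0x20) == true  (ends exactly at the boundary)
//     // contained(0x1010, 0x11, 0x1000, 0x20) == false (one byte past the end)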

#[cl_entrypoint(clCreatePipe)]
fn create_pipe(
    _context: cl_context,
    _flags: cl_mem_flags,
    _pipe_packet_size: cl_uint,
    _pipe_max_packets: cl_uint,
    _properties: *const cl_pipe_properties,
) -> CLResult<cl_mem> {
    Err(CL_INVALID_OPERATION)
}

#[cl_info_entrypoint(clGetGLTextureInfo)]
unsafe impl CLInfo<cl_gl_texture_info> for cl_mem {
    fn query(&self, q: cl_gl_texture_info, v: CLInfoValue) -> CLResult<CLInfoRes> {
        let mem = MemBase::ref_from_raw(*self)?;
        match q {
            CL_GL_MIPMAP_LEVEL => v.write::<cl_GLint>(0),
            CL_GL_TEXTURE_TARGET => v.write::<cl_GLenum>(
                mem.gl_obj
                    .as_ref()
                    .ok_or(CL_INVALID_GL_OBJECT)?
                    .gl_object_target,
            ),
            _ => Err(CL_INVALID_VALUE),
        }
    }
}

fn create_from_gl(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    let c = Context::arc_from_raw(context)?;
    let gl_ctx_manager = &c.gl_ctx_manager;

    // CL_INVALID_CONTEXT if context was not created from an OpenGL context.
    if gl_ctx_manager.is_none() {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    validate_mem_flags(flags, target == GL_ARRAY_BUFFER)?;

    // CL_INVALID_MIP_LEVEL if miplevel is greater than zero and the OpenGL
    // implementation does not support creating from non-zero mipmap levels.
    if miplevel > 0 {
        return Err(CL_INVALID_MIP_LEVEL);
    }

    // CL_INVALID_CONTEXT if context [..] was not created from a GL context.
    if let Some(gl_ctx_manager) = gl_ctx_manager {
        let gl_export_manager =
            gl_ctx_manager.export_object(&c, target, flags as u32, miplevel, texture)?;

        Ok(MemBase::from_gl(c, flags, &gl_export_manager)?)
    } else {
        Err(CL_INVALID_CONTEXT)
    }
}

#[cl_entrypoint(clCreateFromGLTexture)]
fn create_from_gl_texture(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    if !is_valid_gl_texture(target) {
        return Err(CL_INVALID_VALUE);
    }

    create_from_gl(context, flags, target, miplevel, texture)
}

#[cl_entrypoint(clCreateFromGLTexture2D)]
fn create_from_gl_texture_2d(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    if !is_valid_gl_texture_2d(target) {
        return Err(CL_INVALID_VALUE);
    }

    create_from_gl(context, flags, target, miplevel, texture)
}

#[cl_entrypoint(clCreateFromGLTexture3D)]
fn create_from_gl_texture_3d(
    context: cl_context,
    flags: cl_mem_flags,
    target: cl_GLenum,
    miplevel: cl_GLint,
    texture: cl_GLuint,
) -> CLResult<cl_mem> {
    // CL_INVALID_VALUE if values specified in flags are not valid or if value specified in
    // texture_target is not one of the values specified in the description of texture_target.
    if target != GL_TEXTURE_3D {
        return Err(CL_INVALID_VALUE);
    }

    create_from_gl(context, flags, target, miplevel, texture)
}

#[cl_entrypoint(clCreateFromGLBuffer)]
fn create_from_gl_buffer(
    context: cl_context,
    flags: cl_mem_flags,
    bufobj: cl_GLuint,
) -> CLResult<cl_mem> {
    create_from_gl(context, flags, GL_ARRAY_BUFFER, 0, bufobj)
}

#[cl_entrypoint(clCreateFromGLRenderbuffer)]
fn create_from_gl_renderbuffer(
    context: cl_context,
    flags: cl_mem_flags,
    renderbuffer: cl_GLuint,
) -> CLResult<cl_mem> {
    create_from_gl(context, flags, GL_RENDERBUFFER, 0, renderbuffer)
}
#[cl_entrypoint(clGetGLObjectInfo)]
fn get_gl_object_info(
    memobj: cl_mem,
    gl_object_type: *mut cl_gl_object_type,
    gl_object_name: *mut cl_GLuint,
) -> CLResult<()> {
    let m = MemBase::ref_from_raw(memobj)?;

    match &m.gl_obj {
        Some(gl_obj) => {
            gl_object_type.write_checked(gl_obj.gl_object_type);
            gl_object_name.write_checked(gl_obj.gl_object_name);
        }
        None => {
            // CL_INVALID_GL_OBJECT if there is no GL object associated with memobj.
            return Err(CL_INVALID_GL_OBJECT);
        }
    }

    Ok(())
}
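
// Sketch (assumption about the CheckedPtr helper from mesa_rust_util used above): write_checked
// presumably only stores through non-NULL out pointers, which is why both writes can be issued
// unconditionally:
//
//     unsafe fn write_checked_sketch<T>(ptr: *mut T, value: T) {
//         if !ptr.is_null() {
//             // SAFETY: the caller guarantees ptr is valid for writes when non-NULL.
//             unsafe { ptr.write(value) };
//         }
//     }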

#[cl_entrypoint(clEnqueueAcquireGLObjects)]
fn enqueue_acquire_gl_objects(
    command_queue: cl_command_queue,
    num_objects: cl_uint,
    mem_objects: *const cl_mem,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let objs = MemBase::arcs_from_arr(mem_objects, num_objects)?;
    let gl_ctx_manager = &q.context.gl_ctx_manager;

    // CL_INVALID_CONTEXT if context associated with command_queue was not created from an
    // OpenGL context.
    if gl_ctx_manager.is_none() {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_GL_OBJECT if memory objects in mem_objects have not been created from a
    // GL object(s).
    if objs.iter().any(|o| o.gl_obj.is_none()) {
        return Err(CL_INVALID_GL_OBJECT);
    }

    create_and_queue(
        q,
        CL_COMMAND_ACQUIRE_GL_OBJECTS,
        evs,
        event,
        false,
        Box::new(move |_, ctx| copy_cube_to_slice(ctx, &objs)),
    )
}

#[cl_entrypoint(clEnqueueReleaseGLObjects)]
fn enqueue_release_gl_objects(
    command_queue: cl_command_queue,
    num_objects: cl_uint,
    mem_objects: *const cl_mem,
    num_events_in_wait_list: cl_uint,
    event_wait_list: *const cl_event,
    event: *mut cl_event,
) -> CLResult<()> {
    let q = Queue::arc_from_raw(command_queue)?;
    let evs = event_list_from_cl(&q, num_events_in_wait_list, event_wait_list)?;
    let objs = MemBase::arcs_from_arr(mem_objects, num_objects)?;
    let gl_ctx_manager = &q.context.gl_ctx_manager;

    // CL_INVALID_CONTEXT if context associated with command_queue was not created from an
    // OpenGL context.
    if gl_ctx_manager.is_none() {
        return Err(CL_INVALID_CONTEXT);
    }

    // CL_INVALID_GL_OBJECT if memory objects in mem_objects have not been created from a
    // GL object(s).
    if objs.iter().any(|o| o.gl_obj.is_none()) {
        return Err(CL_INVALID_GL_OBJECT);
    }

    create_and_queue(
        q,
        CL_COMMAND_RELEASE_GL_OBJECTS,
        evs,
        event,
        false,
        Box::new(move |_, ctx| copy_slice_to_cube(ctx, &objs)),
    )
}
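
// Usage sketch (application side, hedged; error handling and GL synchronization omitted):
// GL-shared memory objects must be acquired before CL commands access them and released again
// afterwards:
//
//     // let queue: cl_command_queue = ...; let mem: cl_mem = ...;
//     enqueue_acquire_gl_objects(queue, 1, &mem, 0, ptr::null(), ptr::null_mut())?;
//     // ... enqueue CL commands that read from or write to `mem` ...
//     enqueue_release_gl_objects(queue, 1, &mem, 0, ptr::null(), ptr::null_mut())?;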