// Copyright (c) 2016 The vulkano developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.

//! Buffer that is written once then read for as long as it is alive.
//!
//! Use this buffer when you have data that you never modify.
//!
//! Only the first ever command buffer that uses this buffer can write to it (for example by
//! copying from another buffer). Any subsequent command buffer **must** only read from the buffer,
//! or a panic will happen.
//!
//! The buffer will be stored in device-local memory, if possible.
//!
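//! # Example
//!
//! A minimal sketch of typical usage, assuming an existing `queue` (an `Arc<Queue>`)
//! obtained during device setup:
//!
//! ```no_run
//! # use std::sync::Arc;
//! # use vulkano::buffer::{BufferUsage, ImmutableBuffer};
//! # use vulkano::device::Queue;
//! # use vulkano::sync::GpuFuture;
//! # fn example(queue: Arc<Queue>) {
//! // `future` represents the initial upload to the final buffer.
//! let (buffer, future) =
//!     ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();
//!
//! // Wait for the upload to finish before using `buffer` in later submissions.
//! future.then_signal_fence_and_flush().unwrap().wait(None).unwrap();
//! # }
//! ```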

use crate::buffer::sys::BufferCreationError;
use crate::buffer::sys::UnsafeBuffer;
use crate::buffer::traits::BufferAccess;
use crate::buffer::traits::BufferInner;
use crate::buffer::traits::TypedBufferAccess;
use crate::buffer::BufferUsage;
use crate::buffer::CpuAccessibleBuffer;
use crate::command_buffer::AutoCommandBufferBuilder;
use crate::command_buffer::CommandBufferExecFuture;
use crate::command_buffer::CommandBufferUsage;
use crate::command_buffer::PrimaryAutoCommandBuffer;
use crate::command_buffer::PrimaryCommandBuffer;
use crate::device::physical::QueueFamily;
use crate::device::Device;
use crate::device::DeviceOwned;
use crate::device::Queue;
use crate::memory::pool::AllocFromRequirementsFilter;
use crate::memory::pool::AllocLayout;
use crate::memory::pool::MappingRequirement;
use crate::memory::pool::MemoryPool;
use crate::memory::pool::MemoryPoolAlloc;
use crate::memory::pool::PotentialDedicatedAllocation;
use crate::memory::pool::StdMemoryPoolAlloc;
use crate::memory::DedicatedAlloc;
use crate::memory::DeviceMemoryAllocError;
use crate::sync::AccessError;
use crate::sync::NowFuture;
use crate::sync::Sharing;
use crate::DeviceSize;
use smallvec::SmallVec;
use std::hash::Hash;
use std::hash::Hasher;
use std::marker::PhantomData;
use std::mem;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;

/// Buffer that is written once then read for as long as it is alive.
// TODO: implement Debug
pub struct ImmutableBuffer<T: ?Sized, A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>> {
    // Inner content.
    inner: UnsafeBuffer,

    // Memory allocated for the buffer.
    memory: A,

    // True if the `ImmutableBufferInitialization` object was used by the GPU then dropped.
    // This means that the `ImmutableBuffer` can be used as much as we want without any restriction.
    initialized: AtomicBool,

    // Queue families allowed to access this buffer.
    queue_families: SmallVec<[u32; 4]>,

    // Necessary to have the appropriate type parameter.
    marker: PhantomData<Box<T>>,
}

// TODO: make this prettier
type ImmutableBufferFromBufferFuture = CommandBufferExecFuture<NowFuture, PrimaryAutoCommandBuffer>;

impl<T: ?Sized> ImmutableBuffer<T> {
    /// Builds an `ImmutableBuffer` from some data.
    ///
    /// This function builds a memory-mapped intermediate buffer, writes the data to it, builds a
    /// command buffer that copies from this intermediate buffer to the final buffer, and finally
    /// submits the command buffer as a future.
    ///
    /// This function returns two objects: the newly-created buffer, and a future representing
    /// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
    /// either submit your operation after this future, or execute this future and wait for it to
    /// be finished before submitting your own operation.
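    ///
    /// # Example
    ///
    /// A minimal sketch, assuming an existing `queue`:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use vulkano::buffer::{BufferUsage, ImmutableBuffer};
    /// # use vulkano::device::Queue;
    /// # fn example(queue: Arc<Queue>) {
    /// let (buffer, upload_future) =
    ///     ImmutableBuffer::from_data([0.0f32; 16], BufferUsage::all(), queue).unwrap();
    /// // Chain your own work after `upload_future` (or wait on it) before reading `buffer`.
    /// # }
    /// ```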
    pub fn from_data(
        data: T,
        usage: BufferUsage,
        queue: Arc<Queue>,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
    where
        T: 'static + Copy + Send + Sync + Sized,
    {
        let source = CpuAccessibleBuffer::from_data(
            queue.device().clone(),
            BufferUsage::transfer_source(),
            false,
            data,
        )?;
        ImmutableBuffer::from_buffer(source, usage, queue)
    }

    /// Builds an `ImmutableBuffer` that copies its data from another buffer.
    ///
    /// This function returns two objects: the newly-created buffer, and a future representing
    /// the initial upload operation. In order to be allowed to use the `ImmutableBuffer`, you must
    /// either submit your operation after this future, or execute this future and wait for it to
    /// be finished before submitting your own operation.
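    ///
    /// # Example
    ///
    /// A minimal sketch, assuming an existing staging buffer `source` (for instance a
    /// `CpuAccessibleBuffer`) and a `queue`:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, ImmutableBuffer};
    /// # use vulkano::device::Queue;
    /// # fn example(source: Arc<CpuAccessibleBuffer<u32>>, queue: Arc<Queue>) {
    /// let (buffer, upload_future) =
    ///     ImmutableBuffer::from_buffer(source, BufferUsage::all(), queue).unwrap();
    /// # }
    /// ```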
    pub fn from_buffer<B>(
        source: B,
        usage: BufferUsage,
        queue: Arc<Queue>,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
    where
        B: BufferAccess + TypedBufferAccess<Content = T> + 'static + Clone + Send + Sync,
        T: 'static + Send + Sync,
    {
        unsafe {
            // We automatically set `transfer_destination` to true in order to avoid annoying errors.
            let actual_usage = BufferUsage {
                transfer_destination: true,
                ..usage
            };

            let (buffer, init) = ImmutableBuffer::raw(
                source.device().clone(),
                source.size(),
                actual_usage,
                source.device().active_queue_families(),
            )?;

            let mut cbb = AutoCommandBufferBuilder::primary(
                source.device().clone(),
                queue.family(),
                CommandBufferUsage::MultipleSubmit,
            )?;
            cbb.copy_buffer(source, init).unwrap(); // TODO: return error?
            let cb = cbb.build().unwrap(); // TODO: return OomError

            let future = match cb.execute(queue) {
                Ok(f) => f,
                Err(_) => unreachable!(),
            };

            Ok((buffer, future))
        }
    }
}

impl<T> ImmutableBuffer<T> {
    /// Builds a new buffer with uninitialized data. Only allowed for sized data.
    ///
    /// Returns two things: the buffer, and a special access that should be used for the initial
    /// upload to the buffer.
    ///
    /// You will get an error if you try to use the buffer before using the initial upload access.
    /// However, this function doesn't check whether you actually used this initial upload to fill
    /// the buffer as you're supposed to.
    ///
    /// You will also get an error if you try to get exclusive access to the final buffer.
    ///
    /// # Safety
    ///
    /// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
    ///   data, otherwise the content is undefined.
    ///
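    /// # Example
    ///
    /// A minimal sketch of the two-step initialization, assuming an existing `device`, `queue`,
    /// and staging buffer `source`:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use vulkano::buffer::{BufferUsage, CpuAccessibleBuffer, ImmutableBuffer};
    /// # use vulkano::command_buffer::{AutoCommandBufferBuilder, CommandBufferUsage, PrimaryCommandBuffer};
    /// # use vulkano::device::{Device, Queue};
    /// # fn example(device: Arc<Device>, queue: Arc<Queue>,
    /// #            source: Arc<CpuAccessibleBuffer<u32>>) {
    /// let (buffer, init) = unsafe {
    ///     ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
    /// };
    ///
    /// // Use the `init` access exactly once to fill the buffer.
    /// let mut cbb = AutoCommandBufferBuilder::primary(
    ///     device.clone(),
    ///     queue.family(),
    ///     CommandBufferUsage::MultipleSubmit,
    /// )
    /// .unwrap();
    /// cbb.copy_buffer(source, init).unwrap();
    /// let _future = cbb.build().unwrap().execute(queue.clone()).unwrap();
    /// # }
    /// ```
    ///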
    #[inline]
    pub unsafe fn uninitialized(
        device: Arc<Device>,
        usage: BufferUsage,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
    {
        ImmutableBuffer::raw(
            device.clone(),
            mem::size_of::<T>() as DeviceSize,
            usage,
            device.active_queue_families(),
        )
    }
}

impl<T> ImmutableBuffer<[T]> {
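    /// Builds an `ImmutableBuffer` from an iterator of data.
    ///
    /// This writes the iterator's contents to a memory-mapped intermediate buffer, then, like
    /// `from_data`, submits a command buffer that copies to the final buffer, and returns the
    /// buffer together with a future representing the initial upload.
    ///
    /// # Example
    ///
    /// A minimal sketch, assuming an existing `queue`:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use vulkano::buffer::{BufferUsage, ImmutableBuffer};
    /// # use vulkano::device::Queue;
    /// # fn example(queue: Arc<Queue>) {
    /// let (buffer, upload_future) =
    ///     ImmutableBuffer::from_iter(0..512u32, BufferUsage::all(), queue).unwrap();
    /// # }
    /// ```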
    pub fn from_iter<D>(
        data: D,
        usage: BufferUsage,
        queue: Arc<Queue>,
    ) -> Result<(Arc<ImmutableBuffer<[T]>>, ImmutableBufferFromBufferFuture), DeviceMemoryAllocError>
    where
        D: ExactSizeIterator<Item = T>,
        T: 'static + Send + Sync + Sized,
    {
        let source = CpuAccessibleBuffer::from_iter(
            queue.device().clone(),
            BufferUsage::transfer_source(),
            false,
            data,
        )?;
        ImmutableBuffer::from_buffer(source, usage, queue)
    }

    /// Builds a new buffer with uninitialized data. Can be used for arrays.
    ///
    /// Returns two things: the buffer, and a special access that should be used for the initial
    /// upload to the buffer.
    ///
    /// You will get an error if you try to use the buffer before using the initial upload access.
    /// However, this function doesn't check whether you actually used this initial upload to fill
    /// the buffer as you're supposed to.
    ///
    /// You will also get an error if you try to get exclusive access to the final buffer.
    ///
    /// # Safety
    ///
    /// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
    ///   data, otherwise the content is undefined.
    ///
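    /// # Example
    ///
    /// A minimal sketch, assuming an existing `device`; the upload then proceeds exactly as for
    /// `uninitialized`:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use vulkano::buffer::{BufferUsage, ImmutableBuffer};
    /// # use vulkano::device::Device;
    /// # fn example(device: Arc<Device>) {
    /// let (buffer, init) = unsafe {
    ///     ImmutableBuffer::<[u32]>::uninitialized_array(device.clone(), 512, BufferUsage::all())
    ///         .unwrap()
    /// };
    /// // `init` must then be used once for the initial upload.
    /// # }
    /// ```
    ///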
    #[inline]
    pub unsafe fn uninitialized_array(
        device: Arc<Device>,
        len: DeviceSize,
        usage: BufferUsage,
    ) -> Result<
        (
            Arc<ImmutableBuffer<[T]>>,
            ImmutableBufferInitialization<[T]>,
        ),
        DeviceMemoryAllocError,
    > {
        ImmutableBuffer::raw(
            device.clone(),
            len * mem::size_of::<T>() as DeviceSize,
            usage,
            device.active_queue_families(),
        )
    }
}

impl<T: ?Sized> ImmutableBuffer<T> {
    /// Builds a new buffer without checking the size, and with free access granted for the
    /// initial upload.
    ///
    /// Returns two things: the buffer, and a special access that should be used for the initial
    /// upload to the buffer.
    ///
    /// You will get an error if you try to use the buffer before using the initial upload access.
    /// However, this function doesn't check whether you used this initial upload to fill the buffer.
    /// You will also get an error if you try to get exclusive access to the final buffer.
    ///
    /// # Safety
    ///
    /// - You must ensure that the size that you pass is correct for `T`.
    /// - The `ImmutableBufferInitialization` should be used to fill the buffer with some initial
    ///   data.
    ///
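    /// # Example
    ///
    /// A minimal sketch, assuming an existing `device`; this mirrors what `uninitialized` does
    /// internally:
    ///
    /// ```no_run
    /// # use std::sync::Arc;
    /// # use vulkano::buffer::{BufferUsage, ImmutableBuffer};
    /// # use vulkano::device::Device;
    /// # use vulkano::DeviceSize;
    /// # fn example(device: Arc<Device>) {
    /// let (buffer, init) = unsafe {
    ///     ImmutableBuffer::<u32>::raw(
    ///         device.clone(),
    ///         std::mem::size_of::<u32>() as DeviceSize,
    ///         BufferUsage::all(),
    ///         device.active_queue_families(),
    ///     )
    ///     .unwrap()
    /// };
    /// # }
    /// ```
    ///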
    #[inline]
    pub unsafe fn raw<'a, I>(
        device: Arc<Device>,
        size: DeviceSize,
        usage: BufferUsage,
        queue_families: I,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
    where
        I: IntoIterator<Item = QueueFamily<'a>>,
    {
        let queue_families = queue_families.into_iter().map(|f| f.id()).collect();
        ImmutableBuffer::raw_impl(device, size, usage, queue_families)
    }

    // Internal implementation of `raw`. This is separated from `raw` so that it doesn't need to be
    // inlined.
    unsafe fn raw_impl(
        device: Arc<Device>,
        size: DeviceSize,
        usage: BufferUsage,
        queue_families: SmallVec<[u32; 4]>,
    ) -> Result<(Arc<ImmutableBuffer<T>>, ImmutableBufferInitialization<T>), DeviceMemoryAllocError>
    {
        let (buffer, mem_reqs) = {
            let sharing = if queue_families.len() >= 2 {
                Sharing::Concurrent(queue_families.iter().cloned())
            } else {
                Sharing::Exclusive
            };

            match UnsafeBuffer::new(device.clone(), size, usage, sharing, None) {
                Ok(b) => b,
                Err(BufferCreationError::AllocError(err)) => return Err(err),
                Err(_) => unreachable!(), // We don't use sparse binding, therefore the other
                                          // errors can't happen
            }
        };

        let mem = MemoryPool::alloc_from_requirements(
            &Device::standard_pool(&device),
            &mem_reqs,
            AllocLayout::Linear,
            MappingRequirement::DoNotMap,
            DedicatedAlloc::Buffer(&buffer),
            |t| {
                if t.is_device_local() {
                    AllocFromRequirementsFilter::Preferred
                } else {
                    AllocFromRequirementsFilter::Allowed
                }
            },
        )?;
        debug_assert!((mem.offset() % mem_reqs.alignment) == 0);
        buffer.bind_memory(mem.memory(), mem.offset())?;

        let final_buf = Arc::new(ImmutableBuffer {
            inner: buffer,
            memory: mem,
            queue_families,
            initialized: AtomicBool::new(false),
            marker: PhantomData,
        });

        let initialization = ImmutableBufferInitialization {
            buffer: final_buf.clone(),
            used: Arc::new(AtomicBool::new(false)),
        };

        Ok((final_buf, initialization))
    }
}

impl<T: ?Sized, A> ImmutableBuffer<T, A> {
    /// Returns the device used to create this buffer.
    #[inline]
    pub fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }

    /// Returns the queue families this buffer can be used on.
    // TODO: use a custom iterator
    #[inline]
    pub fn queue_families(&self) -> Vec<QueueFamily> {
        self.queue_families
            .iter()
            .map(|&num| {
                self.device()
                    .physical_device()
                    .queue_family_by_id(num)
                    .unwrap()
            })
            .collect()
    }
}

unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBuffer<T, A> {
    #[inline]
    fn inner(&self) -> BufferInner {
        BufferInner {
            buffer: &self.inner,
            offset: 0,
        }
    }

    #[inline]
    fn size(&self) -> DeviceSize {
        self.inner.size()
    }

    #[inline]
    fn conflict_key(&self) -> (u64, u64) {
        (self.inner.key(), 0)
    }

    #[inline]
    fn try_gpu_lock(&self, exclusive_access: bool, _: &Queue) -> Result<(), AccessError> {
        // The buffer is immutable: exclusive (write) access is never granted.
        if exclusive_access {
            return Err(AccessError::ExclusiveDenied);
        }

        // Reads are only allowed once the initial upload has completed.
        if !self.initialized.load(Ordering::Relaxed) {
            return Err(AccessError::BufferNotInitialized);
        }

        Ok(())
    }

    #[inline]
    unsafe fn increase_gpu_lock(&self) {}

    #[inline]
    unsafe fn unlock(&self) {}
}

unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBuffer<T, A> {
    type Content = T;
}

unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBuffer<T, A> {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.inner.device()
    }
}

impl<T: ?Sized, A> PartialEq for ImmutableBuffer<T, A> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.inner() == other.inner() && self.size() == other.size()
    }
}

impl<T: ?Sized, A> Eq for ImmutableBuffer<T, A> {}

impl<T: ?Sized, A> Hash for ImmutableBuffer<T, A> {
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.inner().hash(state);
        self.size().hash(state);
    }
}

/// Access to the immutable buffer that can be used for the initial upload.
//#[derive(Debug)]      // TODO:
pub struct ImmutableBufferInitialization<
    T: ?Sized,
    A = PotentialDedicatedAllocation<StdMemoryPoolAlloc>,
> {
    buffer: Arc<ImmutableBuffer<T, A>>,
    used: Arc<AtomicBool>,
}

unsafe impl<T: ?Sized, A> BufferAccess for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn inner(&self) -> BufferInner {
        self.buffer.inner()
    }

    #[inline]
    fn size(&self) -> DeviceSize {
        self.buffer.size()
    }

    #[inline]
    fn conflict_key(&self) -> (u64, u64) {
        (self.buffer.inner.key(), 0)
    }

    #[inline]
    fn try_gpu_lock(&self, _: bool, _: &Queue) -> Result<(), AccessError> {
        if self.buffer.initialized.load(Ordering::Relaxed) {
            return Err(AccessError::AlreadyInUse);
        }

        // `compare_exchange` returns the previous value in both its `Ok` and `Err` variants,
        // so `unwrap_or_else(|e| e)` flattens the result to that previous value. If it was
        // `false`, this call has just claimed the one-time initialization access.
        if !self
            .used
            .compare_exchange(false, true, Ordering::Relaxed, Ordering::Relaxed)
            .unwrap_or_else(|e| e)
        {
            Ok(())
        } else {
            Err(AccessError::AlreadyInUse)
        }
    }

    #[inline]
    unsafe fn increase_gpu_lock(&self) {
        debug_assert!(self.used.load(Ordering::Relaxed));
    }

    #[inline]
    unsafe fn unlock(&self) {
        // The initial upload has finished; the buffer now counts as initialized, which makes
        // read-only access through `ImmutableBuffer::try_gpu_lock` possible.
        self.buffer.initialized.store(true, Ordering::Relaxed);
    }
}

unsafe impl<T: ?Sized, A> TypedBufferAccess for ImmutableBufferInitialization<T, A> {
    type Content = T;
}

unsafe impl<T: ?Sized, A> DeviceOwned for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn device(&self) -> &Arc<Device> {
        self.buffer.inner.device()
    }
}

impl<T: ?Sized, A> Clone for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn clone(&self) -> ImmutableBufferInitialization<T, A> {
        ImmutableBufferInitialization {
            buffer: self.buffer.clone(),
            used: self.used.clone(),
        }
    }
}

impl<T: ?Sized, A> PartialEq for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn eq(&self, other: &Self) -> bool {
        self.inner() == other.inner() && self.size() == other.size()
    }
}

impl<T: ?Sized, A> Eq for ImmutableBufferInitialization<T, A> {}

impl<T: ?Sized, A> Hash for ImmutableBufferInitialization<T, A> {
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.inner().hash(state);
        self.size().hash(state);
    }
}

#[cfg(test)]
mod tests {
    use crate::buffer::cpu_access::CpuAccessibleBuffer;
    use crate::buffer::immutable::ImmutableBuffer;
    use crate::buffer::BufferUsage;
    use crate::command_buffer::AutoCommandBufferBuilder;
    use crate::command_buffer::CommandBufferUsage;
    use crate::command_buffer::PrimaryCommandBuffer;
    use crate::sync::GpuFuture;

    #[test]
    fn from_data_working() {
        let (device, queue) = gfx_dev_and_queue!();

        let (buffer, _) =
            ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();

        let destination =
            CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();

        let mut cbb = AutoCommandBufferBuilder::primary(
            device.clone(),
            queue.family(),
            CommandBufferUsage::MultipleSubmit,
        )
        .unwrap();
        cbb.copy_buffer(buffer, destination.clone()).unwrap();
        let _ = cbb
            .build()
            .unwrap()
            .execute(queue.clone())
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap();

        let destination_content = destination.read().unwrap();
        assert_eq!(*destination_content, 12);
    }

    #[test]
    fn from_iter_working() {
        let (device, queue) = gfx_dev_and_queue!();

        let (buffer, _) = ImmutableBuffer::from_iter(
            (0..512u32).map(|n| n * 2),
            BufferUsage::all(),
            queue.clone(),
        )
        .unwrap();

        let destination = CpuAccessibleBuffer::from_iter(
            device.clone(),
            BufferUsage::all(),
            false,
            (0..512).map(|_| 0u32),
        )
        .unwrap();

        let mut cbb = AutoCommandBufferBuilder::primary(
            device.clone(),
            queue.family(),
            CommandBufferUsage::MultipleSubmit,
        )
        .unwrap();
        cbb.copy_buffer(buffer, destination.clone()).unwrap();
        let _ = cbb
            .build()
            .unwrap()
            .execute(queue.clone())
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap();

        let destination_content = destination.read().unwrap();
        for (n, &v) in destination_content.iter().enumerate() {
            assert_eq!(n * 2, v as usize);
        }
    }

    #[test]
    fn writing_forbidden() {
        let (device, queue) = gfx_dev_and_queue!();

        let (buffer, _) =
            ImmutableBuffer::from_data(12u32, BufferUsage::all(), queue.clone()).unwrap();

        assert_should_panic!({
            // TODO: check Result error instead of panicking
            let mut cbb = AutoCommandBufferBuilder::primary(
                device.clone(),
                queue.family(),
                CommandBufferUsage::MultipleSubmit,
            )
            .unwrap();
            cbb.fill_buffer(buffer, 50).unwrap();
            let _ = cbb
                .build()
                .unwrap()
                .execute(queue.clone())
                .unwrap()
                .then_signal_fence_and_flush()
                .unwrap();
        });
    }

    #[test]
    fn read_uninitialized_forbidden() {
        let (device, queue) = gfx_dev_and_queue!();

        let (buffer, _) = unsafe {
            ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
        };

        let source =
            CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();

        assert_should_panic!({
            // TODO: check Result error instead of panicking
            let mut cbb = AutoCommandBufferBuilder::primary(
                device.clone(),
                queue.family(),
                CommandBufferUsage::MultipleSubmit,
            )
            .unwrap();
            cbb.copy_buffer(source, buffer).unwrap();
            let _ = cbb
                .build()
                .unwrap()
                .execute(queue.clone())
                .unwrap()
                .then_signal_fence_and_flush()
                .unwrap();
        });
    }

    #[test]
    fn init_then_read_same_cb() {
        let (device, queue) = gfx_dev_and_queue!();

        let (buffer, init) = unsafe {
            ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
        };

        let source =
            CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();

        let mut cbb = AutoCommandBufferBuilder::primary(
            device.clone(),
            queue.family(),
            CommandBufferUsage::MultipleSubmit,
        )
        .unwrap();
        cbb.copy_buffer(source.clone(), init)
            .unwrap()
            .copy_buffer(buffer, source.clone())
            .unwrap();
        let _ = cbb
            .build()
            .unwrap()
            .execute(queue.clone())
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap();
    }

    #[test]
    #[ignore] // TODO: doesn't work because the submit sync layer isn't properly implemented
    fn init_then_read_same_future() {
        let (device, queue) = gfx_dev_and_queue!();

        let (buffer, init) = unsafe {
            ImmutableBuffer::<u32>::uninitialized(device.clone(), BufferUsage::all()).unwrap()
        };

        let source =
            CpuAccessibleBuffer::from_data(device.clone(), BufferUsage::all(), false, 0).unwrap();

        let mut cbb = AutoCommandBufferBuilder::primary(
            device.clone(),
            queue.family(),
            CommandBufferUsage::MultipleSubmit,
        )
        .unwrap();
        cbb.copy_buffer(source.clone(), init).unwrap();
        let cb1 = cbb.build().unwrap();

        let mut cbb = AutoCommandBufferBuilder::primary(
            device.clone(),
            queue.family(),
            CommandBufferUsage::MultipleSubmit,
        )
        .unwrap();
        cbb.copy_buffer(buffer, source.clone()).unwrap();
        let cb2 = cbb.build().unwrap();

        let _ = cb1
            .execute(queue.clone())
            .unwrap()
            .then_execute(queue.clone(), cb2)
            .unwrap()
            .then_signal_fence_and_flush()
            .unwrap();
    }

    #[test]
    fn create_buffer_zero_size_data() {
        let (device, queue) = gfx_dev_and_queue!();

        let _ = ImmutableBuffer::from_data((), BufferUsage::all(), queue.clone());
    }

    // TODO: write tons of tests that try to exploit loopholes
    // this isn't possible yet because checks aren't correctly implemented yet
}