1 // Copyright 2023 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //! EXPERIMENTAL overlapped IO based async IO wrapper. Do not use in production.
6
7 use std::fs::File;
8 use std::io;
9 use std::io::Write;
10 use std::mem::ManuallyDrop;
11 use std::sync::Arc;
12
13 use base::error;
14 use base::AsRawDescriptor;
15 use base::Descriptor;
16 use base::FromRawDescriptor;
17 use base::PunchHole;
18 use base::RawDescriptor;
19 use base::WriteZeroesAt;
20 use thiserror::Error as ThisError;
21 use winapi::um::minwinbase::OVERLAPPED;
22
23 use crate::common_executor::RawExecutor;
24 use crate::mem::BackingMemory;
25 use crate::mem::MemRegion;
26 use crate::sys::windows::handle_executor::HandleReactor;
27 use crate::sys::windows::handle_executor::RegisteredOverlappedSource;
28 use crate::AsyncError;
29 use crate::AsyncResult;
30 use crate::BlockingPool;
31
32 #[derive(ThisError, Debug)]
33 pub enum Error {
34 #[error("An error occurred trying to get a VolatileSlice into BackingMemory: {0}.")]
35 BackingMemoryVolatileSliceFetchFailed(crate::mem::Error),
36 #[error("An error occurred trying to seek: {0}.")]
37 IoSeekError(io::Error),
38 #[error("An error occurred trying to read: {0}.")]
39 IoReadError(base::Error),
40 #[error("An error occurred trying to write: {0}.")]
41 IoWriteError(base::Error),
42 #[error("An error occurred trying to flush: {0}.")]
43 IoFlushError(io::Error),
44 #[error("An error occurred trying to punch hole: {0}.")]
45 IoPunchHoleError(io::Error),
46 #[error("An error occurred trying to write zeroes: {0}.")]
47 IoWriteZeroesError(io::Error),
48 #[error("An error occurred trying to duplicate source handles: {0}.")]
49 HandleDuplicationFailed(io::Error),
50 #[error("A IO error occurred trying to read: {0}.")]
51 StdIoReadError(io::Error),
52 #[error("An IO error occurred trying to write: {0}.")]
53 StdIoWriteError(io::Error),
54 }
55
56 impl From<Error> for io::Error {
from(e: Error) -> Self57 fn from(e: Error) -> Self {
58 use Error::*;
59 match e {
60 BackingMemoryVolatileSliceFetchFailed(e) => io::Error::new(io::ErrorKind::Other, e),
61 IoSeekError(e) => e,
62 IoReadError(e) => e.into(),
63 IoWriteError(e) => e.into(),
64 IoFlushError(e) => e,
65 IoPunchHoleError(e) => e,
66 IoWriteZeroesError(e) => e,
67 HandleDuplicationFailed(e) => e,
68 StdIoReadError(e) => e,
69 StdIoWriteError(e) => e,
70 }
71 }
72 }
73
impl From<Error> for AsyncError {
    fn from(e: Error) -> AsyncError {
        // Convert via the From<Error> impl above into the payload type that
        // `SysVariants` holds (inferred by `into()`), then wrap it.
        AsyncError::SysVariants(e.into())
    }
}
79
/// Result alias for fallible overlapped IO operations in this module.
pub type Result<T> = std::result::Result<T, Error>;
81
/// Async IO source for Windows that uses a multi-threaded, multi-handle approach to provide fast IO
/// operations. It demuxes IO requests across a set of handles that refer to the same underlying IO
/// source, such as a file, and executes those requests across multiple threads. Benchmarks show
/// that this is the fastest method to perform IO on Windows, especially for file reads.
pub struct OverlappedSource<F: AsRawDescriptor> {
    // Runs inherently blocking operations (e.g. flush in `fsync`) off the executor.
    blocking_pool: BlockingPool,
    // Registration of `source` with the executor's reactor; used to create overlapped operations.
    reg_source: RegisteredOverlappedSource,
    // The underlying IO source; MUST be opened in overlapped mode (see `new`).
    source: F,
    // True for non-seekable sources (e.g. named pipes); offset-based operations are rejected.
    seek_forbidden: bool,
}
92
impl<F: AsRawDescriptor> OverlappedSource<F> {
    /// Create a new `OverlappedSource` from the given IO source. The source MUST be opened in
    /// overlapped mode or undefined behavior will result.
    ///
    /// seek_forbidden should be set for non seekable types like named pipes.
    pub fn new(
        source: F,
        ex: &Arc<RawExecutor<HandleReactor>>,
        seek_forbidden: bool,
    ) -> AsyncResult<Self> {
        Ok(Self {
            blocking_pool: BlockingPool::default(),
            // Register the source with the reactor so overlapped completions
            // can wake futures on this executor.
            reg_source: ex.reactor.register_overlapped_source(ex, &source)?,
            source,
            seek_forbidden,
        })
    }
}
111
112 /// SAFETY:
113 /// Safety requirements:
114 /// Same as base::windows::read_file.
read( file: RawDescriptor, buf: *mut u8, buf_len: usize, overlapped: &mut OVERLAPPED, ) -> AsyncResult<()>115 unsafe fn read(
116 file: RawDescriptor,
117 buf: *mut u8,
118 buf_len: usize,
119 overlapped: &mut OVERLAPPED,
120 ) -> AsyncResult<()> {
121 Ok(
122 base::windows::read_file(&Descriptor(file), buf, buf_len, Some(overlapped))
123 .map(|_len| ())
124 .map_err(Error::StdIoReadError)?,
125 )
126 }
127
128 /// SAFETY:
129 /// Safety requirements:
130 /// Same as base::windows::write_file.
write( file: RawDescriptor, buf: *const u8, buf_len: usize, overlapped: &mut OVERLAPPED, ) -> AsyncResult<()>131 unsafe fn write(
132 file: RawDescriptor,
133 buf: *const u8,
134 buf_len: usize,
135 overlapped: &mut OVERLAPPED,
136 ) -> AsyncResult<()> {
137 Ok(
138 base::windows::write_file(&Descriptor(file), buf, buf_len, Some(overlapped))
139 .map(|_len| ())
140 .map_err(Error::StdIoWriteError)?,
141 )
142 }
143
144 impl<F: AsRawDescriptor> OverlappedSource<F> {
145 /// Reads from the iosource at `file_offset` and fill the given `vec`.
read_to_vec( &self, file_offset: Option<u64>, mut vec: Vec<u8>, ) -> AsyncResult<(usize, Vec<u8>)>146 pub async fn read_to_vec(
147 &self,
148 file_offset: Option<u64>,
149 mut vec: Vec<u8>,
150 ) -> AsyncResult<(usize, Vec<u8>)> {
151 if self.seek_forbidden && file_offset.is_some() {
152 return Err(Error::IoSeekError(io::Error::new(
153 io::ErrorKind::InvalidInput,
154 "seek on non-seekable handle",
155 ))
156 .into());
157 }
158 let mut overlapped_op = self.reg_source.register_overlapped_operation(file_offset)?;
159
160 // SAFETY:
161 // Safe because we pass a pointer to a valid vec and that same vector's length.
162 unsafe {
163 read(
164 self.source.as_raw_descriptor(),
165 vec.as_mut_ptr(),
166 vec.len(),
167 overlapped_op.get_overlapped(),
168 )?
169 };
170 let overlapped_result = overlapped_op.await?;
171 let bytes_read = overlapped_result.result.map_err(Error::IoReadError)?;
172 Ok((bytes_read, vec))
173 }
174
175 /// Reads to the given `mem` at the given offsets from the file starting at `file_offset`.
read_to_mem( &self, file_offset: Option<u64>, mem: Arc<dyn BackingMemory + Send + Sync>, mem_offsets: impl IntoIterator<Item = MemRegion>, ) -> AsyncResult<usize>176 pub async fn read_to_mem(
177 &self,
178 file_offset: Option<u64>,
179 mem: Arc<dyn BackingMemory + Send + Sync>,
180 mem_offsets: impl IntoIterator<Item = MemRegion>,
181 ) -> AsyncResult<usize> {
182 let mut total_bytes_read = 0;
183 let mut offset = match file_offset {
184 Some(offset) if !self.seek_forbidden => Some(offset),
185 None if self.seek_forbidden => None,
186 // For devices that are seekable (files), we have to track the offset otherwise
187 // subsequent read calls will just read the same bytes into each of the memory regions.
188 None => Some(0),
189 _ => {
190 return Err(Error::IoSeekError(io::Error::new(
191 io::ErrorKind::InvalidInput,
192 "seek on non-seekable handle",
193 ))
194 .into())
195 }
196 };
197
198 for region in mem_offsets.into_iter() {
199 let mut overlapped_op = self.reg_source.register_overlapped_operation(offset)?;
200
201 let slice = mem
202 .get_volatile_slice(region)
203 .map_err(Error::BackingMemoryVolatileSliceFetchFailed)?;
204
205 // SAFETY:
206 // Safe because we're passing a volatile slice (valid ptr), and the size of the memory
207 // region it refers to.
208 unsafe {
209 read(
210 self.source.as_raw_descriptor(),
211 slice.as_mut_ptr(),
212 slice.size(),
213 overlapped_op.get_overlapped(),
214 )?
215 };
216 let overlapped_result = overlapped_op.await?;
217 let bytes_read = overlapped_result.result.map_err(Error::IoReadError)?;
218 offset = offset.map(|offset| offset + bytes_read as u64);
219 total_bytes_read += bytes_read;
220 }
221 Ok(total_bytes_read)
222 }
223
224 /// Wait for the handle of `self` to be readable.
wait_readable(&self) -> AsyncResult<()>225 pub async fn wait_readable(&self) -> AsyncResult<()> {
226 unimplemented!()
227 }
228
229 /// Reads a single u64 from the current offset.
read_u64(&self) -> AsyncResult<u64>230 pub async fn read_u64(&self) -> AsyncResult<u64> {
231 unimplemented!()
232 }
233
234 /// Writes from the given `vec` to the file starting at `file_offset`.
write_from_vec( &self, file_offset: Option<u64>, vec: Vec<u8>, ) -> AsyncResult<(usize, Vec<u8>)>235 pub async fn write_from_vec(
236 &self,
237 file_offset: Option<u64>,
238 vec: Vec<u8>,
239 ) -> AsyncResult<(usize, Vec<u8>)> {
240 if self.seek_forbidden && file_offset.is_some() {
241 return Err(Error::IoSeekError(io::Error::new(
242 io::ErrorKind::InvalidInput,
243 "seek on non-seekable handle",
244 ))
245 .into());
246 }
247 let mut overlapped_op = self.reg_source.register_overlapped_operation(file_offset)?;
248
249 // SAFETY:
250 // Safe because we pass a pointer to a valid vec and that same vector's length.
251 unsafe {
252 write(
253 self.source.as_raw_descriptor(),
254 vec.as_ptr(),
255 vec.len(),
256 overlapped_op.get_overlapped(),
257 )?
258 };
259
260 let bytes_written = overlapped_op.await?.result.map_err(Error::IoWriteError)?;
261 Ok((bytes_written, vec))
262 }
263
264 /// Writes from the given `mem` from the given offsets to the file starting at `file_offset`.
write_from_mem( &self, file_offset: Option<u64>, mem: Arc<dyn BackingMemory + Send + Sync>, mem_offsets: impl IntoIterator<Item = MemRegion>, ) -> AsyncResult<usize>265 pub async fn write_from_mem(
266 &self,
267 file_offset: Option<u64>,
268 mem: Arc<dyn BackingMemory + Send + Sync>,
269 mem_offsets: impl IntoIterator<Item = MemRegion>,
270 ) -> AsyncResult<usize> {
271 let mut total_bytes_written = 0;
272 let mut offset = match file_offset {
273 Some(offset) if !self.seek_forbidden => Some(offset),
274 None if self.seek_forbidden => None,
275 // For devices that are seekable (files), we have to track the offset otherwise
276 // subsequent read calls will just read the same bytes into each of the memory regions.
277 None => Some(0),
278 _ => {
279 return Err(Error::IoSeekError(io::Error::new(
280 io::ErrorKind::InvalidInput,
281 "seek on non-seekable handle",
282 ))
283 .into())
284 }
285 };
286
287 for region in mem_offsets.into_iter() {
288 let mut overlapped_op = self.reg_source.register_overlapped_operation(offset)?;
289
290 let slice = mem
291 .get_volatile_slice(region)
292 .map_err(Error::BackingMemoryVolatileSliceFetchFailed)?;
293
294 // SAFETY:
295 // Safe because we're passing a volatile slice (valid ptr), and the size of the memory
296 // region it refers to.
297 unsafe {
298 write(
299 self.source.as_raw_descriptor(),
300 slice.as_ptr(),
301 slice.size(),
302 overlapped_op.get_overlapped(),
303 )?
304 };
305 let bytes_written = overlapped_op.await?.result.map_err(Error::IoReadError)?;
306 offset = offset.map(|offset| offset + bytes_written as u64);
307 total_bytes_written += bytes_written;
308 }
309 Ok(total_bytes_written)
310 }
311
312 /// Deallocates the given range of a file.
313 ///
314 /// TODO(nkgold): currently this is sync on the executor, which is bad / very hacky. With a
315 /// little wrapper work, we can make overlapped DeviceIoControl calls instead.
punch_hole(&self, file_offset: u64, len: u64) -> AsyncResult<()>316 pub async fn punch_hole(&self, file_offset: u64, len: u64) -> AsyncResult<()> {
317 if self.seek_forbidden {
318 return Err(Error::IoSeekError(io::Error::new(
319 io::ErrorKind::InvalidInput,
320 "fallocate cannot be called on a non-seekable handle",
321 ))
322 .into());
323 }
324 // SAFETY:
325 // Safe because self.source lives as long as file.
326 let file = ManuallyDrop::new(unsafe {
327 File::from_raw_descriptor(self.source.as_raw_descriptor())
328 });
329 file.punch_hole(file_offset, len)
330 .map_err(Error::IoPunchHoleError)?;
331 Ok(())
332 }
333
334 /// Fills the given range with zeroes.
335 ///
336 /// TODO(nkgold): currently this is sync on the executor, which is bad / very hacky. With a
337 /// little wrapper work, we can make overlapped DeviceIoControl calls instead.
write_zeroes_at(&self, file_offset: u64, len: u64) -> AsyncResult<()>338 pub async fn write_zeroes_at(&self, file_offset: u64, len: u64) -> AsyncResult<()> {
339 if self.seek_forbidden {
340 return Err(Error::IoSeekError(io::Error::new(
341 io::ErrorKind::InvalidInput,
342 "write_zeroes_at cannot be called on a non-seekable handle",
343 ))
344 .into());
345 }
346 // SAFETY:
347 // Safe because self.source lives as long as file.
348 let mut file = ManuallyDrop::new(unsafe {
349 File::from_raw_descriptor(self.source.as_raw_descriptor())
350 });
351 // ZeroRange calls `punch_hole` which doesn't extend the File size if it needs to.
352 // Will fix if it becomes a problem.
353 file.write_zeroes_at(file_offset, len as usize)
354 .map_err(Error::IoWriteZeroesError)?;
355 Ok(())
356 }
357
358 /// Sync all completed write operations to the backing storage.
fsync(&self) -> AsyncResult<()>359 pub async fn fsync(&self) -> AsyncResult<()> {
360 // SAFETY:
361 // Safe because self.source lives at least as long as the blocking pool thread. Note that
362 // if the blocking pool stalls and shutdown fails, the thread could outlive the file;
363 // however, this would mean things are already badly broken and we have a similar risk in
364 // HandleSource.
365 let mut file = unsafe {
366 ManuallyDrop::new(File::from_raw_descriptor(self.source.as_raw_descriptor()))
367 .try_clone()
368 .map_err(Error::HandleDuplicationFailed)?
369 };
370
371 Ok(self
372 .blocking_pool
373 .spawn(move || file.flush().map_err(Error::IoFlushError))
374 .await?)
375 }
376
377 /// Sync all data of completed write operations to the backing storage. Currently, the
378 /// implementation is equivalent to fsync.
fdatasync(&self) -> AsyncResult<()>379 pub async fn fdatasync(&self) -> AsyncResult<()> {
380 // TODO(b/282003931): Fall back to regular fsync.
381 self.fsync().await
382 }
383
384 /// Yields the underlying IO source.
into_source(self) -> F385 pub fn into_source(self) -> F {
386 self.source
387 }
388
389 /// Provides a mutable ref to the underlying IO source.
as_source_mut(&mut self) -> &mut F390 pub fn as_source_mut(&mut self) -> &mut F {
391 &mut self.source
392 }
393
394 /// Provides a ref to the underlying IO source.
395 ///
396 /// In the multi-source case, the 0th source will be returned. If sources are not
397 /// interchangeable, behavior is undefined.
as_source(&self) -> &F398 pub fn as_source(&self) -> &F {
399 &self.source
400 }
401
wait_for_handle(&self) -> AsyncResult<()>402 pub async fn wait_for_handle(&self) -> AsyncResult<()> {
403 let waiter = super::WaitForHandle::new(&self.source);
404 Ok(waiter.await?)
405 }
406 }
407
408 // NOTE: Prefer adding tests to io_source.rs if not backend specific.
#[cfg(test)]
mod tests {
    use std::fs::OpenOptions;
    use std::io::Read;
    use std::os::windows::fs::OpenOptionsExt;
    use std::path::PathBuf;

    use tempfile::TempDir;
    use winapi::um::winbase::FILE_FLAG_OVERLAPPED;

    use super::*;
    use crate::mem::VecIoWrapper;
    use crate::ExecutorTrait;

    // Returns a path for a not-yet-created file inside a fresh temp dir. The TempDir is
    // returned too so it (and the file) live for the duration of the test.
    fn tempfile_path() -> (PathBuf, TempDir) {
        let dir = tempfile::TempDir::new().unwrap();
        let mut file_path = PathBuf::from(dir.path());
        file_path.push("test");
        (file_path, dir)
    }

    // Opens (creating if needed) the file in overlapped mode, as OverlappedSource requires.
    fn open_overlapped(path: &PathBuf) -> File {
        OpenOptions::new()
            .create(true)
            .read(true)
            .write(true)
            .custom_flags(FILE_FLAG_OVERLAPPED)
            .open(path)
            .unwrap()
    }

    // Opens (creating if needed) the file in ordinary blocking mode, used to seed/verify
    // file contents outside the async source.
    fn open_blocking(path: &PathBuf) -> File {
        OpenOptions::new()
            .create(true)
            .read(true)
            .write(true)
            .open(path)
            .unwrap()
    }

    #[test]
    fn test_read_vec() {
        let (file_path, _tmpdir) = tempfile_path();
        let mut f = open_blocking(&file_path);
        f.write_all("data".as_bytes()).unwrap();
        f.flush().unwrap();
        drop(f);

        async fn read_data(src: &OverlappedSource<File>) {
            let buf: Vec<u8> = vec![0; 4];
            let (bytes_read, buf) = src.read_to_vec(Some(0), buf).await.unwrap();
            assert_eq!(bytes_read, 4);
            assert_eq!(std::str::from_utf8(buf.as_slice()).unwrap(), "data");
        }

        let ex = RawExecutor::<HandleReactor>::new().unwrap();
        let src = OverlappedSource::new(open_overlapped(&file_path), &ex, false).unwrap();
        ex.run_until(read_data(&src)).unwrap();
    }

    #[test]
    fn test_read_mem() {
        let (file_path, _tmpdir) = tempfile_path();
        let mut f = open_blocking(&file_path);
        f.write_all("data".as_bytes()).unwrap();
        f.flush().unwrap();
        drop(f);

        async fn read_data(src: &OverlappedSource<File>) {
            let mem = Arc::new(VecIoWrapper::from(vec![0; 4]));
            // Two 2-byte regions exercise the offset tracking across regions.
            let bytes_read = src
                .read_to_mem(
                    Some(0),
                    Arc::<VecIoWrapper>::clone(&mem),
                    [
                        MemRegion { offset: 0, len: 2 },
                        MemRegion { offset: 2, len: 2 },
                    ],
                )
                .await
                .unwrap();
            assert_eq!(bytes_read, 4);
            let vec: Vec<u8> = match Arc::try_unwrap(mem) {
                Ok(v) => v.into(),
                Err(_) => panic!("Too many vec refs"),
            };
            assert_eq!(std::str::from_utf8(vec.as_slice()).unwrap(), "data");
        }

        let ex = RawExecutor::<HandleReactor>::new().unwrap();
        let src = OverlappedSource::new(open_overlapped(&file_path), &ex, false).unwrap();
        ex.run_until(read_data(&src)).unwrap();
    }

    #[test]
    fn test_write_vec() {
        let (file_path, _tmpdir) = tempfile_path();

        async fn write_data(src: &OverlappedSource<File>) {
            let mut buf: Vec<u8> = Vec::new();
            buf.extend_from_slice("data".as_bytes());

            let (bytes_written, _) = src.write_from_vec(Some(0), buf).await.unwrap();
            assert_eq!(bytes_written, 4);
        }

        let ex = RawExecutor::<HandleReactor>::new().unwrap();
        let f = open_overlapped(&file_path);
        let src = OverlappedSource::new(f, &ex, false).unwrap();
        ex.run_until(write_data(&src)).unwrap();
        // Drop the source (and its handle) before re-opening to verify contents.
        drop(src);

        let mut buf = vec![0; 4];
        let mut f = open_blocking(&file_path);
        f.read_exact(&mut buf).unwrap();
        assert_eq!(std::str::from_utf8(buf.as_slice()).unwrap(), "data");
    }

    #[test]
    fn test_write_mem() {
        let (file_path, _tmpdir) = tempfile_path();

        async fn write_data(src: &OverlappedSource<File>) {
            let mut buf: Vec<u8> = Vec::new();
            buf.extend_from_slice("data".as_bytes());
            let mem = Arc::new(VecIoWrapper::from(buf));
            // Two 2-byte regions exercise the offset tracking across regions.
            let bytes_written = src
                .write_from_mem(
                    Some(0),
                    Arc::<VecIoWrapper>::clone(&mem),
                    [
                        MemRegion { offset: 0, len: 2 },
                        MemRegion { offset: 2, len: 2 },
                    ],
                )
                .await
                .unwrap();
            assert_eq!(bytes_written, 4);
            match Arc::try_unwrap(mem) {
                Ok(_) => (),
                Err(_) => panic!("Too many vec refs"),
            };
        }

        let ex = RawExecutor::<HandleReactor>::new().unwrap();
        let f = open_overlapped(&file_path);
        let src = OverlappedSource::new(f, &ex, false).unwrap();
        ex.run_until(write_data(&src)).unwrap();
        // Drop the source (and its handle) before re-opening to verify contents.
        drop(src);

        let mut buf = vec![0; 4];
        let mut f = open_blocking(&file_path);
        f.read_exact(&mut buf).unwrap();
        assert_eq!(std::str::from_utf8(buf.as_slice()).unwrap(), "data");
    }

    #[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
    #[test]
    fn test_punch_holes() {
        let (file_path, _tmpdir) = tempfile_path();
        let mut temp_file = open_blocking(&file_path);
        temp_file.write_all("abcdefghijk".as_bytes()).unwrap();
        temp_file.flush().unwrap();
        drop(temp_file);

        async fn punch_hole(src: &OverlappedSource<File>) {
            let offset = 1;
            let len = 3;
            src.punch_hole(offset, len).await.unwrap();
        }

        let ex = RawExecutor::<HandleReactor>::new().unwrap();
        let f = open_overlapped(&file_path);
        let src = OverlappedSource::new(f, &ex, false).unwrap();
        ex.run_until(punch_hole(&src)).unwrap();
        drop(src);

        // Punched range reads back as zero bytes.
        let mut buf = vec![0; 11];
        let mut f = open_blocking(&file_path);
        f.read_exact(&mut buf).unwrap();
        assert_eq!(
            std::str::from_utf8(buf.as_slice()).unwrap(),
            "a\0\0\0efghijk"
        );
    }

    /// Test should fail because punch hole should not be allowed to allocate more memory
    #[cfg_attr(all(target_os = "windows", target_env = "gnu"), ignore)]
    #[test]
    fn test_punch_holes_fail_out_of_bounds() {
        let (file_path, _tmpdir) = tempfile_path();
        let mut temp_file = open_blocking(&file_path);
        temp_file.write_all("abcdefghijk".as_bytes()).unwrap();
        temp_file.flush().unwrap();
        drop(temp_file);

        // Range extends past end-of-file; the file must not grow to cover it.
        async fn punch_hole(src: &OverlappedSource<File>) {
            let offset = 9;
            let len = 4;
            src.punch_hole(offset, len).await.unwrap();
        }

        let ex = RawExecutor::<HandleReactor>::new().unwrap();
        let f = open_overlapped(&file_path);
        let src = OverlappedSource::new(f, &ex, false).unwrap();
        ex.run_until(punch_hole(&src)).unwrap();
        drop(src);

        // Reading 13 bytes must fail since the file was not extended.
        let mut buf = vec![0; 13];
        let mut f = open_blocking(&file_path);
        assert!(f.read_exact(&mut buf).is_err());
    }

    // TODO(b/194338842): "ZeroRange" is supposed to allocate more memory if it goes out of the
    // bounds of the file. Determine if we need to support this, since Windows doesn't do this yet.
    // use tempfile::NamedTempFile;
    // #[test]
    // fn test_write_zeroes() {
    //     let mut temp_file = NamedTempFile::new().unwrap();
    //     temp_file.write("abcdefghijk".as_bytes()).unwrap();
    //     temp_file.flush().unwrap();
    //     temp_file.seek(SeekFrom::Start(0)).unwrap();

    //     async fn punch_hole(src: &OverlappedSource<File>) {
    //         let offset = 9;
    //         let len = 4;
    //         src
    //             .fallocate(offset, len, AllocateMode::ZeroRange)
    //             .await
    //             .unwrap();
    //     }

    //     let ex = RawExecutor::<HandleReactor>::new();
    //     let f = fs::OpenOptions::new()
    //         .write(true)
    //         .open(temp_file.path())
    //         .unwrap();
    //     let src = OverlappedSource::new(vec![f].into_boxed_slice()).unwrap();
    //     ex.run_until(punch_hole(&src)).unwrap();

    //     let mut buf = vec![0; 13];
    //     temp_file.read_exact(&mut buf).unwrap();
    //     assert_eq!(
    //         std::str::from_utf8(buf.as_slice()).unwrap(),
    //         "abcdefghi\0\0\0\0"
    //     );
    // }
}
657