use crate::io::Interest;
use crate::runtime::io::{ReadyEvent, Registration};
use crate::runtime::scheduler;

use mio::unix::SourceFd;
use std::io;
use std::os::unix::io::{AsRawFd, RawFd};
use std::{task::Context, task::Poll};

/// Associates an IO object backed by a Unix file descriptor with the tokio
/// reactor, allowing for readiness to be polled. The file descriptor must be of
/// a type that can be used with the OS polling facilities (i.e., `poll`, `epoll`,
/// `kqueue`, etc.), such as a network socket or pipe, and the file descriptor
/// must have the nonblocking mode set to true.
///
/// Creating an AsyncFd registers the file descriptor with the current tokio
/// Reactor, allowing you to directly await the file descriptor being readable
/// or writable. Once registered, the file descriptor remains registered until
/// the AsyncFd is dropped.
///
/// The AsyncFd takes ownership of an arbitrary object to represent the IO
/// object. It is intended that this object will handle closing the file
/// descriptor when it is dropped, avoiding resource leaks and ensuring that the
/// AsyncFd can clean up the registration before closing the file descriptor.
/// The [`AsyncFd::into_inner`] function can be used to extract the inner object
/// to retake control from the tokio IO reactor.
///
/// The inner object is required to implement [`AsRawFd`]. This file descriptor
/// must not change while [`AsyncFd`] owns the inner object, i.e. the
/// [`AsRawFd::as_raw_fd`] method on the inner type must always return the same
/// file descriptor when called multiple times. Failure to uphold this results
/// in unspecified behavior in the IO driver, which may include breaking
/// notifications for other sockets/etc.
///
/// Polling for readiness is done by calling the async functions [`readable`]
/// and [`writable`]. These functions complete when the associated readiness
/// condition is observed. Any number of tasks can query the same `AsyncFd` in
/// parallel, on the same or different conditions.
///
/// On some platforms, the readiness detecting mechanism relies on
/// edge-triggered notifications. This means that the OS will only notify Tokio
/// when the file descriptor transitions from not-ready to ready. For this to
/// work you should first try to read or write and only poll for readiness
/// if that fails with an error of [`std::io::ErrorKind::WouldBlock`].
///
/// Tokio internally tracks when it has received a ready notification, and when
/// readiness checking functions like [`readable`] and [`writable`] are called,
/// if the readiness flag is set, these async functions will complete
/// immediately. This however does mean that it is critical to ensure that this
/// ready flag is cleared when (and only when) the file descriptor ceases to be
/// ready. The [`AsyncFdReadyGuard`] returned from readiness checking functions
/// serves this function; after calling a readiness-checking async function,
/// you must use this [`AsyncFdReadyGuard`] to signal to tokio whether the file
/// descriptor is no longer in a ready state.
///
/// ## Use with a poll-based API
///
/// In some cases it may be desirable to use `AsyncFd` from APIs similar to
/// [`TcpStream::poll_read_ready`]. The [`AsyncFd::poll_read_ready`] and
/// [`AsyncFd::poll_write_ready`] functions are provided for this purpose.
/// Because these functions don't create a future to hold their state, they have
/// the limitation that only one task can wait on each direction (read or write)
/// at a time.
///
/// # Examples
///
/// This example shows how to turn a [`std::net::TcpStream`] into an
/// asynchronous stream using `AsyncFd`. It implements `read` as an async fn
/// and `AsyncWrite` as a trait, to show both approaches.
///
/// ```no_run
/// use futures::ready;
/// use std::io::{self, Read, Write};
/// use std::net::TcpStream;
/// use std::pin::Pin;
/// use std::task::{Context, Poll};
/// use tokio::io::AsyncWrite;
/// use tokio::io::unix::AsyncFd;
///
/// pub struct AsyncTcpStream {
///     inner: AsyncFd<TcpStream>,
/// }
///
/// impl AsyncTcpStream {
///     pub fn new(tcp: TcpStream) -> io::Result<Self> {
///         tcp.set_nonblocking(true)?;
///         Ok(Self {
///             inner: AsyncFd::new(tcp)?,
///         })
///     }
///
///     pub async fn read(&self, out: &mut [u8]) -> io::Result<usize> {
///         loop {
///             let mut guard = self.inner.readable().await?;
///
///             match guard.try_io(|inner| inner.get_ref().read(out)) {
///                 Ok(result) => return result,
///                 Err(_would_block) => continue,
///             }
///         }
///     }
/// }
///
/// impl AsyncWrite for AsyncTcpStream {
///     fn poll_write(
///         self: Pin<&mut Self>,
///         cx: &mut Context<'_>,
///         buf: &[u8]
///     ) -> Poll<io::Result<usize>> {
///         loop {
///             let mut guard = ready!(self.inner.poll_write_ready(cx))?;
///
///             match guard.try_io(|inner| inner.get_ref().write(buf)) {
///                 Ok(result) => return Poll::Ready(result),
///                 Err(_would_block) => continue,
///             }
///         }
///     }
///
///     fn poll_flush(
///         self: Pin<&mut Self>,
///         cx: &mut Context<'_>,
///     ) -> Poll<io::Result<()>> {
///         // tcp flush is a no-op
///         Poll::Ready(Ok(()))
///     }
///
///     fn poll_shutdown(
///         self: Pin<&mut Self>,
///         cx: &mut Context<'_>,
///     ) -> Poll<io::Result<()>> {
///         self.inner.get_ref().shutdown(std::net::Shutdown::Write)?;
///         Poll::Ready(Ok(()))
///     }
/// }
/// ```
///
/// [`readable`]: method@Self::readable
/// [`writable`]: method@Self::writable
/// [`AsyncFdReadyGuard`]: struct@self::AsyncFdReadyGuard
/// [`TcpStream::poll_read_ready`]: method@crate::net::TcpStream::poll_read_ready
pub struct AsyncFd<T: AsRawFd> {
    registration: Registration,
    inner: Option<T>,
}

/// Represents an IO-ready event detected on a particular file descriptor that
/// has not yet been acknowledged. This is a `must_use` structure to help ensure
/// that you do not forget to explicitly clear (or not clear) the event.
///
/// This type exposes an immutable reference to the underlying IO object.
#[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"]
pub struct AsyncFdReadyGuard<'a, T: AsRawFd> {
    async_fd: &'a AsyncFd<T>,
    event: Option<ReadyEvent>,
}

/// Represents an IO-ready event detected on a particular file descriptor that
/// has not yet been acknowledged. This is a `must_use` structure to help ensure
/// that you do not forget to explicitly clear (or not clear) the event.
///
/// This type exposes a mutable reference to the underlying IO object.
#[must_use = "You must explicitly choose whether to clear the readiness state by calling a method on ReadyGuard"]
pub struct AsyncFdReadyMutGuard<'a, T: AsRawFd> {
    async_fd: &'a mut AsyncFd<T>,
    event: Option<ReadyEvent>,
}

const ALL_INTEREST: Interest = Interest::READABLE.add(Interest::WRITABLE);

impl<T: AsRawFd> AsyncFd<T> {
    /// Creates an AsyncFd backed by (and taking ownership of) an object
    /// implementing [`AsRawFd`]. The backing file descriptor is cached at the
    /// time of creation.
    ///
    /// This method must be called in the context of a tokio runtime.
    ///
    /// # Panics
    ///
    /// This function panics if there is no current reactor set, or if the `rt`
    /// feature flag is not enabled.
    #[inline]
    #[track_caller]
    pub fn new(inner: T) -> io::Result<Self>
    where
        T: AsRawFd,
    {
        Self::with_interest(inner, ALL_INTEREST)
    }

    /// Creates a new instance, as `new` does, with the additional ability to
    /// customize the interest, allowing you to specify whether the file
    /// descriptor will be polled for read, write, or both.
    ///
    /// # Panics
    ///
    /// This function panics if there is no current reactor set, or if the `rt`
    /// feature flag is not enabled.
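    ///
    /// # Examples
    ///
    /// A minimal sketch of registering a socket for read readiness only (the
    /// address below is a placeholder):
    ///
    /// ```no_run
    /// use std::net::TcpStream;
    /// use tokio::io::Interest;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// #[tokio::main]
    /// async fn main() -> std::io::Result<()> {
    ///     let stream = TcpStream::connect("127.0.0.1:8080")?;
    ///     stream.set_nonblocking(true)?;
    ///
    ///     // Only read readiness is tracked; this AsyncFd will never report
    ///     // the file descriptor as ready for writing.
    ///     let async_fd = AsyncFd::with_interest(stream, Interest::READABLE)?;
    ///
    ///     let _guard = async_fd.readable().await?;
    ///     Ok(())
    /// }
    /// ```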
    #[inline]
    #[track_caller]
    pub fn with_interest(inner: T, interest: Interest) -> io::Result<Self>
    where
        T: AsRawFd,
    {
        Self::new_with_handle_and_interest(inner, scheduler::Handle::current(), interest)
    }

    #[track_caller]
    pub(crate) fn new_with_handle_and_interest(
        inner: T,
        handle: scheduler::Handle,
        interest: Interest,
    ) -> io::Result<Self> {
        let fd = inner.as_raw_fd();

        let registration =
            Registration::new_with_interest_and_handle(&mut SourceFd(&fd), interest, handle)?;

        Ok(AsyncFd {
            registration,
            inner: Some(inner),
        })
    }

    /// Returns a shared reference to the backing object of this [`AsyncFd`].
    #[inline]
    pub fn get_ref(&self) -> &T {
        self.inner.as_ref().unwrap()
    }

    /// Returns a mutable reference to the backing object of this [`AsyncFd`].
    #[inline]
    pub fn get_mut(&mut self) -> &mut T {
        self.inner.as_mut().unwrap()
    }

    fn take_inner(&mut self) -> Option<T> {
        let fd = self.inner.as_ref().map(AsRawFd::as_raw_fd);

        if let Some(fd) = fd {
            let _ = self.registration.deregister(&mut SourceFd(&fd));
        }

        self.inner.take()
    }

    /// Deregisters this file descriptor and returns ownership of the backing
    /// object.
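    ///
    /// # Examples
    ///
    /// A minimal sketch of retaking control of the IO object from the tokio
    /// reactor (the connection setup is a placeholder):
    ///
    /// ```no_run
    /// use std::net::TcpStream;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// #[tokio::main]
    /// async fn main() -> std::io::Result<()> {
    ///     let stream = TcpStream::connect("127.0.0.1:8080")?;
    ///     stream.set_nonblocking(true)?;
    ///     let async_fd = AsyncFd::new(stream)?;
    ///
    ///     // ... use `async_fd` for readiness-based IO ...
    ///
    ///     // Deregister from the reactor and recover the TcpStream.
    ///     let stream = async_fd.into_inner();
    ///     stream.set_nonblocking(false)?;
    ///     Ok(())
    /// }
    /// ```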
    pub fn into_inner(mut self) -> T {
        self.take_inner().unwrap()
    }

    /// Polls for read readiness.
    ///
    /// If the file descriptor is not currently ready for reading, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for reading, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_read_ready`] or
    /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_write_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`readable`] is not feasible. Where possible, using [`readable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`readable`]: method@Self::readable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
    pub fn poll_read_ready<'a>(
        &'a self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyGuard<'a, T>>> {
        let event = ready!(self.registration.poll_read_ready(cx))?;

        Ok(AsyncFdReadyGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    /// Polls for read readiness.
    ///
    /// If the file descriptor is not currently ready for reading, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for reading, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_read_ready`] or
    /// [`poll_read_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_write_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`readable`] is not feasible. Where possible, using [`readable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_read_ready_mut`]: method@Self::poll_read_ready_mut
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`readable`]: method@Self::readable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
    pub fn poll_read_ready_mut<'a>(
        &'a mut self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyMutGuard<'a, T>>> {
        let event = ready!(self.registration.poll_read_ready(cx))?;

        Ok(AsyncFdReadyMutGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    /// Polls for write readiness.
    ///
    /// If the file descriptor is not currently ready for writing, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for writing, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_write_ready`] or
    /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_read_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`writable`] is not feasible. Where possible, using [`writable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut
    /// [`writable`]: method@Self::writable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
    pub fn poll_write_ready<'a>(
        &'a self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyGuard<'a, T>>> {
        let event = ready!(self.registration.poll_write_ready(cx))?;

        Ok(AsyncFdReadyGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    /// Polls for write readiness.
    ///
    /// If the file descriptor is not currently ready for writing, this method
    /// will store a clone of the [`Waker`] from the provided [`Context`]. When the
    /// file descriptor becomes ready for writing, [`Waker::wake`] will be called.
    ///
    /// Note that on multiple calls to [`poll_write_ready`] or
    /// [`poll_write_ready_mut`], only the `Waker` from the `Context` passed to the
    /// most recent call is scheduled to receive a wakeup. (However,
    /// [`poll_read_ready`] retains a second, independent waker).
    ///
    /// This method is intended for cases where creating and pinning a future
    /// via [`writable`] is not feasible. Where possible, using [`writable`] is
    /// preferred, as this supports polling from multiple tasks at once.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
    ///
    /// [`poll_read_ready`]: method@Self::poll_read_ready
    /// [`poll_write_ready`]: method@Self::poll_write_ready
    /// [`poll_write_ready_mut`]: method@Self::poll_write_ready_mut
    /// [`writable`]: method@Self::writable
    /// [`Context`]: struct@std::task::Context
    /// [`Waker`]: struct@std::task::Waker
    /// [`Waker::wake`]: method@std::task::Waker::wake
    pub fn poll_write_ready_mut<'a>(
        &'a mut self,
        cx: &mut Context<'_>,
    ) -> Poll<io::Result<AsyncFdReadyMutGuard<'a, T>>> {
        let event = ready!(self.registration.poll_write_ready(cx))?;

        Ok(AsyncFdReadyMutGuard {
            async_fd: self,
            event: Some(event),
        })
        .into()
    }

    async fn readiness(&self, interest: Interest) -> io::Result<AsyncFdReadyGuard<'_, T>> {
        let event = self.registration.readiness(interest).await?;

        Ok(AsyncFdReadyGuard {
            async_fd: self,
            event: Some(event),
        })
    }

    async fn readiness_mut(
        &mut self,
        interest: Interest,
    ) -> io::Result<AsyncFdReadyMutGuard<'_, T>> {
        let event = self.registration.readiness(interest).await?;

        Ok(AsyncFdReadyMutGuard {
            async_fd: self,
            event: Some(event),
        })
    }

    /// Waits for the file descriptor to become readable, returning an
    /// [`AsyncFdReadyGuard`] that must be dropped to resume read-readiness
    /// polling.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn readable<'a>(&'a self) -> io::Result<AsyncFdReadyGuard<'a, T>> {
        self.readiness(Interest::READABLE).await
    }

    /// Waits for the file descriptor to become readable, returning an
    /// [`AsyncFdReadyMutGuard`] that must be dropped to resume read-readiness
    /// polling.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn readable_mut<'a>(&'a mut self) -> io::Result<AsyncFdReadyMutGuard<'a, T>> {
        self.readiness_mut(Interest::READABLE).await
    }

    /// Waits for the file descriptor to become writable, returning an
    /// [`AsyncFdReadyGuard`] that must be dropped to resume write-readiness
    /// polling.
    ///
    /// This method takes `&self`, so it is possible to call this method
    /// concurrently with other methods on this struct. This method only
    /// provides shared access to the inner IO resource when handling the
    /// [`AsyncFdReadyGuard`].
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn writable<'a>(&'a self) -> io::Result<AsyncFdReadyGuard<'a, T>> {
        self.readiness(Interest::WRITABLE).await
    }

    /// Waits for the file descriptor to become writable, returning an
    /// [`AsyncFdReadyMutGuard`] that must be dropped to resume write-readiness
    /// polling.
    ///
    /// This method takes `&mut self`, so it is possible to access the inner IO
    /// resource mutably when handling the [`AsyncFdReadyMutGuard`].
    #[allow(clippy::needless_lifetimes)] // The lifetime improves rustdoc rendering.
    pub async fn writable_mut<'a>(&'a mut self) -> io::Result<AsyncFdReadyMutGuard<'a, T>> {
        self.readiness_mut(Interest::WRITABLE).await
    }
}

impl<T: AsRawFd> AsRawFd for AsyncFd<T> {
    fn as_raw_fd(&self) -> RawFd {
        self.inner.as_ref().unwrap().as_raw_fd()
    }
}

impl<T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFd<T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("AsyncFd")
            .field("inner", &self.inner)
            .finish()
    }
}

impl<T: AsRawFd> Drop for AsyncFd<T> {
    fn drop(&mut self) {
        let _ = self.take_inner();
    }
}

impl<'a, Inner: AsRawFd> AsyncFdReadyGuard<'a, Inner> {
    /// Indicates to tokio that the file descriptor is no longer ready. The
    /// internal readiness flag will be cleared, and tokio will wait for the
    /// next edge-triggered readiness notification from the OS.
    ///
    /// It is critical that this function not be called unless your code
    /// _actually observes_ that the file descriptor is _not_ ready. Do not call
    /// it simply because, for example, a read succeeded; it should be called
    /// when a read is observed to block.
    ///
    /// [`drop`]: method@std::mem::drop
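    ///
    /// # Examples
    ///
    /// A minimal sketch of handling [`WouldBlock`] by hand instead of going
    /// through [`try_io`] (assumes the `AsyncFd` wraps a nonblocking
    /// `TcpStream`):
    ///
    /// ```no_run
    /// use std::io::{self, Read};
    /// use std::net::TcpStream;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// async fn read_once(
    ///     async_fd: &AsyncFd<TcpStream>,
    ///     buf: &mut [u8],
    /// ) -> io::Result<usize> {
    ///     loop {
    ///         let mut guard = async_fd.readable().await?;
    ///
    ///         // `Read` is implemented for `&TcpStream`, so shared access suffices.
    ///         match guard.get_inner().read(buf) {
    ///             Ok(n) => return Ok(n),
    ///             Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
    ///                 // The fd is no longer ready: clear the flag and wait again.
    ///                 guard.clear_ready();
    ///             }
    ///             Err(e) => return Err(e),
    ///         }
    ///     }
    /// }
    /// ```
    ///
    /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
    /// [`try_io`]: method@Self::try_io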
    pub fn clear_ready(&mut self) {
        if let Some(event) = self.event.take() {
            self.async_fd.registration.clear_readiness(event);
        }
    }

    /// This method should be invoked when you intentionally want to keep the
    /// ready flag asserted.
    ///
    /// While this function is itself a no-op, it satisfies the `#[must_use]`
    /// constraint on the [`AsyncFdReadyGuard`] type.
    pub fn retain_ready(&mut self) {
        // no-op
    }

    /// Performs the provided IO operation.
    ///
    /// If `f` returns a [`WouldBlock`] error, the readiness state associated
    /// with this file descriptor is cleared, and the method returns an error
    /// wrapped in [`TryIoError`]. You will typically need to poll the
    /// `AsyncFd` again when this happens.
    ///
    /// This method helps ensure that the readiness state of the underlying file
    /// descriptor remains in sync with the tokio-side readiness state, by
    /// clearing the tokio-side state only when a [`WouldBlock`] condition
    /// occurs. It is the responsibility of the caller to ensure that `f`
    /// returns [`WouldBlock`] only if the file descriptor that originated this
    /// `AsyncFdReadyGuard` no longer expresses the readiness state that was queried to
    /// create this `AsyncFdReadyGuard`.
    ///
    /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
    // Alias for old name in 0.x
    #[cfg_attr(docsrs, doc(alias = "with_io"))]
    pub fn try_io<R>(
        &mut self,
        f: impl FnOnce(&'a AsyncFd<Inner>) -> io::Result<R>,
    ) -> Result<io::Result<R>, TryIoError> {
        let result = f(self.async_fd);

        if let Err(e) = result.as_ref() {
            if e.kind() == io::ErrorKind::WouldBlock {
                self.clear_ready();
            }
        }

        match result {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => Err(TryIoError(())),
            result => Ok(result),
        }
    }

    /// Returns a shared reference to the inner [`AsyncFd`].
    pub fn get_ref(&self) -> &'a AsyncFd<Inner> {
        self.async_fd
    }

    /// Returns a shared reference to the backing object of the inner [`AsyncFd`].
    pub fn get_inner(&self) -> &'a Inner {
        self.get_ref().get_ref()
    }
}

impl<'a, Inner: AsRawFd> AsyncFdReadyMutGuard<'a, Inner> {
    /// Indicates to tokio that the file descriptor is no longer ready. The
    /// internal readiness flag will be cleared, and tokio will wait for the
    /// next edge-triggered readiness notification from the OS.
    ///
    /// It is critical that this function not be called unless your code
    /// _actually observes_ that the file descriptor is _not_ ready. Do not call
    /// it simply because, for example, a read succeeded; it should be called
    /// when a read is observed to block.
    ///
    /// [`drop`]: method@std::mem::drop
    pub fn clear_ready(&mut self) {
        if let Some(event) = self.event.take() {
            self.async_fd.registration.clear_readiness(event);
        }
    }

    /// This method should be invoked when you intentionally want to keep the
    /// ready flag asserted.
    ///
    /// While this function is itself a no-op, it satisfies the `#[must_use]`
    /// constraint on the [`AsyncFdReadyMutGuard`] type.
    pub fn retain_ready(&mut self) {
        // no-op
    }

    /// Performs the provided IO operation.
    ///
    /// If `f` returns a [`WouldBlock`] error, the readiness state associated
    /// with this file descriptor is cleared, and the method returns an error
    /// wrapped in [`TryIoError`]. You will typically need to poll the
    /// `AsyncFd` again when this happens.
    ///
    /// This method helps ensure that the readiness state of the underlying file
    /// descriptor remains in sync with the tokio-side readiness state, by
    /// clearing the tokio-side state only when a [`WouldBlock`] condition
    /// occurs. It is the responsibility of the caller to ensure that `f`
    /// returns [`WouldBlock`] only if the file descriptor that originated this
    /// `AsyncFdReadyMutGuard` no longer expresses the readiness state that was queried to
    /// create this `AsyncFdReadyMutGuard`.
    ///
    /// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
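    ///
    /// # Examples
    ///
    /// A minimal sketch of reading through the mutable guard (assumes the
    /// `AsyncFd` wraps a nonblocking `TcpStream`):
    ///
    /// ```no_run
    /// use std::io::{self, Read};
    /// use std::net::TcpStream;
    /// use tokio::io::unix::AsyncFd;
    ///
    /// async fn read_once(
    ///     async_fd: &mut AsyncFd<TcpStream>,
    ///     buf: &mut [u8],
    /// ) -> io::Result<usize> {
    ///     loop {
    ///         let mut guard = async_fd.readable_mut().await?;
    ///
    ///         // The closure receives `&mut AsyncFd<TcpStream>`, so the inner
    ///         // stream can be accessed mutably through `get_mut`.
    ///         match guard.try_io(|inner| inner.get_mut().read(buf)) {
    ///             Ok(result) => return result,
    ///             Err(_would_block) => continue,
    ///         }
    ///     }
    /// }
    /// ```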
    pub fn try_io<R>(
        &mut self,
        f: impl FnOnce(&mut AsyncFd<Inner>) -> io::Result<R>,
    ) -> Result<io::Result<R>, TryIoError> {
        let result = f(self.async_fd);

        if let Err(e) = result.as_ref() {
            if e.kind() == io::ErrorKind::WouldBlock {
                self.clear_ready();
            }
        }

        match result {
            Err(err) if err.kind() == io::ErrorKind::WouldBlock => Err(TryIoError(())),
            result => Ok(result),
        }
    }

    /// Returns a shared reference to the inner [`AsyncFd`].
    pub fn get_ref(&self) -> &AsyncFd<Inner> {
        self.async_fd
    }

    /// Returns a mutable reference to the inner [`AsyncFd`].
    pub fn get_mut(&mut self) -> &mut AsyncFd<Inner> {
        self.async_fd
    }

    /// Returns a shared reference to the backing object of the inner [`AsyncFd`].
    pub fn get_inner(&self) -> &Inner {
        self.get_ref().get_ref()
    }

    /// Returns a mutable reference to the backing object of the inner [`AsyncFd`].
    pub fn get_inner_mut(&mut self) -> &mut Inner {
        self.get_mut().get_mut()
    }
}

impl<'a, T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFdReadyGuard<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("ReadyGuard")
            .field("async_fd", &self.async_fd)
            .finish()
    }
}

impl<'a, T: std::fmt::Debug + AsRawFd> std::fmt::Debug for AsyncFdReadyMutGuard<'a, T> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("MutReadyGuard")
            .field("async_fd", &self.async_fd)
            .finish()
    }
}

/// The error type returned by [`try_io`].
///
/// This error indicates that the IO resource returned a [`WouldBlock`] error.
///
/// [`WouldBlock`]: std::io::ErrorKind::WouldBlock
/// [`try_io`]: method@AsyncFdReadyGuard::try_io
#[derive(Debug)]
pub struct TryIoError(());