// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

//! Logic for closing files in a deferred manner.
//!
//! This file could make sense to have in `kernel::fs`, but it was rejected for being too
//! Binder-specific.

use core::mem::MaybeUninit;
use kernel::{
    alloc::{AllocError, Flags},
    bindings,
    prelude::*,
};

/// Helper used for closing file descriptors in a way that is safe even if the file is currently
/// held using `fdget`.
///
/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
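///
/// # Examples
///
/// A minimal usage sketch, not taken from the original sources; the `fd` value and the enclosing
/// function returning `Result` are assumptions:
///
/// ```ignore
/// // Pre-allocate the helper; the allocation happens here, not at close time.
/// let closer = DeferredFdCloser::new(GFP_KERNEL)?;
/// // Defer the actual close of `fd` until the current task returns to userspace.
/// closer.close_fd(fd)?;
/// ```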
pub(crate) struct DeferredFdCloser {
    inner: KBox<DeferredFdCloserInner>,
}

// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
// moving it across threads.
unsafe impl Send for DeferredFdCloser {}
unsafe impl Sync for DeferredFdCloser {}

/// # Invariants
///
/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
/// that file.
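// Layout note: `close_fd` casts a pointer to this struct into a pointer to its `callback_head`,
// so `twork` must remain the first field and the struct must stay `#[repr(C)]`.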
#[repr(C)]
struct DeferredFdCloserInner {
    twork: MaybeUninit<bindings::callback_head>,
    file: *mut bindings::file,
}

impl DeferredFdCloser {
    /// Create a new [`DeferredFdCloser`].
    pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
        Ok(Self {
            // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
            inner: KBox::new(
                DeferredFdCloserInner {
                    twork: MaybeUninit::uninit(),
                    file: core::ptr::null_mut(),
                },
                flags,
            )?,
        })
    }

    /// Schedule a task work that closes the file descriptor when this task returns to userspace.
    ///
    /// Fails if this is called from a context where we cannot run work when returning to
    /// userspace. (E.g., from a kthread.)
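    ///
    /// # Examples
    ///
    /// A hedged sketch of one way a caller might handle the two failure modes; the call site and
    /// `fd` are assumptions, not part of this file:
    ///
    /// ```ignore
    /// match closer.close_fd(fd) {
    ///     Ok(()) => {}
    ///     // The fd was not in the table, so there is nothing to close.
    ///     Err(DeferredFdCloseError::BadFd) => {}
    ///     // Task works are unavailable (e.g. on a kthread); the fd is still open.
    ///     Err(err @ DeferredFdCloseError::TaskWorkUnavailable) => return Err(err.into()),
    /// }
    /// ```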
    pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
        use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;

        // In this method, we schedule the task work before closing the file. This is because
        // scheduling a task work is fallible, and we need to know whether it will fail before we
        // attempt to close the file.

        // Task works are not available on kthreads.
        let current = kernel::current!();

        // Check if this is a kthread.
        // SAFETY: Reading `flags` from a task is always okay.
        if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
            return Err(DeferredFdCloseError::TaskWorkUnavailable);
        }

        // Transfer ownership of the box's allocation to a raw pointer. This disables the
        // destructor, so we must manually convert it back to a KBox to drop it.
        //
        // Until we convert it back to a `KBox`, there are no aliasing requirements on this
        // pointer.
        let inner = KBox::into_raw(self.inner);

        // The `callback_head` field is first in the struct, so this cast correctly gives us a
        // pointer to the field.
        let callback_head = inner.cast::<bindings::callback_head>();
        // SAFETY: This pointer offset operation does not go out-of-bounds.
        let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };

        let current = current.as_ptr();

        // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
        // it is okay for us to perform unsynchronized writes to its `callback_head` field.
        unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };

        // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
        // task. If this operation is successful, then this transfers exclusive ownership of the
        // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
        // invalidate the field during that time.
        //
        // When the C side calls `do_close_fd`, the safety requirements of that method are
        // satisfied because when a task work is executed, the callback is given ownership of the
        // pointer.
        //
        // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
        // is called, then that change happens due to the write at the end of this function, and
        // that write has a safety comment that explains why the refcount can be dropped when
        // `do_close_fd` runs.
        let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };

        if res != 0 {
            // SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
            // we may destroy it.
            unsafe { drop(KBox::from_raw(inner)) };

            return Err(DeferredFdCloseError::TaskWorkUnavailable);
        }

        // This removes the fd from the fd table in `current`. The file is not fully closed until
        // `filp_close` is called. We are given ownership of one refcount to the file.
        //
        // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
        // pointer is non-null), then we call `filp_close` on the returned pointer as required by
        // `file_close_fd`.
        let file = unsafe { bindings::file_close_fd(fd) };
        if file.is_null() {
            // We don't clean up the task work since that might be expensive if the task work queue
            // is long. Just let it execute and let it clean up for itself.
            return Err(DeferredFdCloseError::BadFd);
        }

        // Acquire a second refcount to the file.
        //
        // SAFETY: The `file` pointer points at a file with a non-zero refcount.
        unsafe { bindings::get_file(file) };

        // This method closes the fd, consuming one of our two refcounts. There could be active
        // light refcounts created from that fd, so we must ensure that the file has a positive
        // refcount for the duration of those active light refcounts. We do that by holding on to
        // the second refcount until the current task returns to userspace.
        //
        // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
        // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
        // `current->files`.
        //
        // Note: fl_owner_t is currently a void pointer.
        unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };

        // We update the file pointer that the task work is supposed to fput. This transfers
        // ownership of our last refcount.
        //
        // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
        // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
        // still own a refcount to the file, so we can pass ownership of that refcount to the
        // `DeferredFdCloserInner`.
        //
        // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. This is the
        // case because all light refcounts that are associated with the fd we closed above must
        // have been dropped by the time `do_close_fd` runs, since light refcounts must be dropped
        // before returning to userspace.
        //
        // SAFETY: Task works are executed on the current thread right before we return to
        // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
        // means that a race is not possible here.
        unsafe { *file_field = file };

        Ok(())
    }

    /// # Safety
    ///
    /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
    /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
    /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
    unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
        // SAFETY: The caller just passed us ownership of this box.
        let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
        if !inner.file.is_null() {
            // SAFETY: By the type invariants, we own a refcount to this file, and the caller
            // guarantees that dropping the refcount now is okay.
            unsafe { bindings::fput(inner.file) };
        }
        // The allocation is freed when `inner` goes out of scope.
    }
}

/// Represents a failure to close an fd in a deferred manner.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum DeferredFdCloseError {
    /// Closing the fd failed because we were unable to schedule a task work.
    TaskWorkUnavailable,
    /// Closing the fd failed because the fd does not exist.
    BadFd,
}

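// Note: the conversion below surfaces `TaskWorkUnavailable` as `ESRCH` ("no such process") and
// `BadFd` as `EBADF` ("bad file descriptor").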
impl From<DeferredFdCloseError> for Error {
    fn from(err: DeferredFdCloseError) -> Error {
        match err {
            DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
            DeferredFdCloseError::BadFd => EBADF,
        }
    }
}