// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

use core::mem::{size_of, size_of_val, MaybeUninit};
use core::ops::Range;

use kernel::{
    bindings,
    fs::file::{File, FileDescriptorReservation},
    prelude::*,
    sync::Arc,
    types::{ARef, AsBytes, FromBytes},
    uaccess::UserSliceReader,
    uapi,
};

use crate::{
    deferred_close::DeferredFdCloser,
    defs::*,
    node::{Node, NodeRef},
    process::Process,
    DArc,
};

#[derive(Default)]
pub(crate) struct AllocationInfo {
    /// Range within the allocation where we can find the offsets to the object descriptors.
    pub(crate) offsets: Option<Range<usize>>,
    /// The target node of the transaction this allocation is associated to.
    /// Not set for replies.
    pub(crate) target_node: Option<NodeRef>,
    /// When this allocation is dropped, call `pending_oneway_finished` on the node.
    ///
    /// This is used to serialize oneway transactions on the same node. Binder guarantees that
    /// oneway transactions to the same node are delivered sequentially in the order they are sent.
    pub(crate) oneway_node: Option<DArc<Node>>,
    /// Zero the data in the buffer on free.
    pub(crate) clear_on_free: bool,
    /// List of files embedded in this transaction.
    file_list: FileList,
}

/// Represents an allocation that the kernel is currently using.
///
/// When allocations are idle, the range allocator holds the data related to them.
///
/// # Invariants
///
/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
/// marked in use in the page range.
pub(crate) struct Allocation {
    pub(crate) offset: usize,
    size: usize,
    pub(crate) ptr: usize,
    pub(crate) process: Arc<Process>,
    allocation_info: Option<AllocationInfo>,
    free_on_drop: bool,
    pub(crate) oneway_spam_detected: bool,
    #[allow(dead_code)]
    pub(crate) debug_id: usize,
}
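
// Illustrative lifecycle sketch (comment only, not driver code). It assumes a caller that owns
// an `Arc<Process>` and a `UserSliceReader`, plus `debug_id`/`offset`/`size`/`ptr` values handed
// out by the range allocator:
//
//     let mut alloc = Allocation::new(process, debug_id, offset, size, ptr, false);
//     alloc.copy_into(&mut reader, 0, size)?; // copy the payload in from userspace
//     alloc.set_info_clear_on_drop();         // ask for the buffer to be zeroed on free
//     alloc.keep_alive();                     // hand the buffer over; skip free-on-drop
//
// Dropping the allocation instead of calling `keep_alive` releases the range and runs the
// cleanup in `Drop for Allocation` below.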

impl Allocation {
    pub(crate) fn new(
        process: Arc<Process>,
        debug_id: usize,
        offset: usize,
        size: usize,
        ptr: usize,
        oneway_spam_detected: bool,
    ) -> Self {
        Self {
            process,
            offset,
            size,
            ptr,
            debug_id,
            oneway_spam_detected,
            allocation_info: None,
            free_on_drop: true,
        }
    }

    fn size_check(&self, offset: usize, size: usize) -> Result {
        let overflow_fail = offset.checked_add(size).is_none();
        let cmp_size_fail = offset.wrapping_add(size) > self.size;
        if overflow_fail || cmp_size_fail {
            return Err(EFAULT);
        }
        Ok(())
    }
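
    // Worked example of the check above (illustrative): with `self.size == 128`, a call with
    // `offset == 120` and `size == 16` has `offset + size == 136 > 128` and fails with `EFAULT`;
    // with `offset == usize::MAX` and `size == 1`, `checked_add` overflows and the call is
    // rejected instead of wrapping around to a small value.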

    pub(crate) fn copy_into(
        &self,
        reader: &mut UserSliceReader,
        offset: usize,
        size: usize,
    ) -> Result {
        self.size_check(offset, size)?;

        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe {
            self.process
                .pages
                .copy_from_user_slice(reader, self.offset + offset, size)
        }
    }

    pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.size_check(offset, size_of::<T>())?;

        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe { self.process.pages.read(self.offset + offset) }
    }

    pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
        self.size_check(offset, size_of_val::<T>(obj))?;

        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe { self.process.pages.write(self.offset + offset, obj) }
    }

    pub(crate) fn fill_zero(&self) -> Result {
        // SAFETY: While this object exists, the range allocator will keep the range allocated, and
        // in turn, the pages will be marked as in use.
        unsafe { self.process.pages.fill_zero(self.offset, self.size) }
    }

    pub(crate) fn keep_alive(mut self) {
        self.process
            .buffer_make_freeable(self.offset, self.allocation_info.take());
        self.free_on_drop = false;
    }

    pub(crate) fn set_info(&mut self, info: AllocationInfo) {
        self.allocation_info = Some(info);
    }

    pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
        self.allocation_info.get_or_insert_with(Default::default)
    }

    pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
        self.get_or_init_info().offsets = Some(offsets);
    }

    pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
        self.get_or_init_info().oneway_node = Some(oneway_node);
    }

    pub(crate) fn set_info_clear_on_drop(&mut self) {
        self.get_or_init_info().clear_on_free = true;
    }

    pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
        self.get_or_init_info().target_node = Some(target_node);
    }

    /// Reserve enough space to push at least `num_fds` fds.
    pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
        self.get_or_init_info()
            .file_list
            .files_to_translate
            .reserve(num_fds, GFP_KERNEL)?;

        Ok(())
    }

    pub(crate) fn info_add_fd(
        &mut self,
        file: ARef<File>,
        buffer_offset: usize,
        close_on_free: bool,
    ) -> Result {
        self.get_or_init_info().file_list.files_to_translate.push(
            FileEntry {
                file,
                buffer_offset,
                close_on_free,
            },
            GFP_KERNEL,
        )?;

        Ok(())
    }

    pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
        self.get_or_init_info().file_list.close_on_free = cof.0;
    }

    pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
        let file_list = match self.allocation_info.as_mut() {
            Some(info) => &mut info.file_list,
            None => return Ok(TranslatedFds::new()),
        };

        let files = core::mem::take(&mut file_list.files_to_translate);

        let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
        let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;

        let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
        for file_info in files {
            let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
            let fd = res.reserved_fd();
            self.write::<u32>(file_info.buffer_offset, &fd)?;
            crate::trace::trace_transaction_fd_recv(self.debug_id, fd, file_info.buffer_offset);

            reservations.push(
                Reservation {
                    res,
                    file: file_info.file,
                },
                GFP_KERNEL,
            )?;
            if file_info.close_on_free {
                close_on_free.push(fd, GFP_KERNEL)?;
            }
        }

        Ok(TranslatedFds {
            reservations,
            close_on_free: FdsCloseOnFree(close_on_free),
        })
    }
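
    // Sketch of the fd translation flow (comment only; the real call sites live in the
    // transaction code, and `alloc`, `file`, and `buffer_offset` are assumed to be in scope):
    //
    //     alloc.info_add_fd(file, buffer_offset, /* close_on_free */ false)?;
    //     let translated = alloc.translate_fds()?; // reserve fds and write them into the buffer
    //     let cof = translated.commit();           // fd_install the reserved fds
    //     alloc.set_info_close_on_free(cof);
    //
    // If `commit` is never called, the `FileDescriptorReservation`s are dropped and the reserved
    // fds are released without ever becoming visible to userspace.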

    /// Should the looper return to userspace when freeing this allocation?
    pub(crate) fn looper_need_return_on_free(&self) -> bool {
        // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
        // we should return to userspace asap if we are closing fds.
        match self.allocation_info {
            Some(ref info) => !info.file_list.close_on_free.is_empty(),
            None => false,
        }
    }
}

impl Drop for Allocation {
    fn drop(&mut self) {
        if !self.free_on_drop {
            return;
        }

        if let Some(mut info) = self.allocation_info.take() {
            if let Some(oneway_node) = info.oneway_node.as_ref() {
                oneway_node.pending_oneway_finished();
            }

            info.target_node = None;

            if let Some(offsets) = info.offsets.clone() {
                let view = AllocationView::new(self, offsets.start);
                for i in offsets.step_by(size_of::<usize>()) {
                    if view.cleanup_object(i).is_err() {
                        pr_warn!("Error cleaning up object at offset {}\n", i)
                    }
                }
            }

            for &fd in &info.file_list.close_on_free {
                let closer = match DeferredFdCloser::new(GFP_KERNEL) {
                    Ok(closer) => closer,
                    Err(kernel::alloc::AllocError) => {
                        // Ignore allocation failures.
                        break;
                    }
                };

                // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
                // method is called from a kthread. However, this is always called from a syscall,
                // so the latter case cannot happen, and we don't care about the first case.
                let _ = closer.close_fd(fd);
            }

            if info.clear_on_free {
                if let Err(e) = self.fill_zero() {
                    pr_warn!("Failed to clear data on free: {:?}", e);
                }
            }
        }

        self.process.buffer_raw_free(self.ptr);
    }
}

/// A wrapper around `Allocation` that is being created.
///
/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
/// considered to be part of a failed transaction. Successful transactions avoid that by calling
/// `success`, which skips the destructor.
#[repr(transparent)]
pub(crate) struct NewAllocation(pub(crate) Allocation);

impl NewAllocation {
    pub(crate) fn success(self) -> Allocation {
        // This skips the destructor.
        //
        // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
        unsafe { core::mem::transmute(self) }
    }
}
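
// Sketch of the intended pattern (comment only): the allocation stays wrapped while the
// transaction is being built and is unwrapped only once it has been queued successfully. The
// `new_alloc`, `reader`, and `len` values below are assumed to be in scope:
//
//     let new_alloc: NewAllocation = /* obtained from the range allocator */;
//     new_alloc.copy_into(&mut reader, 0, len)?; // `Deref` exposes the `Allocation` methods
//     let alloc = new_alloc.success();           // commit: skip the failed-buffer trace event
//
// Dropping a `NewAllocation` without calling `success` emits the
// `trace_transaction_failed_buffer_release` event in the `Drop` impl below.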

impl core::ops::Deref for NewAllocation {
    type Target = Allocation;
    fn deref(&self) -> &Allocation {
        &self.0
    }
}

impl core::ops::DerefMut for NewAllocation {
    fn deref_mut(&mut self) -> &mut Allocation {
        &mut self.0
    }
}

impl Drop for NewAllocation {
    fn drop(&mut self) {
        crate::trace::trace_transaction_failed_buffer_release(self.debug_id);
    }
}

/// A view into the beginning of an allocation.
///
/// All attempts to read or write outside of the view will fail. To intentionally access outside of
/// this view, use the `alloc` field of this struct directly.
pub(crate) struct AllocationView<'a> {
    pub(crate) alloc: &'a mut Allocation,
    limit: usize,
}
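
// Illustrative sketch of the limit check (comment only; `alloc` is assumed to be a sufficiently
// large `Allocation`). With `limit == 64`, accesses that end at or before offset 64 succeed and
// everything else is rejected:
//
//     let view = AllocationView::new(&mut alloc, 64);
//     let hdr: BinderObjectHeader = view.read(0)?;   // ok: ends within the limit
//     let res = view.read::<BinderObjectHeader>(64); // Err(EINVAL): extends past the limit
//     view.alloc.read::<u32>(100)?;                  // deliberately bypasses the view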

impl<'a> AllocationView<'a> {
    pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
        AllocationView { alloc, limit }
    }

    pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.read(offset)
    }

    pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.write(offset, obj)
    }

    pub(crate) fn copy_into(
        &self,
        reader: &mut UserSliceReader,
        offset: usize,
        size: usize,
    ) -> Result {
        if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.copy_into(reader, offset, size)
    }

    pub(crate) fn transfer_binder_object(
        &self,
        offset: usize,
        obj: &uapi::flat_binder_object,
        strong: bool,
        node_ref: NodeRef,
    ) -> Result {
        let mut newobj = FlatBinderObject::default();
        let node = node_ref.node.clone();
        if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
            // The receiving process is the owner of the node, so send it a binder object (instead
            // of a handle).
            let (ptr, cookie) = node.get_id();
            newobj.hdr.type_ = if strong {
                BINDER_TYPE_BINDER
            } else {
                BINDER_TYPE_WEAK_BINDER
            };
            newobj.flags = obj.flags;
            newobj.__bindgen_anon_1.binder = ptr as _;
            newobj.cookie = cookie as _;
            self.write(offset, &newobj)?;
            // Increment the user ref count on the node. It will be decremented as part of the
            // destruction of the buffer, when we see a binder or weak-binder object.
            node.update_refcount(true, 1, strong);
        } else {
            // The receiving process is different from the owner, so we need to insert a handle to
            // the binder object.
            let handle = self
                .alloc
                .process
                .as_arc_borrow()
                .insert_or_update_handle(node_ref, false)?;
            newobj.hdr.type_ = if strong {
                BINDER_TYPE_HANDLE
            } else {
                BINDER_TYPE_WEAK_HANDLE
            };
            newobj.flags = obj.flags;
            newobj.__bindgen_anon_1.handle = handle;
            if self.write(offset, &newobj).is_err() {
                // Decrement ref count on the handle we just created.
                let _ = self
                    .alloc
                    .process
                    .as_arc_borrow()
                    .update_ref(handle, false, strong);
                return Err(EINVAL);
            }
        }

        crate::trace::trace_transaction_node_send(self.alloc.debug_id, &node, obj, &newobj);

        Ok(())
    }

    fn cleanup_object(&self, index_offset: usize) -> Result {
        let offset = self.alloc.read(index_offset)?;
        let header = self.read::<BinderObjectHeader>(offset)?;
        match header.type_ {
            BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
                let obj = self.read::<FlatBinderObject>(offset)?;
                let strong = header.type_ == BINDER_TYPE_BINDER;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
                // populated.
                let ptr = unsafe { obj.__bindgen_anon_1.binder };
                let cookie = obj.cookie;
                self.alloc.process.update_node(ptr, cookie, strong);
                Ok(())
            }
            BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
                let obj = self.read::<FlatBinderObject>(offset)?;
                let strong = header.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
                // populated.
                let handle = unsafe { obj.__bindgen_anon_1.handle };
                self.alloc
                    .process
                    .as_arc_borrow()
                    .update_ref(handle, false, strong)
            }
            _ => Ok(()),
        }
    }
}

/// A binder object as it is serialized.
///
/// # Invariants
///
/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
/// types.
#[repr(C)]
pub(crate) union BinderObject {
    hdr: uapi::binder_object_header,
    fbo: uapi::flat_binder_object,
    fdo: uapi::binder_fd_object,
    bbo: uapi::binder_buffer_object,
    fdao: uapi::binder_fd_array_object,
}

/// A view into a `BinderObject` that can be used in a match statement.
pub(crate) enum BinderObjectRef<'a> {
    Binder(&'a mut uapi::flat_binder_object),
    Handle(&'a mut uapi::flat_binder_object),
    Fd(&'a mut uapi::binder_fd_object),
    Ptr(&'a mut uapi::binder_buffer_object),
    Fda(&'a mut uapi::binder_fd_array_object),
}

impl BinderObject {
    pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
        let object = Self::read_from_inner(|slice| {
            let read_len = usize::min(slice.len(), reader.len());
            reader.clone_reader().read_slice(&mut slice[..read_len])?;
            Ok(())
        })?;

        // If we used an object type smaller than the largest object size, then we've read more
        // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
        // original reader. Now, we call `skip` so that the caller's reader is advanced by the
        // right amount.
        //
        // The `skip` call fails if the reader doesn't have `size` bytes available. This could
        // happen if the type header corresponds to an object type that is larger than the rest of
        // the reader.
        //
        // Any extra bytes beyond the size of the object are inaccessible after this call, so
        // reading them again from the `reader` later does not result in TOCTOU bugs.
        reader.skip(object.size())?;

        Ok(object)
    }

    /// Use the provided reader closure to construct a `BinderObject`.
    ///
    /// The closure should write the bytes for the object into the provided slice.
    pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
    where
        R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
    {
        let mut obj = MaybeUninit::<BinderObject>::zeroed();

        // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
        // and the byte array has an alignment requirement of one, so the pointer cast is okay.
        // Additionally, `obj` was initialized to zeros, so the byte array will not be
        // uninitialized.
        (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;

        // SAFETY: The entire object is initialized, so accessing this field is safe.
        let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
        if Self::type_to_size(type_).is_none() {
            // The value of `obj.hdr.type_` was invalid.
            return Err(EINVAL);
        }

        // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
        // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
        unsafe { Ok(obj.assume_init()) }
    }
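
    // Sketch of using `read_from_inner` with an in-kernel byte source instead of a
    // `UserSliceReader` (comment only; `bytes` is assumed to hold at least
    // `size_of::<BinderObject>()` bytes):
    //
    //     let obj = BinderObject::read_from_inner(|slice| {
    //         slice.copy_from_slice(&bytes[..size_of::<BinderObject>()]);
    //         Ok(())
    //     })?;
    //
    // The closure only fills the byte array; `hdr.type_` is validated afterwards, so a buffer
    // carrying an unknown type value yields `EINVAL` rather than a malformed object.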

    pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
        use BinderObjectRef::*;
        // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
        // variants of this union accept all initialized bit patterns.
        unsafe {
            match self.hdr.type_ {
                BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
                BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
                BINDER_TYPE_FD => Fd(&mut self.fdo),
                BINDER_TYPE_PTR => Ptr(&mut self.bbo),
                BINDER_TYPE_FDA => Fda(&mut self.fdao),
                // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
                // other value than the ones checked above.
                _ => core::hint::unreachable_unchecked(),
            }
        }
    }

    pub(crate) fn size(&self) -> usize {
        // SAFETY: The entire object is initialized, so accessing this field is safe.
        let type_ = unsafe { self.hdr.type_ };

        // SAFETY: The type invariants guarantee that the type field is correct.
        unsafe { Self::type_to_size(type_).unwrap_unchecked() }
    }

    fn type_to_size(type_: u32) -> Option<usize> {
        match type_ {
            BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
            BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
            BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
            BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
            _ => None,
        }
    }
}
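
// Dispatch sketch (comment only): a caller that has parsed a `BinderObject` from userspace is
// expected to match on `as_ref`, for example in the transaction translation loop:
//
//     let mut object = BinderObject::read_from(reader)?;
//     match object.as_ref() {
//         BinderObjectRef::Fd(fdo) => { /* translate the fd stored in `fdo` */ }
//         BinderObjectRef::Binder(fbo) | BinderObjectRef::Handle(fbo) => { /* node or handle */ }
//         _ => { /* BINDER_TYPE_PTR and BINDER_TYPE_FDA handling */ }
//     }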

#[derive(Default)]
struct FileList {
    files_to_translate: KVec<FileEntry>,
    close_on_free: KVec<u32>,
}

struct FileEntry {
    /// The file for which a descriptor will be created in the recipient process.
    file: ARef<File>,
    /// The offset in the buffer where the file descriptor is stored.
    buffer_offset: usize,
    /// Whether this fd should be closed when the allocation is freed.
    close_on_free: bool,
}

pub(crate) struct TranslatedFds {
    reservations: KVec<Reservation>,
    /// If commit is called, then these fds should be closed. (If commit is not called, then they
    /// shouldn't be closed.)
    close_on_free: FdsCloseOnFree,
}

struct Reservation {
    res: FileDescriptorReservation,
    file: ARef<File>,
}

impl TranslatedFds {
    pub(crate) fn new() -> Self {
        Self {
            reservations: KVec::new(),
            close_on_free: FdsCloseOnFree(KVec::new()),
        }
    }

    pub(crate) fn commit(self) -> FdsCloseOnFree {
        for entry in self.reservations {
            entry.res.fd_install(entry.file);
        }

        self.close_on_free
    }
}

pub(crate) struct FdsCloseOnFree(KVec<u32>);