1 // SPDX-License-Identifier: GPL-2.0
2
3 // Copyright (C) 2024 Google LLC.
4
5 //! This module defines the `Process` type, which represents a process using a particular binder
6 //! context.
7 //!
8 //! The `Process` object keeps track of all of the resources that this process owns in the binder
9 //! context.
10 //!
11 //! There is one `Process` object for each binder fd that a process has opened, so processes using
12 //! several binder contexts have several `Process` objects. This ensures that the contexts are
13 //! fully separated.
14
15 use core::mem::take;
16
17 use kernel::{
18 bindings,
19 cred::Credential,
20 error::Error,
21 fs::file::{self, File},
22 list::{List, ListArc, ListArcField, ListLinks},
23 mm,
24 prelude::*,
25 rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
26 seq_file::SeqFile,
27 seq_print,
28 sync::poll::PollTable,
29 sync::{
30 lock::{spinlock::SpinLockBackend, Guard},
31 Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
32 },
33 task::Task,
34 types::{ARef, Either},
35 uaccess::{UserSlice, UserSliceReader},
36 uapi,
37 workqueue::{self, Work},
38 };
39
40 use crate::{
41 allocation::{Allocation, AllocationInfo, NewAllocation},
42 context::Context,
43 defs::*,
44 error::{BinderError, BinderResult},
45 node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
46 page_range::ShrinkablePageRange,
47 prio::{self, BinderPriority},
48 range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
49 stats::BinderStats,
50 thread::{PushWorkRes, Thread},
51 BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
52 };
53
54 #[path = "freeze.rs"]
55 mod freeze;
56 use self::freeze::{FreezeCookie, FreezeListener};
57
58 struct Mapping {
59 address: usize,
60 alloc: RangeAllocator<AllocationInfo>,
61 }
62
63 impl Mapping {
64 fn new(address: usize, size: usize) -> Self {
65 Self {
66 address,
67 alloc: RangeAllocator::new(size),
68 }
69 }
70 }
71
72 // Bitflags for the `defer_work` field of `ProcessInner`.
73 const PROC_DEFER_FLUSH: u8 = 1;
74 const PROC_DEFER_RELEASE: u8 = 2;
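//
// For illustration only, a minimal sketch of how these bits are intended to be used, mirroring
// `Process::flush`/`Process::release` and `workqueue::WorkItem::run` further down in this file
// (`process` here is an illustrative `Arc<Process>`):
//
//     let mut inner = process.inner.lock();
//     let should_schedule = inner.defer_work == 0;
//     inner.defer_work |= PROC_DEFER_FLUSH;
//     drop(inner);
//     if should_schedule {
//         // Failure just means the work item is already queued.
//         let _ = workqueue::system().enqueue(process);
//     }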
75
76 /// The fields of `Process` protected by the spinlock.
77 pub(crate) struct ProcessInner {
78 is_manager: bool,
79 pub(crate) is_dead: bool,
80 threads: RBTree<i32, Arc<Thread>>,
81 /// INVARIANT: Threads pushed to this list must be owned by this process.
82 ready_threads: List<Thread>,
83 nodes: RBTree<u64, DArc<Node>>,
84 mapping: Option<Mapping>,
85 work: List<DTRWrap<dyn DeliverToRead>>,
86 delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
87
88 /// The number of requested threads that haven't registered yet.
89 requested_thread_count: u32,
90 /// The maximum number of threads used by the process thread pool.
91 max_threads: u32,
92 /// The number of threads that have started and registered with the thread pool.
93 started_thread_count: u32,
94
95 /// Bitmap of deferred work to do.
96 defer_work: u8,
97
98 /// The number of outstanding transactions that must complete before processes waiting on
99 /// `freeze_wait` are woken up.
100 outstanding_txns: u32,
101 /// Process is frozen and unable to service binder transactions.
102 pub(crate) is_frozen: bool,
103 /// Process received sync transactions since last frozen.
104 pub(crate) sync_recv: bool,
105 /// Process received async transactions since last frozen.
106 pub(crate) async_recv: bool,
107 pub(crate) binderfs_file: Option<BinderfsProcFile>,
108 /// Whether oneway spam detection is enabled.
109 oneway_spam_detection_enabled: bool,
110 }
111
112 impl ProcessInner {
113 fn new() -> Self {
114 Self {
115 is_manager: false,
116 is_dead: false,
117 threads: RBTree::new(),
118 ready_threads: List::new(),
119 mapping: None,
120 nodes: RBTree::new(),
121 work: List::new(),
122 delivered_deaths: List::new(),
123 requested_thread_count: 0,
124 max_threads: 0,
125 started_thread_count: 0,
126 defer_work: 0,
127 outstanding_txns: 0,
128 is_frozen: false,
129 sync_recv: false,
130 async_recv: false,
131 binderfs_file: None,
132 oneway_spam_detection_enabled: false,
133 }
134 }
135
136 /// Schedule the work item for execution on this process.
137 ///
138 /// If any threads are ready for work, then the work item is given directly to that thread and
139 /// it is woken up. Otherwise, it is pushed to the process work list.
140 ///
141 /// This call can fail only if the process is dead. In this case, the work item is returned to
142 /// the caller so that the caller can drop it after releasing the inner process lock. This is
143 /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
144 /// taken while holding the inner process lock.
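///
/// # Example
///
/// A minimal sketch of the intended calling pattern (mirroring `Process::push_work` below);
/// `inner` is a locked `ProcessInner` guard and `work` is a `DLArc<dyn DeliverToRead>`:
///
/// ```ignore
/// let res = inner.push_work(work);
/// drop(inner); // Release the spinlock first.
/// if let Err((err, work)) = res {
///     // Dropping the returned work item is only safe now that the lock is released.
///     drop(work);
///     return Err(err);
/// }
/// ```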
145 pub(crate) fn push_work(
146 &mut self,
147 work: DLArc<dyn DeliverToRead>,
148 ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
149 // Try to find a ready thread to which to push the work.
150 if let Some(thread) = self.ready_threads.pop_front() {
151 work.on_thread_selected(&thread);
152
153 // Push to thread while holding state lock. This prevents the thread from giving up
154 // (for example, because of a signal) when we're about to deliver work.
155 match thread.push_work(work) {
156 PushWorkRes::Ok => Ok(()),
157 PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
158 }
159 } else if self.is_dead {
160 Err((BinderError::new_dead(), work))
161 } else {
162 let sync = work.should_sync_wakeup();
163
164 // Didn't find a thread waiting for proc work; this can happen
165 // in two scenarios:
166 // 1. All threads are busy handling transactions
167 // In that case, one of those threads should call back into
168 // the kernel driver soon and pick up this work.
169 // 2. Threads are using the (e)poll interface, in which case
170 // they may be blocked on the waitqueue without having been
171 // added to waiting_threads. For this case, we just iterate
172 // over all threads not handling transaction work, and
173 // wake them all up. We wake all because we don't know whether
174 // a thread that called into (e)poll is handling non-binder
175 // work currently.
176 self.work.push_back(work);
177
178 // Wake up polling threads, if any.
179 for thread in self.threads.values() {
180 thread.notify_if_poll_ready(sync);
181 }
182
183 Ok(())
184 }
185 }
186
187 pub(crate) fn remove_node(&mut self, ptr: u64) {
188 self.nodes.remove(&ptr);
189 }
190
191 /// Updates the reference count on the given node.
192 pub(crate) fn update_node_refcount(
193 &mut self,
194 node: &DArc<Node>,
195 inc: bool,
196 strong: bool,
197 count: usize,
198 othread: Option<&Thread>,
199 ) {
200 let push = node.update_refcount_locked(inc, strong, count, self);
201
202 // If we decided that we need to push work, push either to the process or to a thread if
203 // one is specified.
204 if let Some(node) = push {
205 if let Some(thread) = othread {
206 thread.push_work_deferred(node);
207 } else {
208 let _ = self.push_work(node);
209 // Ignore the error: `push_work` can only fail if the process is dead, and in that
210 // case the notification is no longer needed.
211 }
212 }
213 }
214
215 pub(crate) fn new_node_ref(
216 &mut self,
217 node: DArc<Node>,
218 strong: bool,
219 thread: Option<&Thread>,
220 ) -> NodeRef {
221 self.update_node_refcount(&node, true, strong, 1, thread);
222 let strong_count = if strong { 1 } else { 0 };
223 NodeRef::new(node, strong_count, 1 - strong_count)
224 }
225
226 pub(crate) fn new_node_ref_with_thread(
227 &mut self,
228 node: DArc<Node>,
229 strong: bool,
230 thread: &Thread,
231 wrapper: Option<CritIncrWrapper>,
232 ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
233 let push = match wrapper {
234 None => node
235 .incr_refcount_allow_zero2one(strong, self)?
236 .map(|node| node as _),
237 Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
238 };
239 if let Some(node) = push {
240 thread.push_work_deferred(node);
241 }
242 let strong_count = if strong { 1 } else { 0 };
243 Ok(NodeRef::new(node, strong_count, 1 - strong_count))
244 }
245
246 /// Returns an existing node with the given pointer and cookie, if one exists.
247 ///
248 /// Returns an error if a node with the given pointer but a different cookie exists.
249 fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
250 match self.nodes.get(&ptr) {
251 None => Ok(None),
252 Some(node) => {
253 let (_, node_cookie) = node.get_id();
254 if node_cookie == cookie {
255 Ok(Some(node.clone()))
256 } else {
257 Err(EINVAL)
258 }
259 }
260 }
261 }
262
263 fn register_thread(&mut self) -> bool {
264 if self.requested_thread_count == 0 {
265 return false;
266 }
267
268 self.requested_thread_count -= 1;
269 self.started_thread_count += 1;
270 true
271 }
272
273 /// Finds a delivered death notification with the given cookie, removes it from the thread's
274 /// delivered list, and returns it.
275 fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
276 let mut cursor = self.delivered_deaths.cursor_front();
277 while let Some(next) = cursor.peek_next() {
278 if next.cookie == cookie {
279 return Some(next.remove().into_arc());
280 }
281 cursor.move_next();
282 }
283 None
284 }
285
286 pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
287 if let Some(death) = ListArc::try_from_arc_or_drop(death) {
288 self.delivered_deaths.push_back(death);
289 } else {
290 pr_warn!("Notification added to `delivered_deaths` twice.");
291 }
292 }
293
294 pub(crate) fn add_outstanding_txn(&mut self) {
295 self.outstanding_txns += 1;
296 }
297
298 fn txns_pending_locked(&self) -> bool {
299 if self.outstanding_txns > 0 {
300 return true;
301 }
302 for thread in self.threads.values() {
303 if thread.has_current_transaction() {
304 return true;
305 }
306 }
307 false
308 }
309 }
310
311 /// Used to keep track of a node that this process has a handle to.
312 #[pin_data]
313 pub(crate) struct NodeRefInfo {
314 debug_id: usize,
315 /// The refcount that this process owns to the node.
316 node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
317 death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
318 /// Cookie of the active freeze listener for this node.
319 freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
320 /// Used to store this `NodeRefInfo` in the node's `refs` list.
321 #[pin]
322 links: ListLinks<{ Self::LIST_NODE }>,
323 /// The handle for this `NodeRefInfo`.
324 handle: u32,
325 /// The process that has a handle to the node.
326 pub(crate) process: Arc<Process>,
327 }
328
329 impl NodeRefInfo {
330 /// The id used for the `Node::refs` list.
331 pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
332 /// The id used for the `ListArc` in `ProcessNodeRefs`.
333 const LIST_PROC: u64 = 0xd703a5263dcc8650;
334
335 fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
336 pin_init!(Self {
337 debug_id: super::next_debug_id(),
338 node_ref: ListArcField::new(node_ref),
339 death: ListArcField::new(None),
340 freeze: ListArcField::new(None),
341 links <- ListLinks::new(),
342 handle,
343 process,
344 })
345 }
346
347 kernel::list::define_list_arc_field_getter! {
348 pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
349 pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
350 pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
351 pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
352 }
353 }
354
355 kernel::list::impl_has_list_links! {
356 impl HasListLinks<{Self::LIST_NODE}> for NodeRefInfo { self.links }
357 }
358 kernel::list::impl_list_arc_safe! {
359 impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
360 impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
361 }
362 kernel::list::impl_list_item! {
363 impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
364 using ListLinks;
365 }
366 }
367
368 /// Keeps track of references this process has to nodes owned by other processes.
369 ///
370 /// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
371 /// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
372 /// extra costs should be eliminated.
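///
/// For illustration, the current two-step lookup looks roughly like this sketch (compare
/// `insert_or_update_handle` below; `refs` is a locked `ProcessNodeRefs`):
///
/// ```ignore
/// if let Some(handle) = refs.by_node.get(&node.global_id()).copied() {
///     let info = refs.by_handle.get_mut(&handle).unwrap();
///     // ... use `info` ...
/// }
/// ```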
373 struct ProcessNodeRefs {
374 /// Used to look up nodes using the 32-bit id that this process knows it by.
375 by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
376 /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
377 /// the underlying `Node` struct as returned by `Node::global_id`.
378 by_node: RBTree<usize, u32>,
379 /// Used to look up a `FreezeListener` by cookie.
380 ///
381 /// There might be multiple freeze listeners for the same node, but at most one of them is
382 /// active.
383 freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
384 }
385
386 impl ProcessNodeRefs {
387 fn new() -> Self {
388 Self {
389 by_handle: RBTree::new(),
390 by_node: RBTree::new(),
391 freeze_listeners: RBTree::new(),
392 }
393 }
394 }
395
396 use core::mem::offset_of;
397 use kernel::bindings::rb_process_layout;
398 pub(crate) const PROCESS_LAYOUT: rb_process_layout = rb_process_layout {
399 arc_offset: Arc::<Process>::DATA_OFFSET,
400 task: offset_of!(Process, task),
401 };
402
403 /// A process using binder.
404 ///
405 /// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
406 /// that a process has opened, so processes using several binder contexts have several `Process`
407 /// objects. This ensures that the contexts are fully separated.
408 #[pin_data]
409 pub(crate) struct Process {
410 pub(crate) ctx: Arc<Context>,
411
412 // The task leader (process).
413 pub(crate) task: ARef<Task>,
414
415 // Credential associated with file when `Process` is created.
416 pub(crate) cred: ARef<Credential>,
417
418 #[pin]
419 pub(crate) inner: SpinLock<ProcessInner>,
420
421 pub(crate) default_priority: BinderPriority,
422
423 #[pin]
424 pub(crate) pages: ShrinkablePageRange,
425
426 // Waitqueue of processes waiting for all outstanding transactions to be
427 // processed.
428 #[pin]
429 freeze_wait: CondVar,
430
431 // Node references are protected by a separate lock to avoid recursive lock acquisition when
432 // incrementing/decrementing a node owned by another process.
433 #[pin]
434 node_refs: Mutex<ProcessNodeRefs>,
435
436 // Work node for deferred work item.
437 #[pin]
438 defer_work: Work<Process>,
439
440 // Links for process list in Context.
441 #[pin]
442 links: ListLinks,
443
444 pub(crate) stats: BinderStats,
445 }
446
447 kernel::impl_has_work! {
448 impl HasWork<Process> for Process { self.defer_work }
449 }
450
451 kernel::list::impl_has_list_links! {
452 impl HasListLinks<0> for Process { self.links }
453 }
454 kernel::list::impl_list_arc_safe! {
455 impl ListArcSafe<0> for Process { untracked; }
456 }
457 kernel::list::impl_list_item! {
458 impl ListItem<0> for Process {
459 using ListLinks;
460 }
461 }
462
463 impl workqueue::WorkItem for Process {
464 type Pointer = Arc<Process>;
465
466 fn run(me: Arc<Self>) {
467 let defer;
468 {
469 let mut inner = me.inner.lock();
470 defer = inner.defer_work;
471 inner.defer_work = 0;
472 }
473
474 if defer & PROC_DEFER_FLUSH != 0 {
475 me.deferred_flush();
476 }
477 if defer & PROC_DEFER_RELEASE != 0 {
478 me.deferred_release();
479 }
480 }
481 }
482
483 impl Process {
484 fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
485 let current = kernel::current!();
486 let list_process = ListArc::pin_init::<Error>(
487 try_pin_init!(Process {
488 ctx,
489 cred,
490 default_priority: prio::get_default_prio_from_task(current),
491 inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
492 pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
493 node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
494 freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
495 task: current.group_leader().into(),
496 defer_work <- kernel::new_work!("Process::defer_work"),
497 links <- ListLinks::new(),
498 stats: BinderStats::new(),
499 }),
500 GFP_KERNEL,
501 )?;
502
503 let process = list_process.clone_arc();
504 process.ctx.register_process(list_process);
505
506 Ok(process)
507 }
508
509 pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
510 self.task.tgid_nr_ns(None)
511 }
512
513 #[inline(never)]
514 pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
515 seq_print!(m, "proc {}\n", self.pid_in_current_ns());
516 seq_print!(m, "context {}\n", &*ctx.name);
517
518 let inner = self.inner.lock();
519 seq_print!(m, " threads: {}\n", inner.threads.iter().count());
520 seq_print!(
521 m,
522 " requested threads: {}+{}/{}\n",
523 inner.requested_thread_count,
524 inner.started_thread_count,
525 inner.max_threads,
526 );
527 if let Some(mapping) = &inner.mapping {
528 seq_print!(
529 m,
530 " free oneway space: {}\n",
531 mapping.alloc.free_oneway_space()
532 );
533 seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
534 }
535 seq_print!(
536 m,
537 " outstanding transactions: {}\n",
538 inner.outstanding_txns
539 );
540 seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
541 drop(inner);
542
543 {
544 let mut refs = self.node_refs.lock();
545 let (mut count, mut weak, mut strong) = (0, 0, 0);
546 for r in refs.by_handle.values_mut() {
547 let node_ref = r.node_ref();
548 let (nstrong, nweak) = node_ref.get_count();
549 count += 1;
550 weak += nweak;
551 strong += nstrong;
552 }
553 seq_print!(m, " refs: {count} s {strong} w {weak}\n");
554 }
555
556 self.stats.debug_print(" ", m);
557
558 Ok(())
559 }
560
561 #[inline(never)]
562 pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
563 seq_print!(m, "proc {}\n", self.pid_in_current_ns());
564 seq_print!(m, "context {}\n", &*ctx.name);
565
566 let mut all_threads = KVec::new();
567 let mut all_nodes = KVec::new();
568 loop {
569 let inner = self.inner.lock();
570 let num_threads = inner.threads.iter().count();
571 let num_nodes = inner.nodes.iter().count();
572
573 if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
574 drop(inner);
575 all_threads.reserve(num_threads, GFP_KERNEL)?;
576 all_nodes.reserve(num_nodes, GFP_KERNEL)?;
577 continue;
578 }
579
580 for thread in inner.threads.values() {
581 assert!(all_threads.len() < all_threads.capacity());
582 let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
583 }
584
585 for node in inner.nodes.values() {
586 assert!(all_nodes.len() < all_nodes.capacity());
587 let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
588 }
589
590 break;
591 }
592
593 for thread in all_threads {
594 thread.debug_print(m, print_all)?;
595 }
596
597 let mut inner = self.inner.lock();
598 for node in all_nodes {
599 if print_all || node.has_oneway_transaction(&mut inner) {
600 node.full_debug_print(m, &mut inner)?;
601 }
602 }
603 drop(inner);
604
605 if print_all {
606 let mut refs = self.node_refs.lock();
607 for r in refs.by_handle.values_mut() {
608 let node_ref = r.node_ref();
609 let dead = node_ref.node.owner.inner.lock().is_dead;
610 let (strong, weak) = node_ref.get_count();
611 let debug_id = node_ref.node.debug_id;
612
613 seq_print!(
614 m,
615 " ref {}: desc {} {}node {debug_id} s {strong} w {weak}\n",
616 r.debug_id,
617 r.handle,
618 if dead { "dead " } else { "" },
619 );
620 }
621 }
622
623 let inner = self.inner.lock();
624 for work in &inner.work {
625 work.debug_print(m, " ", " pending transaction ")?;
626 }
627 for _death in &inner.delivered_deaths {
628 seq_print!(m, " has delivered dead binder\n");
629 }
630 if let Some(mapping) = &inner.mapping {
631 mapping.alloc.debug_print(m)?;
632 }
633 drop(inner);
634
635 Ok(())
636 }
637
638 /// Attempts to fetch a work item from the process queue.
639 pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
640 self.inner.lock().work.pop_front()
641 }
642
643 /// Attempts to fetch a work item from the process queue. If none is available, it registers the
644 /// given thread as ready to receive work directly.
645 ///
646 /// This must only be called when the thread is not participating in a transaction chain; when
647 /// it is, work will always be delivered directly to the thread (and not through the process
648 /// queue).
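///
/// # Example
///
/// A hedged sketch of the intended calling pattern (simplified from the thread read loop;
/// `deliver` and `wait_for_work` are illustrative placeholders, not real helpers):
///
/// ```ignore
/// match process.get_work_or_register(&thread) {
///     Either::Left(work) => deliver(work),
///     Either::Right(registration) => {
///         // The thread stays registered as ready until `registration` is dropped.
///         wait_for_work();
///         drop(registration);
///     }
/// }
/// ```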
649 pub(crate) fn get_work_or_register<'a>(
650 &'a self,
651 thread: &'a Arc<Thread>,
652 ) -> Either<DLArc<dyn DeliverToRead>, Registration<'a>> {
653 let mut inner = self.inner.lock();
654 // Try to get work from the process queue.
655 if let Some(work) = inner.work.pop_front() {
656 return Either::Left(work);
657 }
658
659 // Register the thread as ready.
660 Either::Right(Registration::new(thread, &mut inner))
661 }
662
663 fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
664 let id = {
665 let current = kernel::current!();
666 if !core::ptr::eq(current.group_leader(), &*self.task) {
667 pr_err!("get_current_thread was called from the wrong process.");
668 return Err(EINVAL);
669 }
670 current.pid()
671 };
672
673 {
674 let inner = self.inner.lock();
675 if let Some(thread) = inner.threads.get(&id) {
676 return Ok(thread.clone());
677 }
678 }
679
680 // Allocate a new `Thread` without holding any locks.
681 let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
682 let ta: Arc<Thread> = Thread::new(id, self.into())?;
683
684 let mut inner = self.inner.lock();
685 match inner.threads.entry(id) {
686 rbtree::Entry::Vacant(entry) => {
687 entry.insert(ta.clone(), reservation);
688 Ok(ta)
689 }
690 rbtree::Entry::Occupied(_entry) => {
691 pr_err!("Cannot create two threads with the same id.");
692 Err(EINVAL)
693 }
694 }
695 }
696
697 pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
698 // If push_work fails, drop the work item outside the lock.
699 let res = self.inner.lock().push_work(work);
700 match res {
701 Ok(()) => Ok(()),
702 Err((err, work)) => {
703 drop(work);
704 Err(err)
705 }
706 }
707 }
708
709 fn set_as_manager(
710 self: ArcBorrow<'_, Self>,
711 info: Option<FlatBinderObject>,
712 thread: &Thread,
713 ) -> Result {
714 let (ptr, cookie, flags) = if let Some(obj) = info {
715 (
716 // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
717 // is safe to access the `binder` field.
718 unsafe { obj.__bindgen_anon_1.binder },
719 obj.cookie,
720 obj.flags,
721 )
722 } else {
723 (0, 0, 0)
724 };
725 let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
726 let node = node_ref.node.clone();
727 self.ctx.set_manager_node(node_ref)?;
728 self.inner.lock().is_manager = true;
729
730 // Force the state of the node to prevent the delivery of acquire/increfs.
731 let mut owner_inner = node.owner.inner.lock();
732 node.force_has_count(&mut owner_inner);
733 Ok(())
734 }
735
736 fn get_node_inner(
737 self: ArcBorrow<'_, Self>,
738 ptr: u64,
739 cookie: u64,
740 flags: u32,
741 strong: bool,
742 thread: &Thread,
743 wrapper: Option<CritIncrWrapper>,
744 ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
745 // Try to find an existing node.
746 {
747 let mut inner = self.inner.lock();
748 if let Some(node) = inner.get_existing_node(ptr, cookie)? {
749 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
750 }
751 }
752
753 // Allocate the node before reacquiring the lock.
754 let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
755 let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
756 let mut inner = self.inner.lock();
757 if let Some(node) = inner.get_existing_node(ptr, cookie)? {
758 return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
759 }
760
761 inner.nodes.insert(rbnode);
762 // This can only fail if someone has already pushed the node to a list, but we just created
763 // it and still hold the lock, so it can't fail right now.
764 let node_ref = inner
765 .new_node_ref_with_thread(node, strong, thread, wrapper)
766 .unwrap();
767
768 Ok(Ok(node_ref))
769 }
770
771 pub(crate) fn get_node(
772 self: ArcBorrow<'_, Self>,
773 ptr: u64,
774 cookie: u64,
775 flags: u32,
776 strong: bool,
777 thread: &Thread,
778 ) -> Result<NodeRef> {
779 let mut wrapper = None;
780 for _ in 0..2 {
781 match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
782 Err(err) => return Err(err),
783 Ok(Ok(node_ref)) => return Ok(node_ref),
784 Ok(Err(CouldNotDeliverCriticalIncrement)) => {
785 wrapper = Some(CritIncrWrapper::new()?);
786 }
787 }
788 }
789 // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
790 // loop should run at most twice.
791 unreachable!()
792 }
793
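/// Inserts a reference to the given node into this process's handle table, or merges it into
/// the existing reference if the process already has one for that node.
///
/// Returns the handle that this process uses to refer to the node. Handle 0 is reserved for
/// the context manager, so ordinary references start at 1. Fails with `ESRCH` if the process
/// is already being torn down.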
794 pub(crate) fn insert_or_update_handle(
795 self: ArcBorrow<'_, Process>,
796 node_ref: NodeRef,
797 is_manager: bool,
798 ) -> Result<u32> {
799 {
800 let mut refs = self.node_refs.lock();
801
802 // Do a lookup before inserting.
803 if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
804 let handle = *handle_ref;
805 let info = refs.by_handle.get_mut(&handle).unwrap();
806 info.node_ref().absorb(node_ref);
807 return Ok(handle);
808 }
809 }
810
811 // Reserve memory for tree nodes.
812 let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
813 let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
814 let info = UniqueArc::new_uninit(GFP_KERNEL)?;
815
816 let mut refs = self.node_refs.lock();
817
818 // Do the lookup again, as the node may have been inserted by another thread while the lock was not held.
819 if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
820 let handle = *handle_ref;
821 let info = refs.by_handle.get_mut(&handle).unwrap();
822 info.node_ref().absorb(node_ref);
823 return Ok(handle);
824 }
825
826 // Find the smallest unused handle, starting at 0 for the context manager and at 1 otherwise.
827 let mut target: u32 = if is_manager { 0 } else { 1 };
828 for handle in refs.by_handle.keys() {
829 if *handle > target {
830 break;
831 }
832 if *handle == target {
833 target = target.checked_add(1).ok_or(ENOMEM)?;
834 }
835 }
836
837 let gid = node_ref.node.global_id();
838 let (info_proc, info_node) = {
839 let info_init = NodeRefInfo::new(node_ref, target, self.into());
840 match info.pin_init_with(info_init) {
841 Ok(info) => ListArc::pair_from_pin_unique(info),
842 // error is infallible
843 Err(err) => match err {},
844 }
845 };
846
847 // Ensure the process is still alive while we insert a new reference.
848 //
849 // This releases the lock before inserting the nodes, but since `is_dead` is set as the
850 // first thing in `deferred_release`, process cleanup will not miss the items inserted into
851 // `refs` below.
852 if self.inner.lock().is_dead {
853 return Err(ESRCH);
854 }
855
856 // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
857 // `info_node` into the right node's `refs` list.
858 unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
859
860 refs.by_node.insert(reserve1.into_node(gid, target));
861 refs.by_handle.insert(reserve2.into_node(target, info_proc));
862 Ok(target)
863 }
864
865 pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
866 // When handle is zero, try to get the context manager.
867 if handle == 0 {
868 Ok(self.ctx.get_manager_node(true)?)
869 } else {
870 Ok(self.get_node_from_handle(handle, true)?)
871 }
872 }
873
874 pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
875 self.node_refs
876 .lock()
877 .by_handle
878 .get_mut(&handle)
879 .ok_or(ENOENT)?
880 .node_ref()
881 .clone(strong)
882 }
883
884 pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
885 let mut inner = self.inner.lock();
886 // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
887 let removed = unsafe { inner.delivered_deaths.remove(death) };
888 drop(inner);
889 drop(removed);
890 }
891
892 pub(crate) fn update_ref(
893 self: ArcBorrow<'_, Process>,
894 handle: u32,
895 inc: bool,
896 strong: bool,
897 ) -> Result {
898 if inc && handle == 0 {
899 if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
900 if core::ptr::eq(&*self, &*node_ref.node.owner) {
901 return Err(EINVAL);
902 }
903 let _ = self.insert_or_update_handle(node_ref, true);
904 return Ok(());
905 }
906 }
907
908 // To preserve original binder behaviour, we only fail requests where the manager tries to
909 // increment references on itself.
910 let mut refs = self.node_refs.lock();
911 if let Some(info) = refs.by_handle.get_mut(&handle) {
912 if info.node_ref().update(inc, strong) {
913 // Clean up death if there is one attached to this node reference.
914 if let Some(death) = info.death().take() {
915 death.set_cleared(true);
916 self.remove_from_delivered_deaths(&death);
917 }
918
919 // Remove reference from process tables, and from the node's `refs` list.
920
921 // SAFETY: We are removing the `NodeRefInfo` from the right node.
922 unsafe { info.node_ref2().node.remove_node_info(&info) };
923
924 let id = info.node_ref().node.global_id();
925 refs.by_handle.remove(&handle);
926 refs.by_node.remove(&id);
927 }
928 } else {
929 // All refs are cleared during process exit, so a missing ref is expected then; only warn while the process is still alive.
930 if !self.inner.lock().is_dead {
931 pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
932 }
933 }
934 Ok(())
935 }
936
937 /// Decrements the refcount of the given node, if one exists.
938 pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
939 let mut inner = self.inner.lock();
940 if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
941 inner.update_node_refcount(&node, false, strong, 1, None);
942 }
943 }
944
945 pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
946 let ptr = reader.read::<u64>()?;
947 let cookie = reader.read::<u64>()?;
948 let mut inner = self.inner.lock();
949 if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
950 if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
951 // This only fails if the process is dead.
952 let _ = inner.push_work(node);
953 }
954 }
955 Ok(())
956 }
957
958 pub(crate) fn buffer_alloc(
959 self: &Arc<Self>,
960 debug_id: usize,
961 size: usize,
962 is_oneway: bool,
963 from_pid: i32,
964 ) -> BinderResult<NewAllocation> {
965 use kernel::page::PAGE_SIZE;
966
967 let mut reserve_new_args = ReserveNewArgs {
968 debug_id,
969 size,
970 is_oneway,
971 pid: from_pid,
972 ..ReserveNewArgs::default()
973 };
974
975 let (new_alloc, addr) = loop {
976 let mut inner = self.inner.lock();
977 let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
978 let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
979 ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
980 ReserveNew::NeedAlloc(request) => request,
981 };
982 drop(inner);
983 // We need to allocate memory and then call `reserve_new` again.
984 reserve_new_args = alloc_request.make_alloc()?;
985 };
986
987 let res = Allocation::new(
988 self.clone(),
989 debug_id,
990 new_alloc.offset,
991 size,
992 addr + new_alloc.offset,
993 new_alloc.oneway_spam_detected,
994 );
995
996 // This allocation will be marked as in use until the `Allocation` is used to free it.
997 //
998 // This method can't be called while holding a lock, so we release the lock first. It's
999 // okay for several threads to use the method on the same index at the same time. In that
1000 // case, one of the calls will allocate the given page (if missing), and the other call
1001 // will wait for the other call to finish allocating the page.
1002 //
1003 // We will not call `stop_using_range` in parallel with this on the same page, because the
1004 // allocation can only be removed via the destructor of the `Allocation` object that we
1005 // currently own.
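//
// The page range below covers every page touched by the new allocation: the start index is
// rounded down to the page containing the first byte, and the end index is rounded up past
// the page containing the last byte.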
1006 match self.pages.use_range(
1007 new_alloc.offset / PAGE_SIZE,
1008 (new_alloc.offset + size + (PAGE_SIZE - 1)) / PAGE_SIZE,
1009 ) {
1010 Ok(()) => {}
1011 Err(err) => {
1012 pr_warn!("use_range failure {:?}", err);
1013 return Err(err.into());
1014 }
1015 }
1016
1017 Ok(NewAllocation(res))
1018 }
1019
1020 pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
1021 let mut inner = self.inner.lock();
1022 let mapping = inner.mapping.as_mut()?;
1023 let offset = ptr.checked_sub(mapping.address)?;
1024 let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
1025 let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1026 if let Some(data) = odata {
1027 alloc.set_info(data);
1028 }
1029 Some(alloc)
1030 }
1031
1032 pub(crate) fn buffer_raw_free(&self, ptr: usize) {
1033 let mut inner = self.inner.lock();
1034 if let Some(ref mut mapping) = &mut inner.mapping {
1035 let offset = match ptr.checked_sub(mapping.address) {
1036 Some(offset) => offset,
1037 None => return,
1038 };
1039
1040 let freed_range = match mapping.alloc.reservation_abort(offset) {
1041 Ok(freed_range) => freed_range,
1042 Err(_) => {
1043 pr_warn!(
1044 "Pointer {:x} failed to free, base = {:x}\n",
1045 ptr,
1046 mapping.address
1047 );
1048 return;
1049 }
1050 };
1051
1052 // No more allocations in this range. Mark them as not in use.
1053 //
1054 // Must be done before we release the lock so that `use_range` is not used on these
1055 // indices until `stop_using_range` returns.
1056 self.pages
1057 .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
1058 }
1059 }
1060
1061 pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
1062 let mut inner = self.inner.lock();
1063 if let Some(ref mut mapping) = &mut inner.mapping {
1064 if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
1065 pr_warn!("Offset {} failed to be marked freeable\n", offset);
1066 }
1067 }
1068 }
1069
1070 fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
1071 use kernel::page::PAGE_SIZE;
1072 let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
1073 let mapping = Mapping::new(vma.start(), size);
1074 let page_count = self.pages.register_with_vma(vma)?;
1075 if page_count * PAGE_SIZE != size {
1076 return Err(EINVAL);
1077 }
1078
1079 // Save range allocator for later.
1080 self.inner.lock().mapping = Some(mapping);
1081
1082 Ok(())
1083 }
1084
1085 fn version(&self, data: UserSlice) -> Result {
1086 data.writer().write(&BinderVersion::current())
1087 }
1088
1089 pub(crate) fn register_thread(&self) -> bool {
1090 self.inner.lock().register_thread()
1091 }
1092
1093 fn remove_thread(&self, thread: Arc<Thread>) {
1094 self.inner.lock().threads.remove(&thread.id);
1095 thread.release();
1096 }
1097
1098 fn set_max_threads(&self, max: u32) {
1099 self.inner.lock().max_threads = max;
1100 }
1101
1102 fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
1103 self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
1104 }
1105
1106 pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
1107 self.inner.lock().oneway_spam_detection_enabled
1108 }
1109
1110 fn get_node_debug_info(&self, data: UserSlice) -> Result {
1111 let (mut reader, mut writer) = data.reader_writer();
1112
1113 // Read the starting point.
1114 let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
1115 let mut out = BinderNodeDebugInfo::default();
1116
1117 {
1118 let inner = self.inner.lock();
1119 for (node_ptr, node) in &inner.nodes {
1120 if *node_ptr > ptr {
1121 node.populate_debug_info(&mut out, &inner);
1122 break;
1123 }
1124 }
1125 }
1126
1127 writer.write(&out)
1128 }
1129
1130 fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
1131 let (mut reader, mut writer) = data.reader_writer();
1132 let mut out = reader.read::<BinderNodeInfoForRef>()?;
1133
1134 if out.strong_count != 0
1135 || out.weak_count != 0
1136 || out.reserved1 != 0
1137 || out.reserved2 != 0
1138 || out.reserved3 != 0
1139 {
1140 return Err(EINVAL);
1141 }
1142
1143 // Only the context manager is allowed to use this ioctl.
1144 if !self.inner.lock().is_manager {
1145 return Err(EPERM);
1146 }
1147
1148 {
1149 let mut node_refs = self.node_refs.lock();
1150 let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
1151 let node_ref = node_info.node_ref();
1152 let owner_inner = node_ref.node.owner.inner.lock();
1153 node_ref.node.populate_counts(&mut out, &owner_inner);
1154 }
1155
1156 // Write the result back.
1157 writer.write(&out)
1158 }
1159
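/// Returns whether the driver should ask userspace to spawn another looper thread, and if so,
/// accounts for the request by bumping `requested_thread_count`.
///
/// A hedged sketch of the expected call site in the read path (the wire command is assumed to
/// be the usual `BR_SPAWN_LOOPER`; `writer` is illustrative):
///
/// ```ignore
/// if process.needs_thread() {
///     writer.write(&BR_SPAWN_LOOPER)?;
/// }
/// ```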
1160 pub(crate) fn needs_thread(&self) -> bool {
1161 let mut inner = self.inner.lock();
1162 let ret = inner.requested_thread_count == 0
1163 && inner.ready_threads.is_empty()
1164 && inner.started_thread_count < inner.max_threads;
1165 if ret {
1166 inner.requested_thread_count += 1
1167 }
1168 ret
1169 }
1170
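/// Handles `BC_REQUEST_DEATH_NOTIFICATION`: reads a `u32` handle and a `u64` cookie from the
/// command stream and registers a death notification on the referenced node. If the node's
/// owner is already dead, the notification is marked dead immediately.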
1171 pub(crate) fn request_death(
1172 self: &Arc<Self>,
1173 reader: &mut UserSliceReader,
1174 thread: &Thread,
1175 ) -> Result {
1176 let handle: u32 = reader.read()?;
1177 let cookie: u64 = reader.read()?;
1178
1179 // Queue BR_ERROR if we can't allocate memory for the death notification.
1180 let death = UniqueArc::new_uninit(GFP_KERNEL).map_err(|err| {
1181 thread.push_return_work(BR_ERROR);
1182 err
1183 })?;
1184 let mut refs = self.node_refs.lock();
1185 let Some(info) = refs.by_handle.get_mut(&handle) else {
1186 pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
1187 return Ok(());
1188 };
1189
1190 // Nothing to do if there is already a death notification request for this handle.
1191 if info.death().is_some() {
1192 pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
1193 return Ok(());
1194 }
1195
1196 let death = {
1197 let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
1198 match death.pin_init_with(death_init) {
1199 Ok(death) => death,
1200 // error is infallible
1201 Err(err) => match err {},
1202 }
1203 };
1204
1205 // Register the death notification.
1206 {
1207 let owner = info.node_ref2().node.owner.clone();
1208 let mut owner_inner = owner.inner.lock();
1209 if owner_inner.is_dead {
1210 let death = Arc::from(death);
1211 *info.death() = Some(death.clone());
1212 drop(owner_inner);
1213 death.set_dead();
1214 } else {
1215 let death = ListArc::from(death);
1216 *info.death() = Some(death.clone_arc());
1217 info.node_ref().node.add_death(death, &mut owner_inner);
1218 }
1219 }
1220 Ok(())
1221 }
1222
1223 pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
1224 let handle: u32 = reader.read()?;
1225 let cookie: u64 = reader.read()?;
1226
1227 let mut refs = self.node_refs.lock();
1228 let Some(info) = refs.by_handle.get_mut(&handle) else {
1229 pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
1230 return Ok(());
1231 };
1232
1233 let Some(death) = info.death().take() else {
1234 pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
1235 return Ok(());
1236 };
1237 if death.cookie != cookie {
1238 *info.death() = Some(death);
1239 pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
1240 return Ok(());
1241 }
1242
1243 // Update state and determine if we need to queue a work item. We only need to do it when
1244 // the node is not dead or if the user already completed the death notification.
1245 if death.set_cleared(false) {
1246 if let Some(death) = ListArc::try_from_arc_or_drop(death) {
1247 let _ = thread.push_work_if_looper(death);
1248 }
1249 }
1250
1251 Ok(())
1252 }
1253
1254 pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
1255 if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
1256 death.set_notification_done(thread);
1257 }
1258 }
1259
1260 /// Locks the spinlock and moves the `nodes` rbtree out.
1261 ///
1262 /// This allows you to iterate through `nodes` while also allowing you to give other parts of
1263 /// the codebase exclusive access to `ProcessInner`.
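///
/// # Example
///
/// A minimal usage sketch; the tree is moved back into `ProcessInner` when the returned
/// `WithNodes` guard is dropped (see `WithNodes::drop` at the bottom of this file):
///
/// ```ignore
/// let mut guard = process.lock_with_nodes();
/// for node in guard.nodes.values() {
///     node.full_debug_print(m, &mut guard.inner)?;
/// }
/// drop(guard); // Puts `nodes` back and releases the spinlock.
/// ```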
1264 pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
1265 let mut inner = self.inner.lock();
1266 WithNodes {
1267 nodes: take(&mut inner.nodes),
1268 inner,
1269 }
1270 }
1271
1272 fn deferred_flush(&self) {
1273 let inner = self.inner.lock();
1274 for thread in inner.threads.values() {
1275 thread.exit_looper();
1276 }
1277 }
1278
1279 fn deferred_release(self: Arc<Self>) {
1280 let is_manager = {
1281 let mut inner = self.inner.lock();
1282 inner.is_dead = true;
1283 inner.is_frozen = false;
1284 inner.sync_recv = false;
1285 inner.async_recv = false;
1286 inner.is_manager
1287 };
1288
1289 if is_manager {
1290 self.ctx.unset_manager_node();
1291 }
1292
1293 self.ctx.deregister_process(&self);
1294
1295 let binderfs_file = self.inner.lock().binderfs_file.take();
1296 drop(binderfs_file);
1297
1298 // Release threads.
1299 let threads = {
1300 let mut inner = self.inner.lock();
1301 let threads = take(&mut inner.threads);
1302 let ready = take(&mut inner.ready_threads);
1303 drop(inner);
1304 drop(ready);
1305
1306 for thread in threads.values() {
1307 thread.release();
1308 }
1309 threads
1310 };
1311
1312 // Release nodes.
1313 {
1314 while let Some(node) = {
1315 let mut lock = self.inner.lock();
1316 lock.nodes.cursor_front().map(|c| c.remove_current().1)
1317 } {
1318 node.to_key_value().1.release();
1319 }
1320 }
1321
1322 // Clean up death listeners and remove nodes from external node info lists.
1323 for info in self.node_refs.lock().by_handle.values_mut() {
1324 // SAFETY: We are removing the `NodeRefInfo` from the right node.
1325 unsafe { info.node_ref2().node.remove_node_info(&info) };
1326
1327 // Remove all death notifications from the nodes (that belong to a different process).
1328 let death = if let Some(existing) = info.death().take() {
1329 existing
1330 } else {
1331 continue;
1332 };
1333 death.set_cleared(false);
1334 }
1335
1336 // Clean up freeze listeners.
1337 let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
1338 for listener in freeze_listeners.values() {
1339 listener.on_process_exit(&self);
1340 }
1341 drop(freeze_listeners);
1342
1343 // Release refs on foreign nodes.
1344 {
1345 let mut refs = self.node_refs.lock();
1346 let by_handle = take(&mut refs.by_handle);
1347 let by_node = take(&mut refs.by_node);
1348 drop(refs);
1349 drop(by_node);
1350 drop(by_handle);
1351 }
1352
1353 // Cancel all pending work items.
1354 while let Some(work) = self.get_work() {
1355 work.into_arc().cancel();
1356 }
1357
1358 let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
1359 drop(delivered_deaths);
1360
1361 // Free any resources kept alive by allocated buffers.
1362 let omapping = self.inner.lock().mapping.take();
1363 if let Some(mut mapping) = omapping {
1364 let address = mapping.address;
1365 mapping
1366 .alloc
1367 .take_for_each(|offset, size, debug_id, odata| {
1368 let ptr = offset + address;
1369 pr_warn!(
1370 "{}: removing orphan mapping {offset}:{size}\n",
1371 self.pid_in_current_ns()
1372 );
1373 let mut alloc =
1374 Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
1375 if let Some(data) = odata {
1376 alloc.set_info(data);
1377 }
1378 drop(alloc)
1379 });
1380 }
1381
1382 // Calls to synchronize_rcu() in the `Thread` destructors happen here.
1383 drop(threads);
1384 }
1385
1386 pub(crate) fn drop_outstanding_txn(&self) {
1387 let wake = {
1388 let mut inner = self.inner.lock();
1389 if inner.outstanding_txns == 0 {
1390 pr_err!("outstanding_txns underflow");
1391 return;
1392 }
1393 inner.outstanding_txns -= 1;
1394 inner.is_frozen && inner.outstanding_txns == 0
1395 };
1396
1397 if wake {
1398 self.freeze_wait.notify_all();
1399 }
1400 }
1401
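/// Freezes or unfreezes this process in response to `BINDER_FREEZE`.
///
/// On freeze, the process is marked frozen and, if a timeout is given, we wait for
/// `outstanding_txns` to drop to zero. If transactions are still pending afterwards, the
/// freeze is rolled back and `EAGAIN` is returned so that userspace may retry.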
1402 pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
1403 if info.enable == 0 {
1404 let msgs = self.prepare_freeze_messages()?;
1405 let mut inner = self.inner.lock();
1406 inner.sync_recv = false;
1407 inner.async_recv = false;
1408 inner.is_frozen = false;
1409 drop(inner);
1410 msgs.send_messages();
1411 return Ok(());
1412 }
1413
1414 let mut inner = self.inner.lock();
1415 inner.sync_recv = false;
1416 inner.async_recv = false;
1417 inner.is_frozen = true;
1418
1419 if info.timeout_ms > 0 {
1420 let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
1421 while jiffies > 0 {
1422 if inner.outstanding_txns == 0 {
1423 break;
1424 }
1425
1426 match self
1427 .freeze_wait
1428 .wait_interruptible_timeout(&mut inner, jiffies)
1429 {
1430 CondVarTimeoutResult::Signal { .. } => {
1431 inner.is_frozen = false;
1432 return Err(ERESTARTSYS);
1433 }
1434 CondVarTimeoutResult::Woken { jiffies: remaining } => {
1435 jiffies = remaining;
1436 }
1437 CondVarTimeoutResult::Timeout => {
1438 jiffies = 0;
1439 }
1440 }
1441 }
1442 }
1443
1444 if inner.txns_pending_locked() {
1445 inner.is_frozen = false;
1446 Err(EAGAIN)
1447 } else {
1448 drop(inner);
1449 match self.prepare_freeze_messages() {
1450 Ok(batch) => {
1451 batch.send_messages();
1452 Ok(())
1453 }
1454 Err(kernel::alloc::AllocError) => {
1455 self.inner.lock().is_frozen = false;
1456 Err(ENOMEM)
1457 }
1458 }
1459 }
1460 }
1461 }
1462
1463 fn get_frozen_status(data: UserSlice) -> Result {
1464 let (mut reader, mut writer) = data.reader_writer();
1465
1466 let mut info = reader.read::<BinderFrozenStatusInfo>()?;
1467 info.sync_recv = 0;
1468 info.async_recv = 0;
1469 let mut found = false;
1470
1471 for ctx in crate::context::get_all_contexts()? {
1472 ctx.for_each_proc(|proc| {
1473 if proc.task.pid() == info.pid as _ {
1474 found = true;
1475 let inner = proc.inner.lock();
1476 let txns_pending = inner.txns_pending_locked();
1477 info.async_recv |= inner.async_recv as u32;
1478 info.sync_recv |= inner.sync_recv as u32;
1479 info.sync_recv |= (txns_pending as u32) << 1;
1480 }
1481 });
1482 }
1483
1484 if found {
1485 writer.write(&info)?;
1486 Ok(())
1487 } else {
1488 Err(EINVAL)
1489 }
1490 }
1491
1492 fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
1493 let info = reader.read::<BinderFreezeInfo>()?;
1494
1495 // It is very unlikely for there to be more than 3, since a process normally uses at most the
1496 // binder and hwbinder contexts.
1497 let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
1498
1499 let ctxs = crate::context::get_all_contexts()?;
1500 for ctx in ctxs {
1501 for proc in ctx.get_procs_with_pid(info.pid as i32)? {
1502 procs.push(proc, GFP_KERNEL)?;
1503 }
1504 }
1505
1506 for proc in procs {
1507 proc.ioctl_freeze(&info)?;
1508 }
1509 Ok(())
1510 }
1511
1512 /// The ioctl handler.
1513 impl Process {
1514 /// Ioctls that are write-only from the perspective of userspace.
1515 ///
1516 /// The kernel will only read from the pointer that userspace provided to us.
1517 fn ioctl_write_only(
1518 this: ArcBorrow<'_, Process>,
1519 _file: &File,
1520 cmd: u32,
1521 reader: &mut UserSliceReader,
1522 ) -> Result {
1523 let thread = this.get_current_thread()?;
1524 match cmd {
1525 uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
1526 uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
1527 uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
1528 uapi::BINDER_SET_CONTEXT_MGR_EXT => {
1529 this.set_as_manager(Some(reader.read()?), &thread)?
1530 }
1531 uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
1532 this.set_oneway_spam_detection_enabled(reader.read()?)
1533 }
1534 uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
1535 _ => return Err(EINVAL),
1536 }
1537 Ok(())
1538 }
1539
1540 /// Ioctls that are read/write from the perspective of userspace.
1541 ///
1542 /// The kernel will both read from and write to the pointer that userspace provided to us.
1543 fn ioctl_write_read(
1544 this: ArcBorrow<'_, Process>,
1545 file: &File,
1546 cmd: u32,
1547 data: UserSlice,
1548 ) -> Result {
1549 let thread = this.get_current_thread()?;
1550 let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
1551 match cmd {
1552 uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
1553 uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
1554 uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
1555 uapi::BINDER_VERSION => this.version(data)?,
1556 uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
1557 uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
1558 _ => return Err(EINVAL),
1559 }
1560 Ok(())
1561 }
1562 }
1563
1564 /// The file operations supported by `Process`.
1565 impl Process {
1566 pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
1567 Self::new(ctx.into(), ARef::from(file.cred()))
1568 }
1569
1570 pub(crate) fn release(this: Arc<Process>, _file: &File) {
1571 let binderfs_file;
1572 let should_schedule;
1573 {
1574 let mut inner = this.inner.lock();
1575 should_schedule = inner.defer_work == 0;
1576 inner.defer_work |= PROC_DEFER_RELEASE;
1577 binderfs_file = inner.binderfs_file.take();
1578 }
1579
1580 if should_schedule {
1581 // Ignore failures to schedule to the workqueue. Those just mean that we're already
1582 // scheduled for execution.
1583 let _ = workqueue::system().enqueue(this);
1584 }
1585
1586 drop(binderfs_file);
1587 }
1588
1589 pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
1590 let should_schedule;
1591 {
1592 let mut inner = this.inner.lock();
1593 should_schedule = inner.defer_work == 0;
1594 inner.defer_work |= PROC_DEFER_FLUSH;
1595 }
1596
1597 if should_schedule {
1598 // Ignore failures to schedule to the workqueue. Those just mean that we're already
1599 // scheduled for execution.
1600 let _ = workqueue::system().enqueue(Arc::from(this));
1601 }
1602 Ok(())
1603 }
1604
1605 pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
1606 use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
1607 use kernel::uapi::{_IOC_READ, _IOC_WRITE};
1608
1609 crate::trace::trace_ioctl(cmd, arg as usize);
1610
1611 let user_slice = UserSlice::new(arg, _IOC_SIZE(cmd));
1612
1613 const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
1614
1615 let res = match _IOC_DIR(cmd) {
1616 _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
1617 _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
1618 _ => Err(EINVAL),
1619 };
1620
1621 crate::trace::trace_ioctl_done(res);
1622 res
1623 }
1624
1625 pub(crate) fn compat_ioctl(
1626 this: ArcBorrow<'_, Process>,
1627 file: &File,
1628 cmd: u32,
1629 arg: usize,
1630 ) -> Result {
1631 Self::ioctl(this, file, cmd, arg)
1632 }
1633
1634 pub(crate) fn mmap(
1635 this: ArcBorrow<'_, Process>,
1636 _file: &File,
1637 vma: &mm::virt::VmaNew,
1638 ) -> Result {
1639 // We don't allow mmap to be used in a different process.
1640 if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
1641 return Err(EINVAL);
1642 }
1643 if vma.start() == 0 {
1644 return Err(EINVAL);
1645 }
1646
1647 vma.try_clear_maywrite().map_err(|_| EPERM)?;
1648 vma.set_dontcopy();
1649 vma.set_mixedmap();
1650
1651 // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
1652 this.create_mapping(vma)
1653 }
1654
1655 pub(crate) fn poll(
1656 this: ArcBorrow<'_, Process>,
1657 file: &File,
1658 table: PollTable<'_>,
1659 ) -> Result<u32> {
1660 let thread = this.get_current_thread()?;
1661 let (from_proc, mut mask) = thread.poll(file, table);
1662 if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
1663 mask |= bindings::POLLIN;
1664 }
1665 Ok(mask)
1666 }
1667 }
1668
1669 /// Represents that a thread has registered with the `ready_threads` list of its process.
1670 ///
1671 /// The destructor of this type will unregister the thread from the list of ready threads.
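///
/// # Example
///
/// A minimal sketch of the intended usage (compare `get_work_or_register` above):
///
/// ```ignore
/// let mut inner = process.inner.lock();
/// let registration = Registration::new(&thread, &mut inner);
/// drop(inner); // Release the spinlock while waiting.
/// // ... sleep until work is delivered to the thread or the wait is interrupted ...
/// drop(registration); // Takes the lock again and removes the thread from `ready_threads`.
/// ```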
1672 pub(crate) struct Registration<'a> {
1673 thread: &'a Arc<Thread>,
1674 }
1675
1676 impl<'a> Registration<'a> {
1677 fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
1678 assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
1679 // INVARIANT: We are pushing this thread to the right `ready_threads` list.
1680 if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
1681 guard.ready_threads.push_front(list_arc);
1682 } else {
1683 // It is an error to hit this branch, and it should not be reachable. We try to do
1684 // something reasonable when the failure path happens. Most likely, the thread in
1685 // question will sleep forever.
1686 pr_err!("Same thread registered with `ready_threads` twice.");
1687 }
1688 Self { thread }
1689 }
1690 }
1691
1692 impl Drop for Registration<'_> {
1693 fn drop(&mut self) {
1694 let mut inner = self.thread.process.inner.lock();
1695 // SAFETY: The thread has the invariant that we never push it to any other linked list than
1696 // the `ready_threads` list of its parent process. Therefore, the thread is either in that
1697 // list, or in no list.
1698 unsafe { inner.ready_threads.remove(self.thread) };
1699 }
1700 }
1701
1702 pub(crate) struct WithNodes<'a> {
1703 pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
1704 pub(crate) nodes: RBTree<u64, DArc<Node>>,
1705 }
1706
1707 impl Drop for WithNodes<'_> {
1708 fn drop(&mut self) {
1709 core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
1710 if self.nodes.iter().next().is_some() {
1711 pr_err!("nodes array was modified while using lock_with_nodes\n");
1712 }
1713 }
1714 }
1715