// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

use kernel::{
    list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
    prelude::*,
    seq_file::SeqFile,
    seq_print,
    sync::lock::{spinlock::SpinLockBackend, Guard},
    sync::{Arc, LockedBy, SpinLock},
};

use crate::{
    defs::*,
    error::BinderError,
    prio::{self, BinderPriority},
    process::{NodeRefInfo, Process, ProcessInner},
    thread::Thread,
    transaction::Transaction,
    BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
};

use core::mem;

mod wrapper;
pub(crate) use self::wrapper::CritIncrWrapper;

#[derive(Debug)]
pub(crate) struct CouldNotDeliverCriticalIncrement;

/// Keeps track of how this node is scheduled.
///
/// There are two ways to schedule a node to a work list: either schedule the node itself, or
/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
/// make it possible to "move" a node from one list to another - when `do_work` is called directly
/// on the `Node`, it is a no-op if there is also a pending wrapper.
///
/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
/// of this: weak increments and strong increments. We call such increments "critical" because it
/// is critical that they are delivered to the thread doing the increment. Some examples:
///
/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
///   increment. Delivering the node to the thread doing the weak increment is wrong, since the
///   thread doing the strong increment may have exited long before the command is actually
///   processed by userspace.
///
/// * We have a weak reference and are about to drop it on one thread. But then another thread does
///   a zero-to-one strong increment. If the strong increment gets sent to the thread that was
///   about to drop the weak reference, then the strong increment could be processed after the
///   other thread has already exited, which would be too late.
///
/// Note that trying to create a `ListArc` to the node can succeed even if `has_pushed_node` is
/// set. This is because another thread might just have popped the node from a todo list, but not
/// yet called `do_work`. However, if `has_pushed_node` is false, then creating a `ListArc` should
/// always succeed.
///
/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
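///
/// A sketch of the transition discipline, mirroring the assertions in the methods below
/// (illustrative only, not part of the driver):
///
/// ```ignore
/// let mut state = DeliveryState {
///     has_pushed_node: false,
///     has_pushed_wrapper: false,
///     has_weak_zero2one: false,
///     has_strong_zero2one: false,
/// };
///
/// // A normal push schedules the `Node` itself.
/// assert!(state.should_normal_push());
/// state.did_normal_push();
///
/// // A strong zero-to-one increment now cannot be scheduled using the `Node`
/// // (it is already pushed), so the caller must fall back to a wrapper.
/// assert!(state.should_push_strong_zero2one());
/// assert!(!state.can_push_strong_zero2one_normally());
/// state.did_push_strong_zero2one_wrapper();
/// ```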
struct DeliveryState {
    /// Is the `Node` currently scheduled?
    has_pushed_node: bool,

    /// Is a wrapper currently scheduled?
    ///
    /// The wrapper is used only for strong zero2one increments.
    has_pushed_wrapper: bool,

    /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
    ///
    /// Weak zero2one operations are always scheduled using the `Node`.
    has_weak_zero2one: bool,

    /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
    ///
    /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
    /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
    has_strong_zero2one: bool,
}

impl DeliveryState {
    fn should_normal_push(&self) -> bool {
        !self.has_pushed_node && !self.has_pushed_wrapper
    }

    fn did_normal_push(&mut self) {
        assert!(self.should_normal_push());
        self.has_pushed_node = true;
    }

    fn should_push_weak_zero2one(&self) -> bool {
        !self.has_weak_zero2one && !self.has_strong_zero2one
    }

    fn can_push_weak_zero2one_normally(&self) -> bool {
        !self.has_pushed_node
    }

    fn did_push_weak_zero2one(&mut self) {
        assert!(self.should_push_weak_zero2one());
        assert!(self.can_push_weak_zero2one_normally());
        self.has_pushed_node = true;
        self.has_weak_zero2one = true;
    }

    fn should_push_strong_zero2one(&self) -> bool {
        !self.has_strong_zero2one
    }

    fn can_push_strong_zero2one_normally(&self) -> bool {
        !self.has_pushed_node
    }

    fn did_push_strong_zero2one(&mut self) {
        assert!(self.should_push_strong_zero2one());
        assert!(self.can_push_strong_zero2one_normally());
        self.has_pushed_node = true;
        self.has_strong_zero2one = true;
    }

    fn did_push_strong_zero2one_wrapper(&mut self) {
        assert!(self.should_push_strong_zero2one());
        assert!(!self.can_push_strong_zero2one_normally());
        self.has_pushed_wrapper = true;
        self.has_strong_zero2one = true;
    }
}

struct CountState {
    /// The reference count.
    count: usize,
    /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
    /// even if count is greater than one, we only increment it once in the owning process.)
    has_count: bool,
}

impl CountState {
    fn new() -> Self {
        Self {
            count: 0,
            has_count: false,
        }
    }
}

struct NodeInner {
    /// Strong refcounts held on this node by `NodeRef` objects.
    strong: CountState,
    /// Weak refcounts held on this node by `NodeRef` objects.
    weak: CountState,
    delivery_state: DeliveryState,
    /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
    /// that is, userspace will not be given the next one until it has finished processing the
    /// previous oneway transaction. This is done to avoid the case where two oneway transactions
    /// arrive in opposite order from the order in which they were sent. (E.g., they could be
    /// delivered to two different threads, which could appear as if they were sent in opposite
    /// order.)
    ///
    /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
    /// deliver the next oneway transaction until userspace signals that it has finished processing
    /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
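    ///
    /// A sketch of the resulting flow in terms of the methods defined further down in this
    /// file (illustrative only; locking and error handling elided):
    ///
    /// ```ignore
    /// node.submit_oneway(t1, &mut guard)?; // delivered now; has_oneway_transaction = true
    /// node.submit_oneway(t2, &mut guard)?; // t1 still pending, so t2 goes to oneway_todo
    /// // Userspace finishes t1 and calls the BC_FREE_BUFFER ioctl, which leads to:
    /// node.pending_oneway_finished(); // pops t2 from oneway_todo and delivers it
    /// ```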
    oneway_todo: List<DTRWrap<Transaction>>,
    /// Keeps track of whether this node has a pending oneway transaction.
    ///
    /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
    /// being delivered directly to the process.
    has_oneway_transaction: bool,
    /// List of processes to deliver a notification to when this node is destroyed (usually due to
    /// the process dying).
    death_list: List<DTRWrap<NodeDeath>, 1>,
    /// List of processes to deliver freeze notifications to.
    freeze_list: KVVec<Arc<Process>>,
    /// The number of active BR_INCREFS or BR_ACQUIRE operations (at most two: one of each).
    ///
    /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
    /// active operations have ended. This avoids the situation where an increment and a decrement
    /// get reordered from userspace's perspective.
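    ///
    /// Illustrative sequence (see `inc_ref_done_locked` and `do_work_locked` below): the driver
    /// sends `BR_ACQUIRE` and bumps this counter; if the refcount then drops to zero before
    /// userspace replies, the `BR_RELEASE` is postponed until `BC_ACQUIRE_DONE` brings the
    /// counter back to zero, at which point the node is rescheduled so that the decrement can
    /// be delivered.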
    active_inc_refs: u8,
    /// List of `NodeRefInfo` objects that reference this node.
    refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
}

use core::mem::offset_of;
use kernel::bindings::rb_node_layout;
pub(crate) const NODE_LAYOUT: rb_node_layout = rb_node_layout {
    arc_offset: Arc::<Node>::DATA_OFFSET + offset_of!(DTRWrap<Node>, wrapped),
    debug_id: offset_of!(Node, debug_id),
    ptr: offset_of!(Node, ptr),
};

#[pin_data]
pub(crate) struct Node {
    pub(crate) debug_id: usize,
    ptr: u64,
    pub(crate) cookie: u64,
    pub(crate) flags: u32,
    pub(crate) owner: Arc<Process>,
    inner: LockedBy<NodeInner, ProcessInner>,
    #[pin]
    links_track: AtomicTracker,
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Node {
        tracked_by links_track: AtomicTracker;
    }
}

// These make `oneway_todo` work.
kernel::list::impl_has_list_links! {
    impl HasListLinks<0> for DTRWrap<Transaction> { self.links.inner }
}
kernel::list::impl_list_item! {
    impl ListItem<0> for DTRWrap<Transaction> {
        using ListLinks;
    }
}
impl Node {
    pub(crate) fn new(
        ptr: u64,
        cookie: u64,
        flags: u32,
        owner: Arc<Process>,
    ) -> impl PinInit<Self> {
        pin_init!(Self {
            inner: LockedBy::new(
                &owner.inner,
                NodeInner {
                    strong: CountState::new(),
                    weak: CountState::new(),
                    delivery_state: DeliveryState {
                        has_pushed_node: false,
                        has_pushed_wrapper: false,
                        has_weak_zero2one: false,
                        has_strong_zero2one: false,
                    },
                    death_list: List::new(),
                    oneway_todo: List::new(),
                    freeze_list: KVVec::new(),
                    has_oneway_transaction: false,
                    active_inc_refs: 0,
                    refs: List::new(),
                },
            ),
            debug_id: super::next_debug_id(),
            ptr,
            cookie,
            flags,
            owner,
            links_track <- AtomicTracker::new(),
        })
    }

    pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
        let inner = self.inner.access_mut(owner_inner);
        inner.has_oneway_transaction
    }

    #[inline(never)]
    pub(crate) fn full_debug_print(
        &self,
        m: &SeqFile,
        owner_inner: &mut ProcessInner,
    ) -> Result<()> {
        let prio = self.node_prio();
        let inner = self.inner.access_mut(owner_inner);
        seq_print!(
            m,
            "  node {}: u{:016x} c{:016x} pri {}:{} hs {} hw {} cs {} cw {}",
            self.debug_id,
            self.ptr,
            self.cookie,
            prio.sched_policy,
            prio.prio,
            inner.strong.has_count,
            inner.weak.has_count,
            inner.strong.count,
            inner.weak.count,
        );
        if !inner.refs.is_empty() {
            seq_print!(m, " proc");
            for node_ref in &inner.refs {
                seq_print!(m, " {}", node_ref.process.task.pid());
            }
        }
        seq_print!(m, "\n");
        for t in &inner.oneway_todo {
            t.debug_print_inner(m, "    pending async transaction ");
        }
        Ok(())
    }

    /// Insert the `NodeRefInfo` into this node's `refs` list.
    ///
    /// # Safety
    ///
    /// It must be the case that `info.node_ref.node` is this node.
    pub(crate) unsafe fn insert_node_info(
        &self,
        info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
    ) {
        self.inner
            .access_mut(&mut self.owner.inner.lock())
            .refs
            .push_front(info);
    }

    /// Remove the `NodeRefInfo` from this node's `refs` list.
    ///
    /// # Safety
    ///
    /// It must be the case that `info.node_ref.node` is this node.
    pub(crate) unsafe fn remove_node_info(
        &self,
        info: &NodeRefInfo,
    ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
        // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
        // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
        // the `refs` list of another node.
        unsafe {
            self.inner
                .access_mut(&mut self.owner.inner.lock())
                .refs
                .remove(info)
        }
    }

    pub(crate) fn node_prio(&self) -> prio::BinderPriority {
        let flags = self.flags;
        let priority = (flags & FLAT_BINDER_FLAG_PRIORITY_MASK) as prio::Nice;
        let sched_policy =
            (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >> FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;

        BinderPriority {
            sched_policy,
            prio: prio::to_kernel_prio(sched_policy, priority),
        }
    }

    pub(crate) fn inherit_rt(&self) -> bool {
        (self.flags & FLAT_BINDER_FLAG_INHERIT_RT) != 0
    }

    /// An id that is unique across all binder nodes on the system. Used as the key in the
    /// `by_node` map.
    pub(crate) fn global_id(&self) -> usize {
        self as *const Node as usize
    }

    pub(crate) fn get_id(&self) -> (u64, u64) {
        (self.ptr, self.cookie)
    }

    pub(crate) fn add_death(
        &self,
        death: ListArc<DTRWrap<NodeDeath>, 1>,
        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
    ) {
        self.inner.access_mut(guard).death_list.push_back(death);
    }

    pub(crate) fn inc_ref_done_locked(
        self: &DArc<Node>,
        _strong: bool,
        owner_inner: &mut ProcessInner,
    ) -> Option<DLArc<Node>> {
        let inner = self.inner.access_mut(owner_inner);
        if inner.active_inc_refs == 0 {
            pr_err!("inc_ref_done called when no active inc_refs");
            return None;
        }

        inner.active_inc_refs -= 1;
        if inner.active_inc_refs == 0 {
            // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
            // would send a refcount decrement, and if so, tell the caller to schedule us.
            let strong = inner.strong.count > 0;
            let has_strong = inner.strong.has_count;
            let weak = strong || inner.weak.count > 0;
            let has_weak = inner.weak.has_count;

            let should_drop_weak = !weak && has_weak;
            let should_drop_strong = !strong && has_strong;

            // If we want to drop the ref-count again, tell the caller to schedule a work node for
            // that.
            let need_push = should_drop_weak || should_drop_strong;

            if need_push && inner.delivery_state.should_normal_push() {
                let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
                inner.delivery_state.did_normal_push();
                Some(list_arc)
            } else {
                None
            }
        } else {
            None
        }
    }

    pub(crate) fn update_refcount_locked(
        self: &DArc<Node>,
        inc: bool,
        strong: bool,
        count: usize,
        owner_inner: &mut ProcessInner,
    ) -> Option<DLArc<Node>> {
        let is_dead = owner_inner.is_dead;
        let inner = self.inner.access_mut(owner_inner);

        // Get a reference to the state we'll update.
        let state = if strong {
            &mut inner.strong
        } else {
            &mut inner.weak
        };

        // Update the count and determine whether we need to push work.
        let need_push = if inc {
            state.count += count;
            // TODO: This method shouldn't be used for zero-to-one increments.
            !is_dead && !state.has_count
        } else {
            if state.count < count {
                pr_err!("Failure: refcount underflow!");
                return None;
            }
            state.count -= count;
            !is_dead && state.count == 0 && state.has_count
        };

        if need_push && inner.delivery_state.should_normal_push() {
            let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
            inner.delivery_state.did_normal_push();
            Some(list_arc)
        } else {
            None
        }
    }

    pub(crate) fn incr_refcount_allow_zero2one(
        self: &DArc<Self>,
        strong: bool,
        owner_inner: &mut ProcessInner,
    ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
        let is_dead = owner_inner.is_dead;
        let inner = self.inner.access_mut(owner_inner);

        // Get a reference to the state we'll update.
        let state = if strong {
            &mut inner.strong
        } else {
            &mut inner.weak
        };

        // Update the count and determine whether we need to push work.
        state.count += 1;
        if is_dead || state.has_count {
            return Ok(None);
        }

        // Userspace needs to be notified of this.
        if !strong && inner.delivery_state.should_push_weak_zero2one() {
            assert!(inner.delivery_state.can_push_weak_zero2one_normally());
            let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
            inner.delivery_state.did_push_weak_zero2one();
            Ok(Some(list_arc))
        } else if strong && inner.delivery_state.should_push_strong_zero2one() {
            if inner.delivery_state.can_push_strong_zero2one_normally() {
                let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
                inner.delivery_state.did_push_strong_zero2one();
                Ok(Some(list_arc))
            } else {
                state.count -= 1;
                Err(CouldNotDeliverCriticalIncrement)
            }
        } else {
            // Work is already pushed, and we don't need to push again.
            Ok(None)
        }
    }

    pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
        self: &DArc<Self>,
        strong: bool,
        wrapper: CritIncrWrapper,
        owner_inner: &mut ProcessInner,
    ) -> Option<DLArc<dyn DeliverToRead>> {
        match self.incr_refcount_allow_zero2one(strong, owner_inner) {
            Ok(Some(node)) => Some(node as _),
            Ok(None) => None,
            Err(CouldNotDeliverCriticalIncrement) => {
                assert!(strong);
                let inner = self.inner.access_mut(owner_inner);
                inner.strong.count += 1;
                inner.delivery_state.did_push_strong_zero2one_wrapper();
                Some(wrapper.init(self.clone()))
            }
        }
    }

    pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
        self.owner
            .inner
            .lock()
            .update_node_refcount(self, inc, strong, count, None);
    }

    pub(crate) fn populate_counts(
        &self,
        out: &mut BinderNodeInfoForRef,
        guard: &Guard<'_, ProcessInner, SpinLockBackend>,
    ) {
        let inner = self.inner.access(guard);
        out.strong_count = inner.strong.count as _;
        out.weak_count = inner.weak.count as _;
    }

    pub(crate) fn populate_debug_info(
        &self,
        out: &mut BinderNodeDebugInfo,
        guard: &Guard<'_, ProcessInner, SpinLockBackend>,
    ) {
        out.ptr = self.ptr as _;
        out.cookie = self.cookie as _;
        let inner = self.inner.access(guard);
        if inner.strong.has_count {
            out.has_strong_ref = 1;
        }
        if inner.weak.has_count {
            out.has_weak_ref = 1;
        }
    }

    pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
        let inner = self.inner.access_mut(guard);
        inner.strong.has_count = true;
        inner.weak.has_count = true;
    }

    fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
        writer.write_code(code)?;
        writer.write_payload(&self.ptr)?;
        writer.write_payload(&self.cookie)?;
        Ok(())
    }

    pub(crate) fn submit_oneway(
        &self,
        transaction: DLArc<Transaction>,
        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
    ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
        if guard.is_dead {
            return Err((BinderError::new_dead(), transaction));
        }

        let inner = self.inner.access_mut(guard);
        if inner.has_oneway_transaction {
            inner.oneway_todo.push_back(transaction);
        } else {
            inner.has_oneway_transaction = true;
            guard.push_work(transaction)?;
        }
        Ok(())
    }

    pub(crate) fn release(&self) {
        let mut guard = self.owner.inner.lock();
        while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
            drop(guard);
            work.into_arc().cancel();
            guard = self.owner.inner.lock();
        }

        let death_list = core::mem::take(&mut self.inner.access_mut(&mut guard).death_list);
        drop(guard);
        for death in death_list {
            death.into_arc().set_dead();
        }
    }

    pub(crate) fn pending_oneway_finished(&self) {
        let mut guard = self.owner.inner.lock();
        if guard.is_dead {
            // Cleanup will happen in `Process::deferred_release`.
            return;
        }

        let inner = self.inner.access_mut(&mut guard);

        let transaction = inner.oneway_todo.pop_front();
        inner.has_oneway_transaction = transaction.is_some();
        if let Some(transaction) = transaction {
            match guard.push_work(transaction) {
                Ok(()) => {}
                Err((_err, work)) => {
                    // Process is dead.
                    // This shouldn't happen due to the `is_dead` check, but if it does, just drop
                    // the transaction and return.
                    drop(guard);
                    drop(work);
                }
            }
        }
    }

    /// Finds an outdated transaction that the given transaction can replace.
    ///
    /// If one is found, it is removed from the list and returned.
    pub(crate) fn take_outdated_transaction(
        &self,
        new: &Transaction,
        guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
    ) -> Option<DLArc<Transaction>> {
        let inner = self.inner.access_mut(guard);
        let mut cursor = inner.oneway_todo.cursor_front();
        while let Some(next) = cursor.peek_next() {
            if new.can_replace(&next) {
                return Some(next.remove());
            }
            cursor.move_next();
        }
        None
    }

    /// This is split into a separate function since it's called by both `Node::do_work` and
    /// `NodeWrapper::do_work`.
    fn do_work_locked(
        &self,
        writer: &mut BinderReturnWriter<'_>,
        mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
    ) -> Result<bool> {
        let inner = self.inner.access_mut(&mut guard);
        let strong = inner.strong.count > 0;
        let has_strong = inner.strong.has_count;
        let weak = strong || inner.weak.count > 0;
        let has_weak = inner.weak.has_count;

        if weak && !has_weak {
            inner.weak.has_count = true;
            inner.active_inc_refs += 1;
        }

        if strong && !has_strong {
            inner.strong.has_count = true;
            inner.active_inc_refs += 1;
        }

        let no_active_inc_refs = inner.active_inc_refs == 0;
        let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
        let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
        if should_drop_weak {
            inner.weak.has_count = false;
        }
        if should_drop_strong {
            inner.strong.has_count = false;
        }
        if no_active_inc_refs && !weak {
            // Remove the node if there are no references to it.
            guard.remove_node(self.ptr);
        }
        drop(guard);

        if weak && !has_weak {
            self.write(writer, BR_INCREFS)?;
        }
        if strong && !has_strong {
            self.write(writer, BR_ACQUIRE)?;
        }
        if should_drop_strong {
            self.write(writer, BR_RELEASE)?;
        }
        if should_drop_weak {
            self.write(writer, BR_DECREFS)?;
        }

        Ok(true)
    }

    pub(crate) fn add_freeze_listener(
        &self,
        process: &Arc<Process>,
        flags: kernel::alloc::Flags,
    ) -> Result {
        let mut vec_alloc = KVVec::<Arc<Process>>::new();
        loop {
            let mut guard = self.owner.inner.lock();
            // Do not check for `guard.is_dead`. The `is_dead` flag that matters here is the one
            // of the process that owns the listener, not the target.
            let inner = self.inner.access_mut(&mut guard);
            let len = inner.freeze_list.len();
            if len >= inner.freeze_list.capacity() {
                if len >= vec_alloc.capacity() {
                    // Allocate a larger vector without holding the lock, then retry.
                    drop(guard);
                    vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
                    continue;
                }
                mem::swap(&mut inner.freeze_list, &mut vec_alloc);
                for elem in vec_alloc.drain_all() {
                    inner.freeze_list.push_within_capacity(elem)?;
                }
            }
            inner.freeze_list.push_within_capacity(process.clone())?;
            return Ok(());
        }
    }

    pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
        // Declared before `guard` so that, if the list's allocation is taken out below, it is
        // freed only after the lock has been released.
        let _unused_capacity;
        let mut guard = self.owner.inner.lock();
        let inner = self.inner.access_mut(&mut guard);
        let len = inner.freeze_list.len();
        inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
        if len == inner.freeze_list.len() {
            pr_warn!(
                "Could not remove freeze listener for {}\n",
                p.pid_in_current_ns()
            );
        }
        if inner.freeze_list.is_empty() {
            _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
        }
    }

    pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
        &self.inner.access(guard).freeze_list
    }
}

impl DeliverToRead for Node {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let mut owner_inner = self.owner.inner.lock();
        let inner = self.inner.access_mut(&mut owner_inner);

        assert!(inner.delivery_state.has_pushed_node);
        if inner.delivery_state.has_pushed_wrapper {
            // If the wrapper is scheduled, then we are either a normal push or weak zero2one
            // increment, and the wrapper is a strong zero2one increment, so the wrapper always
            // takes precedence over us.
            assert!(inner.delivery_state.has_strong_zero2one);
            inner.delivery_state.has_pushed_node = false;
            inner.delivery_state.has_weak_zero2one = false;
            return Ok(true);
        }

        inner.delivery_state.has_pushed_node = false;
        inner.delivery_state.has_weak_zero2one = false;
        inner.delivery_state.has_strong_zero2one = false;

        self.do_work_locked(writer, owner_inner)
    }

    fn cancel(self: DArc<Self>) {}
    fn on_thread_selected(&self, _thread: &Thread) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    #[inline(never)]
    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        seq_print!(
            m,
            "{}node work {}: u{:016x} c{:016x}\n",
            prefix,
            self.debug_id,
            self.ptr,
            self.cookie,
        );
        Ok(())
    }
}

/// Represents something that holds one or more ref-counts to a `Node`.
///
/// Whenever process A holds a refcount to a node owned by a different process B, then process A
/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
/// refcount, we destroy the `NodeRef`, which decrements the refcount on the `Node`.
///
/// This type is also used for some other cases. For example, a transaction allocation holds a
/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
/// so that the destructor of the allocation will drop a refcount on the `Node`.
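///
/// An illustrative sketch of the double bookkeeping (not part of the driver): only the 0 -> 1
/// and 1 -> 0 transitions are forwarded to the `Node`; all other updates only adjust the local
/// counts.
///
/// ```ignore
/// let mut r = NodeRef::new(node, /* strong */ 1, /* weak */ 0);
/// r.update(true, true);  // BC_ACQUIRE: strong_count 1 -> 2, strong_node_count stays 1
/// r.update(true, true);  // BC_ACQUIRE: strong_count 2 -> 3
/// r.update(false, true); // BC_RELEASE: strong_count 3 -> 2, node not notified
/// // Only the final 1 -> 0 decrement calls node.update_refcount(false, ..).
/// ```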
pub(crate) struct NodeRef {
    pub(crate) node: DArc<Node>,
    /// How many times does this NodeRef hold a refcount on the Node?
    strong_node_count: usize,
    weak_node_count: usize,
    /// How many times does userspace hold a refcount on this NodeRef?
    strong_count: usize,
    weak_count: usize,
}

impl NodeRef {
    pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
        Self {
            node,
            strong_node_count: strong_count,
            weak_node_count: weak_count,
            strong_count,
            weak_count,
        }
    }

    pub(crate) fn absorb(&mut self, mut other: Self) {
        assert!(
            Arc::ptr_eq(&self.node, &other.node),
            "absorb called with differing nodes"
        );
        self.strong_node_count += other.strong_node_count;
        self.weak_node_count += other.weak_node_count;
        self.strong_count += other.strong_count;
        self.weak_count += other.weak_count;
        other.strong_count = 0;
        other.weak_count = 0;
        other.strong_node_count = 0;
        other.weak_node_count = 0;

        if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
            let mut guard = self.node.owner.inner.lock();
            let inner = self.node.inner.access_mut(&mut guard);

            if self.strong_node_count >= 2 {
                inner.strong.count -= self.strong_node_count - 1;
                self.strong_node_count = 1;
                assert_ne!(inner.strong.count, 0);
            }
            if self.weak_node_count >= 2 {
                inner.weak.count -= self.weak_node_count - 1;
                self.weak_node_count = 1;
                assert_ne!(inner.weak.count, 0);
            }
        }
    }

    pub(crate) fn get_count(&self) -> (usize, usize) {
        (self.strong_count, self.weak_count)
    }

    pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
        if strong && self.strong_count == 0 {
            return Err(EINVAL);
        }
        Ok(self
            .node
            .owner
            .inner
            .lock()
            .new_node_ref(self.node.clone(), strong, None))
    }

    /// Updates (increments or decrements) the number of references held against the node. If the
    /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
    /// its `update_refcount` function called.
    ///
    /// Returns whether `self` should be removed (when both counts are zero).
    pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
        if strong && self.strong_count == 0 {
            return false;
        }
        let (count, node_count, other_count) = if strong {
            (
                &mut self.strong_count,
                &mut self.strong_node_count,
                self.weak_count,
            )
        } else {
            (
                &mut self.weak_count,
                &mut self.weak_node_count,
                self.strong_count,
            )
        };
        if inc {
            if *count == 0 {
                *node_count = 1;
                self.node.update_refcount(true, 1, strong);
            }
            *count += 1;
        } else {
            if *count == 0 {
                pr_warn!(
                    "pid {} performed invalid decrement on ref\n",
                    kernel::current!().pid()
                );
                return false;
            }
            *count -= 1;
            if *count == 0 {
                self.node.update_refcount(false, *node_count, strong);
                *node_count = 0;
                return other_count == 0;
            }
        }
        false
    }
}

impl Drop for NodeRef {
    // This destructor is called conditionally from `Allocation::drop`. That branch is often
    // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
    #[inline(always)]
    fn drop(&mut self) {
        if self.strong_node_count > 0 {
            self.node
                .update_refcount(false, self.strong_node_count, true);
        }
        if self.weak_node_count > 0 {
            self.node
                .update_refcount(false, self.weak_node_count, false);
        }
    }
}

struct NodeDeathInner {
    dead: bool,
    cleared: bool,
    notification_done: bool,
    /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
    /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
    /// to the user).
    aborted: bool,
}

/// Used to deliver notifications when a process dies.
///
/// A process can request to be notified when the process that owns a given node dies, using
/// `BC_REQUEST_DEATH_NOTIFICATION`. This will make the driver send a `BR_DEAD_BINDER` to
/// userspace when the process dies (or immediately if it is already dead). Userspace is supposed
/// to respond with `BC_DEAD_BINDER_DONE` once it has processed the notification.
///
/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once
/// the notification has been removed. Note that if the remote process dies before the kernel has
/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
///
/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
///
/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
/// registration, then the death registration is immediately deleted (we implement this using the
/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
/// deregistering death notifications, so this codepath is not executed under normal circumstances.
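///
/// A sketch of the normal command sequence (illustrative only; payload details elided):
///
/// ```text
/// userspace: BC_REQUEST_DEATH_NOTIFICATION (handle, cookie)
///            ...the watched process dies...
/// kernel:    BR_DEAD_BINDER (cookie)
/// userspace: BC_DEAD_BINDER_DONE (cookie)
/// userspace: BC_CLEAR_DEATH_NOTIFICATION (handle, cookie)
/// kernel:    BR_CLEAR_DEATH_NOTIFICATION_DONE (cookie)
/// ```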
#[pin_data]
pub(crate) struct NodeDeath {
    node: DArc<Node>,
    process: Arc<Process>,
    pub(crate) cookie: u64,
    #[pin]
    links_track: AtomicTracker<0>,
    /// Used by the owner `Node` to store a list of registered death notifications.
    ///
    /// # Invariants
    ///
    /// Only ever used with the `death_list` list of `self.node`.
    #[pin]
    death_links: ListLinks<1>,
    /// Used by the process to keep track of the death notifications for which we have sent a
    /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
    ///
    /// # Invariants
    ///
    /// Only ever used with the `delivered_deaths` list of `self.process`.
    #[pin]
    delivered_links: ListLinks<2>,
    #[pin]
    delivered_links_track: AtomicTracker<2>,
    #[pin]
    inner: SpinLock<NodeDeathInner>,
}

impl NodeDeath {
    /// Constructs a new node death notification object.
    pub(crate) fn new(
        node: DArc<Node>,
        process: Arc<Process>,
        cookie: u64,
    ) -> impl PinInit<DTRWrap<Self>> {
        DTRWrap::new(pin_init!(
            Self {
                node,
                process,
                cookie,
                links_track <- AtomicTracker::new(),
                death_links <- ListLinks::new(),
                delivered_links <- ListLinks::new(),
                delivered_links_track <- AtomicTracker::new(),
                inner <- kernel::new_spinlock!(NodeDeathInner {
                    dead: false,
                    cleared: false,
                    notification_done: false,
                    aborted: false,
                }, "NodeDeath::inner"),
            }
        ))
    }

    /// Sets the cleared flag to `true`.
    ///
    /// It removes `self` from the node's death notification list if needed.
    ///
    /// Returns whether it needs to be queued.
    pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
        let (needs_removal, needs_queueing) = {
            // Update state and determine if we need to queue a work item. We only need to do it
            // when the node is not dead or if the user already completed the death notification.
            let mut inner = self.inner.lock();
            if abort {
                inner.aborted = true;
            }
            if inner.cleared {
                // Already cleared.
                return false;
            }
            inner.cleared = true;
            (!inner.dead, !inner.dead || inner.notification_done)
        };

        // Remove death notification from node.
        if needs_removal {
            let mut owner_inner = self.node.owner.inner.lock();
            let node_inner = self.node.inner.access_mut(&mut owner_inner);
            // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
            // its owner, so it is either in this death list or in no death list.
            unsafe { node_inner.death_list.remove(self) };
        }
        needs_queueing
    }

    /// Sets the 'notification done' flag to `true`.
    pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
        let needs_queueing = {
            let mut inner = self.inner.lock();
            inner.notification_done = true;
            inner.cleared
        };
        if needs_queueing {
            if let Some(death) = ListArc::try_from_arc_or_drop(self) {
                let _ = thread.push_work_if_looper(death);
            }
        }
    }

    /// Sets the 'dead' flag to `true` and queues a work item if needed.
    pub(crate) fn set_dead(self: DArc<Self>) {
        let needs_queueing = {
            let mut inner = self.inner.lock();
            if inner.cleared {
                false
            } else {
                inner.dead = true;
                true
            }
        };
        if needs_queueing {
            // Push the death notification to the target process. There is nothing else to do if
            // it's already dead.
            if let Some(death) = ListArc::try_from_arc_or_drop(self) {
                let process = death.process.clone();
                let _ = process.push_work(death);
            }
        }
    }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for NodeDeath {
        tracked_by links_track: AtomicTracker;
    }
}

kernel::list::impl_has_list_links! {
    impl HasListLinks<1> for DTRWrap<NodeDeath> { self.wrapped.death_links }
}
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
}
kernel::list::impl_list_item! {
    impl ListItem<1> for DTRWrap<NodeDeath> {
        using ListLinks;
    }
}

kernel::list::impl_has_list_links! {
    impl HasListLinks<2> for DTRWrap<NodeDeath> { self.wrapped.delivered_links }
}
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<2> for DTRWrap<NodeDeath> {
        tracked_by wrapped: NodeDeath;
    }
}
kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<2> for NodeDeath {
        tracked_by delivered_links_track: AtomicTracker<2>;
    }
}
kernel::list::impl_list_item! {
    impl ListItem<2> for DTRWrap<NodeDeath> {
        using ListLinks;
    }
}

impl DeliverToRead for NodeDeath {
    fn do_work(
        self: DArc<Self>,
        _thread: &Thread,
        writer: &mut BinderReturnWriter<'_>,
    ) -> Result<bool> {
        let done = {
            let inner = self.inner.lock();
            if inner.aborted {
                return Ok(true);
            }
            inner.cleared && (!inner.dead || inner.notification_done)
        };

        let cookie = self.cookie;
        let cmd = if done {
            BR_CLEAR_DEATH_NOTIFICATION_DONE
        } else {
            let process = self.process.clone();
            let mut process_inner = process.inner.lock();
            let inner = self.inner.lock();
            if inner.aborted {
                return Ok(true);
            }
            // We're still holding the inner lock, so it cannot be aborted while we insert it into
            // the delivered list.
            process_inner.death_delivered(self.clone());
            BR_DEAD_BINDER
        };

        writer.write_code(cmd)?;
        writer.write_payload(&cookie)?;
        // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
        // get to a death notification.
        Ok(cmd != BR_DEAD_BINDER)
    }

    fn cancel(self: DArc<Self>) {}
    fn on_thread_selected(&self, _thread: &Thread) {}

    fn should_sync_wakeup(&self) -> bool {
        false
    }

    #[inline(never)]
    fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
        let inner = self.inner.lock();

        let dead_binder = inner.dead && !inner.notification_done;

        if dead_binder {
            if inner.cleared {
                seq_print!(m, "{}has cleared dead binder\n", prefix);
            } else {
                seq_print!(m, "{}has dead binder\n", prefix);
            }
        } else {
            seq_print!(m, "{}has cleared death notification\n", prefix);
        }

        Ok(())
    }
}