1 // SPDX-License-Identifier: GPL-2.0
2 
3 // Copyright (C) 2024 Google LLC.
4 
5 //! This module defines the `Thread` type, which represents a userspace thread that is using
6 //! binder.
7 //!
8 //! The `Process` object stores all of the threads in an rb tree.
9 
10 use kernel::{
11     bindings,
12     fs::{File, LocalFile},
13     list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
14     prelude::*,
15     security,
16     seq_file::SeqFile,
17     seq_print,
18     sync::poll::{PollCondVar, PollTable},
19     sync::{Arc, SpinLock},
20     task::Task,
21     types::{ARef, Either},
22     uaccess::UserSlice,
23     uapi,
24 };
25 
26 use crate::{
27     allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
28     defs::*,
29     error::BinderResult,
30     prio::{self, BinderPriority, PriorityState},
31     process::Process,
32     ptr_align,
33     stats::GLOBAL_STATS,
34     transaction::Transaction,
35     BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
36 };
37 
38 use core::{
39     mem::size_of,
40     sync::atomic::{AtomicU32, Ordering},
41 };
42 
43 /// Stores the layout of the scatter-gather entries. This is used during the `translate_objects`
44 /// call and is discarded when it returns.
45 struct ScatterGatherState {
46     /// A struct that tracks the amount of unused buffer space.
47     unused_buffer_space: UnusedBufferSpace,
48     /// Scatter-gather entries to copy.
49     sg_entries: KVec<ScatterGatherEntry>,
50     /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
51     /// was processed and all of its ancestors. The array is in sorted order.
52     ancestors: KVec<usize>,
53 }
54 
55 /// This entry specifies an additional buffer that should be copied using the scatter-gather
56 /// mechanism.
57 struct ScatterGatherEntry {
58     /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
59     obj_index: usize,
60     /// Offset in target buffer.
61     offset: usize,
62     /// User address in source buffer.
63     sender_uaddr: usize,
64     /// Number of bytes to copy.
65     length: usize,
66     /// The minimum offset of the next fixup in this buffer.
67     fixup_min_offset: usize,
68     /// The offsets within this buffer that contain pointers which should be translated.
69     pointer_fixups: KVec<PointerFixupEntry>,
70 }
71 
72 /// This entry specifies that a fixup should happen at `target_offset` of the
73 /// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
74 /// and is applied later. Otherwise, the size of the fixup is
75 /// `size_of::<u64>()` and `pointer_value` is written to the buffer.
76 struct PointerFixupEntry {
77     /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
78     skip: usize,
79     /// The translated pointer to write when `skip` is zero.
80     pointer_value: u64,
81     /// The offset at which the value should be written. The offset is relative
82     /// to the original buffer.
83     target_offset: usize,
84 }
85 
86 /// Return type of `ScatterGatherState::validate_parent_fixup`.
87 struct ParentFixupInfo {
88     /// The index of the parent buffer in `sg_entries`.
89     parent_sg_index: usize,
90     /// The number of ancestors of the buffer.
91     ///
92     /// The buffer is considered an ancestor of itself, so this is always at
93     /// least one.
94     num_ancestors: usize,
95     /// New value of `fixup_min_offset` if this fixup is applied.
96     new_min_offset: usize,
97     /// The offset of the fixup in the target buffer.
98     target_offset: usize,
99 }
100 
101 impl ScatterGatherState {
102     /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
103     /// to access a region in its parent buffer. These accesses have various
104     /// restrictions, which this method verifies.
105     ///
106     /// The `parent_offset` and `length` arguments describe the offset and
107     /// length of the access in the parent buffer.
108     ///
109     /// # Detailed restrictions
110     ///
111     /// Obviously the fixup must be in-bounds for the parent buffer.
112     ///
113     /// For safety reasons, we only allow fixups inside a buffer to happen
114     /// at increasing offsets; additionally, we only allow fixup on the last
115     /// buffer object that was verified, or one of its parents.
116     ///
117     /// Example of what is allowed:
118     ///
119     /// A
120     ///   B (parent = A, offset = 0)
121     ///   C (parent = A, offset = 16)
122     ///     D (parent = C, offset = 0)
123     ///   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
124     ///
125     /// Examples of what is not allowed:
126     ///
127     /// Decreasing offsets within the same parent:
128     /// A
129     ///   C (parent = A, offset = 16)
130     ///   B (parent = A, offset = 0) // decreasing offset within A
131     ///
132 /// Referring to a parent that wasn't the last object or any of its parents:
133     /// A
134     ///   B (parent = A, offset = 0)
135     ///   C (parent = A, offset = 0)
136     ///   C (parent = A, offset = 16)
137     ///     D (parent = B, offset = 0) // B is not A or any of A's parents
138     fn validate_parent_fixup(
139         &self,
140         parent: usize,
141         parent_offset: usize,
142         length: usize,
143     ) -> Result<ParentFixupInfo> {
144         // Using `position` would also be correct, but `rposition` avoids
145         // quadratic running times.
146         let ancestors_i = self
147             .ancestors
148             .iter()
149             .copied()
150             .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
151             .ok_or(EINVAL)?;
152         let sg_idx = self.ancestors[ancestors_i];
153         let sg_entry = match self.sg_entries.get(sg_idx) {
154             Some(sg_entry) => sg_entry,
155             None => {
156                 pr_err!(
157                     "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
158                     ancestors_i,
159                     sg_idx,
160                     self.sg_entries.len()
161                 );
162                 return Err(EINVAL);
163             }
164         };
165         if sg_entry.fixup_min_offset > parent_offset {
166             pr_warn!(
167                 "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
168                 sg_entry.fixup_min_offset,
169                 parent_offset
170             );
171             return Err(EINVAL);
172         }
173         let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
174         if new_min_offset > sg_entry.length {
175             pr_warn!(
176                 "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
177                 new_min_offset,
178                 sg_entry.length
179             );
180             return Err(EINVAL);
181         }
182         let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
183         // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
184         // most `self.ancestors.len()`, which also fits in a usize.
185         Ok(ParentFixupInfo {
186             parent_sg_index: sg_idx,
187             num_ancestors: ancestors_i + 1,
188             new_min_offset,
189             target_offset,
190         })
191     }
192 }
193 
194 /// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
195 /// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
196 /// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
197 struct UnusedBufferSpace {
198     /// The start of the remaining space.
199     offset: usize,
200     /// The end of the remaining space.
201     limit: usize,
202 }
203 impl UnusedBufferSpace {
204     /// Claim the next `size` bytes from the unused buffer space. The offset for the claimed chunk
205     /// into the buffer is returned.
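    ///
    /// Illustrative example (assuming a 64-bit kernel, where `ptr_align` rounds sizes up to 8
    /// bytes): starting from `offset == 0` and `limit == 32`, claiming 12 bytes returns offset 0
    /// and advances `offset` to 16, so a subsequent claim of 16 bytes returns offset 16 and
    /// exhausts the remaining space.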
206     fn claim_next(&mut self, size: usize) -> Result<usize> {
207         // We require every chunk to be aligned.
208         let size = ptr_align(size).ok_or(EINVAL)?;
209         let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
210 
211         if new_offset <= self.limit {
212             let offset = self.offset;
213             self.offset = new_offset;
214             Ok(offset)
215         } else {
216             Err(EINVAL)
217         }
218     }
219 }
220 
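/// The outcome of pushing a work item to a thread: either the item was queued, or the thread is
/// dead and the item is handed back to the caller.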
221 pub(crate) enum PushWorkRes {
222     Ok,
223     FailedDead(DLArc<dyn DeliverToRead>),
224 }
225 
226 impl PushWorkRes {
227     fn is_ok(&self) -> bool {
228         match self {
229             PushWorkRes::Ok => true,
230             PushWorkRes::FailedDead(_) => false,
231         }
232     }
233 }
234 
235 /// The fields of `Thread` protected by the spinlock.
236 struct InnerThread {
237     /// Determines the looper state of the thread. It is a bit-wise combination of the constants
238     /// prefixed with `LOOPER_`.
239     looper_flags: u32,
240 
241     /// Determines whether the looper should return.
242     looper_need_return: bool,
243 
244     /// Determines if thread is dead.
245     is_dead: bool,
246 
247     /// Work item used to deliver error codes to the thread that started a transaction. Stored here
248     /// so that it can be reused.
249     reply_work: DArc<ThreadError>,
250 
251     /// Work item used to deliver error codes to the current thread. Stored here so that it can be
252     /// reused.
253     return_work: DArc<ThreadError>,
254 
255     /// Determines whether the work list below should be processed. When set to false, `work_list`
256     /// is treated as if it were empty.
257     process_work_list: bool,
258     /// List of work items to deliver to userspace.
259     work_list: List<DTRWrap<dyn DeliverToRead>>,
260     current_transaction: Option<DArc<Transaction>>,
261 
262     /// Extended error information for this thread.
263     extended_error: ExtendedError,
264 }
265 
266 const LOOPER_REGISTERED: u32 = 0x01;
267 const LOOPER_ENTERED: u32 = 0x02;
268 const LOOPER_EXITED: u32 = 0x04;
269 const LOOPER_INVALID: u32 = 0x08;
270 const LOOPER_WAITING: u32 = 0x10;
271 const LOOPER_WAITING_PROC: u32 = 0x20;
272 const LOOPER_POLL: u32 = 0x40;
273 
274 impl InnerThread {
275     fn new() -> Result<Self> {
276         fn next_err_id() -> u32 {
277             static EE_ID: AtomicU32 = AtomicU32::new(0);
278             EE_ID.fetch_add(1, Ordering::Relaxed)
279         }
280 
281         Ok(Self {
282             looper_flags: 0,
283             looper_need_return: false,
284             is_dead: false,
285             process_work_list: false,
286             reply_work: ThreadError::try_new()?,
287             return_work: ThreadError::try_new()?,
288             work_list: List::new(),
289             current_transaction: None,
290             extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
291         })
292     }
293 
294     fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
295         if !self.process_work_list {
296             return None;
297         }
298 
299         let ret = self.work_list.pop_front();
300         self.process_work_list = !self.work_list.is_empty();
301         ret
302     }
303 
304     fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
305         if self.is_dead {
306             PushWorkRes::FailedDead(work)
307         } else {
308             self.work_list.push_back(work);
309             self.process_work_list = true;
310             PushWorkRes::Ok
311         }
312     }
313 
314     fn push_reply_work(&mut self, code: u32) {
315         if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
316             work.set_error_code(code);
317             self.push_work(work);
318         } else {
319             pr_warn!("Thread reply work is already in use.");
320         }
321     }
322 
323     fn push_return_work(&mut self, reply: u32) {
324         if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
325             work.set_error_code(reply);
326             self.push_work(work);
327         } else {
328             pr_warn!("Thread return work is already in use.");
329         }
330     }
331 
332     /// Used to push work items that do not need to be processed immediately and can wait until the
333     /// thread gets another work item.
334     fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
335         self.work_list.push_back(work);
336     }
337 
338     /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
339     /// (that it could respond to) but it has also issued a transaction, it must first wait for the
340     /// previously-issued transaction to complete.
341     ///
342     /// The `thread` parameter should be the thread containing this `ThreadInner`.
343     fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
344         let transaction = self.current_transaction.take().ok_or(EINVAL)?;
345         if core::ptr::eq(thread, transaction.from.as_ref()) {
346             self.current_transaction = Some(transaction);
347             return Err(EINVAL);
348         }
349         // Find a new current transaction for this thread.
350         self.current_transaction = transaction.find_from(thread).cloned();
351         Ok(transaction)
352     }
353 
354     fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
355         match self.current_transaction.take() {
356             None => false,
357             Some(old) => {
358                 if !Arc::ptr_eq(transaction, &old) {
359                     self.current_transaction = Some(old);
360                     return false;
361                 }
362                 self.current_transaction = old.clone_next();
363                 true
364             }
365         }
366     }
367 
368     fn looper_enter(&mut self) {
369         self.looper_flags |= LOOPER_ENTERED;
370         if self.looper_flags & LOOPER_REGISTERED != 0 {
371             self.looper_flags |= LOOPER_INVALID;
372         }
373     }
374 
375     fn looper_register(&mut self, valid: bool) {
376         self.looper_flags |= LOOPER_REGISTERED;
377         if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
378             self.looper_flags |= LOOPER_INVALID;
379         }
380     }
381 
382     fn looper_exit(&mut self) {
383         self.looper_flags |= LOOPER_EXITED;
384     }
385 
386     /// Determines whether the thread is part of a pool, i.e., if it is a looper.
387     fn is_looper(&self) -> bool {
388         self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
389     }
390 
391     /// Determines whether the thread should attempt to fetch work items from the process queue.
392     /// This is generally the case when the thread is registered as a looper and not part of a
393     /// transaction stack. But if there is local work, we want to return to userspace before we
394     /// deliver any remote work.
395     fn should_use_process_work_queue(&self) -> bool {
396         self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
397     }
398 
399     fn poll(&mut self) -> u32 {
400         self.looper_flags |= LOOPER_POLL;
401         if self.process_work_list || self.looper_need_return {
402             bindings::POLLIN
403         } else {
404             0
405         }
406     }
407 }
408 
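/// Scheduler-priority bookkeeping for a thread, used by the priority-inheritance logic in the
/// `prio` module.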
409 pub(crate) struct ThreadPrioState {
410     pub(crate) state: PriorityState,
411     pub(crate) next: BinderPriority,
412 }
413 
414 use core::mem::offset_of;
415 use kernel::bindings::rb_thread_layout;
416 pub(crate) const THREAD_LAYOUT: rb_thread_layout = rb_thread_layout {
417     arc_offset: Arc::<Thread>::DATA_OFFSET,
418     process: offset_of!(Thread, process),
419     id: offset_of!(Thread, id),
420 };
421 
422 /// This represents a thread that's used with binder.
423 #[pin_data]
424 pub(crate) struct Thread {
425     pub(crate) id: i32,
426     pub(crate) process: Arc<Process>,
427     pub(crate) task: ARef<Task>,
428     #[pin]
429     inner: SpinLock<InnerThread>,
430     #[pin]
431     pub(crate) prio_lock: SpinLock<ThreadPrioState>,
432     #[pin]
433     work_condvar: PollCondVar,
434     /// Used to insert this thread into the process' `ready_threads` list.
435     ///
436     /// INVARIANT: May never be used for any list other than `self.process.ready_threads`.
437     #[pin]
438     links: ListLinks,
439     #[pin]
440     links_track: AtomicTracker,
441 }
442 
443 kernel::list::impl_has_list_links! {
444     impl HasListLinks<0> for Thread { self.links }
445 }
446 kernel::list::impl_list_arc_safe! {
447     impl ListArcSafe<0> for Thread {
448         tracked_by links_track: AtomicTracker;
449     }
450 }
451 kernel::list::impl_list_item! {
452     impl ListItem<0> for Thread {
453         using ListLinks;
454     }
455 }
456 
457 impl Thread {
458     pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
459         let inner = InnerThread::new()?;
460 
461         let prio = ThreadPrioState {
462             state: PriorityState::Set,
463             next: BinderPriority::default(),
464         };
465 
466         Arc::pin_init(
467             try_pin_init!(Thread {
468                 id,
469                 process,
470                 task: ARef::from(&**kernel::current!()),
471                 inner <- kernel::new_spinlock!(inner, "Thread::inner"),
472                 prio_lock <- kernel::new_spinlock!(prio, "Thread::prio_lock"),
473                 work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
474                 links <- ListLinks::new(),
475                 links_track <- AtomicTracker::new(),
476             }),
477             GFP_KERNEL,
478         )
479     }
480 
481     #[inline(never)]
482     pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
483         let inner = self.inner.lock();
484 
485         if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
486             seq_print!(
487                 m,
488                 "  thread {}: l {:02x} need_return {}\n",
489                 self.id,
490                 inner.looper_flags,
491                 inner.looper_need_return,
492             );
493         }
494 
495         let mut t_opt = inner.current_transaction.as_ref();
496         while let Some(t) = t_opt {
497             if Arc::ptr_eq(&t.from, self) {
498                 t.debug_print_inner(m, "    outgoing transaction ");
499                 t_opt = t.from_parent.as_ref();
500             } else if Arc::ptr_eq(&t.to, &self.process) {
501                 t.debug_print_inner(m, "    incoming transaction ");
502                 t_opt = t.find_from(self);
503             } else {
504                 t.debug_print_inner(m, "    bad transaction ");
505                 t_opt = None;
506             }
507         }
508 
509         for work in &inner.work_list {
510             work.debug_print(m, "    ", "    pending transaction ")?;
511         }
512         Ok(())
513     }
514 
515     pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
516         let mut writer = data.writer();
517         let ee = self.inner.lock().extended_error;
518         writer.write(&ee)?;
519         Ok(())
520     }
521 
522     pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
523         self.inner.lock().current_transaction = Some(transaction);
524     }
525 
526     pub(crate) fn has_current_transaction(&self) -> bool {
527         self.inner.lock().current_transaction.is_some()
528     }
529 
530     /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
531     /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
532     /// signal); otherwise it returns indicating that none is available.
533     fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
534         {
535             let mut inner = self.inner.lock();
536             if inner.looper_need_return {
537                 return Ok(inner.pop_work());
538             }
539         }
540 
541         // Try once if the caller does not want to wait.
542         if !wait {
543             return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
544         }
545 
546         // Loop waiting only on the local queue (i.e., not registering with the process queue).
547         let mut inner = self.inner.lock();
548         loop {
549             if let Some(work) = inner.pop_work() {
550                 return Ok(Some(work));
551             }
552 
553             inner.looper_flags |= LOOPER_WAITING;
554             let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
555             inner.looper_flags &= !LOOPER_WAITING;
556 
557             if signal_pending {
558                 return Err(EINTR);
559             }
560             if inner.looper_need_return {
561                 return Ok(None);
562             }
563         }
564     }
565 
566     /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
567     /// queue if none is available locally.
568     ///
569     /// This must only be called when the thread is not participating in a transaction chain. If it
570     /// is, the local version (`get_work_local`) should be used instead.
571     fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
572         // Try to get work from the thread's work queue, using only a local lock.
573         {
574             let mut inner = self.inner.lock();
575             if let Some(work) = inner.pop_work() {
576                 return Ok(Some(work));
577             }
578             if inner.looper_need_return {
579                 drop(inner);
580                 return Ok(self.process.get_work());
581             }
582         }
583 
584         // If the caller doesn't want to wait, try to grab work from the process queue.
585         //
586         // We know nothing will have been queued directly to the thread queue because it is not in
587         // a transaction and it is not in the process' ready list.
588         if !wait {
589             return self.process.get_work().ok_or(EAGAIN).map(Some);
590         }
591 
592         // Get work from the process queue. If none is available, atomically register as ready.
593         let reg = match self.process.get_work_or_register(self) {
594             Either::Left(work) => return Ok(Some(work)),
595             Either::Right(reg) => reg,
596         };
597 
598         let mut inner = self.inner.lock();
599         loop {
600             if let Some(work) = inner.pop_work() {
601                 return Ok(Some(work));
602             }
603 
604             self.restore_priority(&self.process.default_priority);
605 
606             inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
607             let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
608             inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
609 
610             if signal_pending || inner.looper_need_return {
611                 // We need to return now. We need to pull the thread off the list of ready threads
612                 // (by dropping `reg`), then check the state again after it's off the list to
613                 // ensure that something was not queued in the meantime. If something has been
614                 // queued, we just return it (instead of the error).
615                 drop(inner);
616                 drop(reg);
617 
618                 let res = match self.inner.lock().pop_work() {
619                     Some(work) => Ok(Some(work)),
620                     None if signal_pending => Err(EINTR),
621                     None => Ok(None),
622                 };
623                 return res;
624             }
625         }
626     }
627 
628     /// Push the provided work item to be delivered to user space via this thread.
629     ///
630     /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
631     pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
632         let sync = work.should_sync_wakeup();
633 
634         let res = self.inner.lock().push_work(work);
635 
636         if res.is_ok() {
637             if sync {
638                 self.work_condvar.notify_sync();
639             } else {
640                 self.work_condvar.notify_one();
641             }
642         }
643 
644         res
645     }
646 
647     /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
648     /// part of a thread pool) and is alive. Otherwise, pushes the work item to the process instead.
649     pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
650         let mut inner = self.inner.lock();
651         if inner.is_looper() && !inner.is_dead {
652             inner.push_work(work);
653             Ok(())
654         } else {
655             drop(inner);
656             self.process.push_work(work)
657         }
658     }
659 
660     pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
661         self.inner.lock().push_work_deferred(work);
662     }
663 
664     pub(crate) fn push_return_work(&self, reply: u32) {
665         self.inner.lock().push_return_work(reply);
666     }
667 
668     fn do_set_priority(&self, desired: &BinderPriority, verify: bool) {
669         let task = &*self.task;
670         let mut policy = desired.sched_policy;
671         let mut priority;
672 
673         if task.policy() == policy && task.normal_prio() == desired.prio {
674             let mut prio_state = self.prio_lock.lock();
675             if prio_state.state == PriorityState::Pending {
676                 prio_state.state = PriorityState::Set;
677             }
678             return;
679         }
680 
681         let has_cap_nice = task.has_capability_noaudit(bindings::CAP_SYS_NICE as _);
682         priority = prio::to_userspace_prio(policy, desired.prio);
683 
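        // When verifying a requested priority, a task without CAP_SYS_NICE may not exceed its
        // RLIMIT_RTPRIO / RLIMIT_NICE limits, so the request is clamped below.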
684         if verify && prio::is_rt_policy(policy) && !has_cap_nice {
685             // For rt_policy, we store the rt priority as a nice. (See to_userspace_prio and
686             // to_kernel_prio impls.)
687             let max_rtprio: prio::Nice = task.rlimit_rtprio();
688             if max_rtprio == 0 {
689                 policy = prio::SCHED_NORMAL;
690                 priority = prio::MIN_NICE;
691             } else if priority > max_rtprio {
692                 priority = max_rtprio;
693             }
694         }
695 
696         if verify && prio::is_fair_policy(policy) && !has_cap_nice {
697             let min_nice = task.rlimit_nice();
698 
699             if min_nice > prio::MAX_NICE {
700                 pr_err!("{} RLIMIT_NICE not set", task.pid());
701                 return;
702             } else if priority < min_nice {
703                 priority = min_nice;
704             }
705         }
706 
707         if policy != desired.sched_policy || prio::to_kernel_prio(policy, priority) != desired.prio
708         {
709             pr_debug!(
710                 "{}: priority {} not allowed, using {} instead",
711                 task.pid(),
712                 desired.prio,
713                 prio::to_kernel_prio(policy, priority),
714             );
715         }
716 
717         crate::trace::trace_set_priority(
718             task,
719             desired.prio,
720             prio::to_kernel_prio(policy, priority),
721         );
722 
723         let mut prio_state = self.prio_lock.lock();
724         if !verify && prio_state.state == PriorityState::Abort {
725             // A new priority has been set by an incoming nested
726             // transaction. Abort this priority restore and allow
727             // the transaction to run at the new desired priority.
728             drop(prio_state);
729             pr_debug!("{}: aborting priority restore", task.pid());
730             return;
731         }
732 
733         // Set the actual priority.
734         if task.policy() != policy || prio::is_rt_policy(policy) {
735             let prio = if prio::is_rt_policy(policy) {
736                 priority
737             } else {
738                 0
739             };
740             task.sched_setscheduler_nocheck(policy as i32, prio, true);
741         }
742 
743         if prio::is_fair_policy(policy) {
744             task.set_user_nice(priority);
745         }
746 
747         prio_state.state = PriorityState::Set;
748     }
749 
750     pub(crate) fn set_priority(&self, desired: &BinderPriority, t: &Transaction) {
751         self.do_set_priority(desired, true);
752         crate::trace::vh_set_priority(t, &self.task);
753     }
754 
755     pub(crate) fn restore_priority(&self, desired: &BinderPriority) {
756         self.do_set_priority(desired, false);
757         crate::trace::vh_restore_priority(&self.task);
758     }
759 
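    /// Translates a single object embedded in the transaction payload so that it makes sense to
    /// the target process: binder nodes and handles are resolved and transferred to the target,
    /// fds are recorded for later installation in the target, and `BINDER_TYPE_PTR` /
    /// `BINDER_TYPE_FDA` objects have their scatter-gather fixups recorded in `sg_state`.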
760     fn translate_object(
761         &self,
762         obj_index: usize,
763         offset: usize,
764         object: BinderObjectRef<'_>,
765         view: &mut AllocationView<'_>,
766         allow_fds: bool,
767         sg_state: &mut ScatterGatherState,
768     ) -> BinderResult {
769         match object {
770             BinderObjectRef::Binder(obj) => {
771                 let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
772                 // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
773                 // representation.
774                 let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
775                 let cookie = obj.cookie as _;
776                 let flags = obj.flags as _;
777                 let node = self
778                     .process
779                     .as_arc_borrow()
780                     .get_node(ptr, cookie, flags, strong, self)?;
781                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
782                 view.transfer_binder_object(offset, obj, strong, node)?;
783             }
784             BinderObjectRef::Handle(obj) => {
785                 let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
786                 // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
787                 let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
788                 let node = self.process.get_node_from_handle(handle, strong)?;
789                 security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
790                 view.transfer_binder_object(offset, obj, strong, node)?;
791             }
792             BinderObjectRef::Fd(obj) => {
793                 if !allow_fds {
794                     return Err(EPERM.into());
795                 }
796 
797                 // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
798                 let fd = unsafe { obj.__bindgen_anon_1.fd };
799                 let file = LocalFile::fget(fd)?;
800                 // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
801                 // ioctl, so there are no active calls to `fdget_pos` on this thread.
802                 let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
803                 security::binder_transfer_file(
804                     &self.process.cred,
805                     &view.alloc.process.cred,
806                     &file,
807                 )?;
808 
809                 let mut obj_write = BinderFdObject::default();
810                 obj_write.hdr.type_ = BINDER_TYPE_FD;
811                 // This will be overwritten with the actual fd when the transaction is received.
812                 obj_write.__bindgen_anon_1.fd = u32::MAX;
813                 obj_write.cookie = obj.cookie;
814                 view.write::<BinderFdObject>(offset, &obj_write)?;
815 
816                 const FD_FIELD_OFFSET: usize =
817                     ::core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd) as usize;
818 
819                 let field_offset = offset + FD_FIELD_OFFSET;
820                 crate::trace::trace_transaction_fd_send(view.alloc.debug_id, fd, field_offset);
821 
822                 view.alloc.info_add_fd(file, field_offset, false)?;
823             }
824             BinderObjectRef::Ptr(obj) => {
825                 let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
826                 let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
827                     Ok(alloc_offset) => alloc_offset,
828                     Err(err) => {
829                         pr_warn!(
830                             "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
831                             sg_state.unused_buffer_space.offset,
832                             sg_state.unused_buffer_space.limit,
833                             obj_length,
834                         );
835                         return Err(err.into());
836                     }
837                 };
838 
839                 let sg_state_idx = sg_state.sg_entries.len();
840                 sg_state.sg_entries.push(
841                     ScatterGatherEntry {
842                         obj_index,
843                         offset: alloc_offset,
844                         sender_uaddr: obj.buffer as _,
845                         length: obj_length,
846                         pointer_fixups: KVec::new(),
847                         fixup_min_offset: 0,
848                     },
849                     GFP_KERNEL,
850                 )?;
851 
852                 let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
853 
854                 if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
855                     sg_state.ancestors.clear();
856                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
857                 } else {
858                     // Another buffer also has a pointer to this buffer, and we need to fixup that
859                     // pointer too.
860 
861                     let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
862                     let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
863 
864                     let info = sg_state.validate_parent_fixup(
865                         parent_index,
866                         parent_offset,
867                         size_of::<u64>(),
868                     )?;
869 
870                     sg_state.ancestors.truncate(info.num_ancestors);
871                     sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
872 
873                     let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
874                         Some(parent_entry) => parent_entry,
875                         None => {
876                             pr_err!(
877                                 "validate_parent_fixup returned index out of bounds for sg.entries"
878                             );
879                             return Err(EINVAL.into());
880                         }
881                     };
882 
883                     parent_entry.fixup_min_offset = info.new_min_offset;
884                     parent_entry.pointer_fixups.push(
885                         PointerFixupEntry {
886                             skip: 0,
887                             pointer_value: buffer_ptr_in_user_space,
888                             target_offset: info.target_offset,
889                         },
890                         GFP_KERNEL,
891                     )?;
892                 }
893 
894                 let mut obj_write = BinderBufferObject::default();
895                 obj_write.hdr.type_ = BINDER_TYPE_PTR;
896                 obj_write.flags = obj.flags;
897                 obj_write.buffer = buffer_ptr_in_user_space;
898                 obj_write.length = obj.length;
899                 obj_write.parent = obj.parent;
900                 obj_write.parent_offset = obj.parent_offset;
901                 view.write::<BinderBufferObject>(offset, &obj_write)?;
902             }
903             BinderObjectRef::Fda(obj) => {
904                 if !allow_fds {
905                     return Err(EPERM.into());
906                 }
907                 let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
908                 let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
909                 let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
910                 let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
911 
912                 let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
913                 view.alloc.info_add_fd_reserve(num_fds)?;
914 
915                 sg_state.ancestors.truncate(info.num_ancestors);
916                 let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
917                     Some(parent_entry) => parent_entry,
918                     None => {
919                         pr_err!(
920                             "validate_parent_fixup returned index out of bounds for sg.entries"
921                         );
922                         return Err(EINVAL.into());
923                     }
924                 };
925 
926                 parent_entry.fixup_min_offset = info.new_min_offset;
927                 parent_entry
928                     .pointer_fixups
929                     .push(
930                         PointerFixupEntry {
931                             skip: fds_len,
932                             pointer_value: 0,
933                             target_offset: info.target_offset,
934                         },
935                         GFP_KERNEL,
936                     )
937                     .map_err(|_| ENOMEM)?;
938 
939                 let fda_uaddr = parent_entry
940                     .sender_uaddr
941                     .checked_add(parent_offset)
942                     .ok_or(EINVAL)?;
943                 let mut fda_bytes = KVec::new();
944                 UserSlice::new(fda_uaddr as _, fds_len).read_all(&mut fda_bytes, GFP_KERNEL)?;
945 
946                 if fds_len != fda_bytes.len() {
947                     pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
948                     return Err(EINVAL.into());
949                 }
950 
951                 for i in (0..fds_len).step_by(size_of::<u32>()) {
952                     let fd = {
953                         let mut fd_bytes = [0u8; size_of::<u32>()];
954                         fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
955                         u32::from_ne_bytes(fd_bytes)
956                     };
957 
958                     let file = LocalFile::fget(fd)?;
959                     // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
960                     // ioctl, so there are no active calls to `fdget_pos` on this thread.
961                     let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
962                     security::binder_transfer_file(
963                         &self.process.cred,
964                         &view.alloc.process.cred,
965                         &file,
966                     )?;
967 
968                     // The `validate_parent_fixup` call ensures that this addition will not
969                     // overflow.
970                     view.alloc.info_add_fd(file, info.target_offset + i, true)?;
971                 }
972                 drop(fda_bytes);
973 
974                 let mut obj_write = BinderFdArrayObject::default();
975                 obj_write.hdr.type_ = BINDER_TYPE_FDA;
976                 obj_write.num_fds = obj.num_fds;
977                 obj_write.parent = obj.parent;
978                 obj_write.parent_offset = obj.parent_offset;
979                 view.write::<BinderFdArrayObject>(offset, &obj_write)?;
980             }
981         }
982         Ok(())
983     }
984 
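    /// Copies the scatter-gather buffers from the sender into `alloc`, applying the recorded
    /// pointer fixups along the way: bytes between fixups are copied verbatim, a fixup with
    /// `skip == 0` gets the translated `pointer_value` written at its target offset, and a
    /// nonzero `skip` (an fd array) leaves that region to be filled in later.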
985     fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
986         for sg_entry in &mut sg_state.sg_entries {
987             let mut end_of_previous_fixup = sg_entry.offset;
988             let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
989 
990             let mut reader = UserSlice::new(sg_entry.sender_uaddr as _, sg_entry.length).reader();
991             for fixup in &mut sg_entry.pointer_fixups {
992                 let fixup_len = if fixup.skip == 0 {
993                     size_of::<u64>()
994                 } else {
995                     fixup.skip
996                 };
997 
998                 let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
999                 if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
1000                     pr_warn!(
1001                         "Fixups oob {} {} {} {}",
1002                         fixup.target_offset,
1003                         end_of_previous_fixup,
1004                         offset_end,
1005                         target_offset_end
1006                     );
1007                     return Err(EINVAL.into());
1008                 }
1009 
1010                 let copy_off = end_of_previous_fixup;
1011                 let copy_len = fixup.target_offset - end_of_previous_fixup;
1012                 if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
1013                     pr_warn!("Failed copying into alloc: {:?}", err);
1014                     return Err(err.into());
1015                 }
1016                 if fixup.skip == 0 {
1017                     let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
1018                     if let Err(err) = res {
1019                         pr_warn!("Failed copying ptr into alloc: {:?}", err);
1020                         return Err(err.into());
1021                     }
1022                 }
1023                 if let Err(err) = reader.skip(fixup_len) {
1024                     pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
1025                     return Err(err.into());
1026                 }
1027                 end_of_previous_fixup = target_offset_end;
1028             }
1029             let copy_off = end_of_previous_fixup;
1030             let copy_len = offset_end - end_of_previous_fixup;
1031             if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
1032                 pr_warn!("Failed copying remainder into alloc: {:?}", err);
1033                 return Err(err.into());
1034             }
1035         }
1036         Ok(())
1037     }
1038 
1039     /// This method copies the payload of a transaction into the target process.
1040     ///
1041     /// The resulting payload will have several different components, which will be stored next to
1042     /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
1043     /// and those objects have to be translated so that they make sense to the target transaction.
1044     pub(crate) fn copy_transaction_data(
1045         &self,
1046         to_process: Arc<Process>,
1047         tr: &BinderTransactionDataSg,
1048         debug_id: usize,
1049         allow_fds: bool,
1050         txn_security_ctx_offset: Option<&mut usize>,
1051     ) -> BinderResult<NewAllocation> {
1052         let trd = &tr.transaction_data;
1053         let is_oneway = trd.flags & TF_ONE_WAY != 0;
1054         let mut secctx = if let Some(offset) = txn_security_ctx_offset {
1055             let secid = self.process.cred.get_secid();
1056             let ctx = match security::SecurityCtx::from_secid(secid) {
1057                 Ok(ctx) => ctx,
1058                 Err(err) => {
1059                     pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
1060                     return Err(err.into());
1061                 }
1062             };
1063             Some((offset, ctx))
1064         } else {
1065             None
1066         };
1067 
1068         let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
1069         let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
1070         let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
1071         let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
1072         let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
1073         let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
1074         let aligned_secctx_size = match secctx.as_ref() {
1075             Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
1076             None => 0,
1077         };
1078 
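        // The allocation is laid out as consecutive, pointer-aligned regions: the raw
        // transaction data, the offsets array, the extra buffers claimed by `BINDER_TYPE_PTR`
        // objects, and (if requested) the security context.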
1079         // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
1080         let len = usize::max(
1081             aligned_data_size
1082                 .checked_add(aligned_offsets_size)
1083                 .and_then(|sum| sum.checked_add(aligned_buffers_size))
1084                 .and_then(|sum| sum.checked_add(aligned_secctx_size))
1085                 .ok_or(ENOMEM)?,
1086             size_of::<usize>(),
1087         );
1088         let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
1089         let mut alloc =
1090             match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
1091                 Ok(alloc) => alloc,
1092                 Err(err) => {
1093                     pr_warn!(
1094                         "Failed to allocate buffer. len:{}, is_oneway:{}",
1095                         len,
1096                         is_oneway
1097                     );
1098                     return Err(err);
1099                 }
1100             };
1101 
1102         crate::trace::trace_transaction_alloc_buf(debug_id, tr);
1103 
1104         // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
1105         // all bit-patterns.
1106         let trd_data_ptr = unsafe { &trd.data.ptr };
1107         let mut buffer_reader = UserSlice::new(trd_data_ptr.buffer as _, data_size).reader();
1108         let mut end_of_previous_object = 0;
1109         let mut sg_state = None;
1110 
1111         // Copy offsets if there are any.
1112         if offsets_size > 0 {
1113             {
1114                 let mut reader = UserSlice::new(trd_data_ptr.offsets as _, offsets_size).reader();
1115                 alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
1116             }
1117 
1118             let offsets_start = aligned_data_size;
1119             let offsets_end = aligned_data_size + aligned_offsets_size;
1120 
1121             // This state is used for BINDER_TYPE_PTR objects.
1122             let sg_state = sg_state.insert(ScatterGatherState {
1123                 unused_buffer_space: UnusedBufferSpace {
1124                     offset: offsets_end,
1125                     limit: len,
1126                 },
1127                 sg_entries: KVec::new(),
1128                 ancestors: KVec::new(),
1129             });
1130 
1131             // Traverse the objects specified.
1132             let mut view = AllocationView::new(&mut alloc, data_size);
1133             for (index, index_offset) in (offsets_start..offsets_end)
1134                 .step_by(size_of::<usize>())
1135                 .enumerate()
1136             {
1137                 let offset = view.alloc.read(index_offset)?;
1138 
1139                 if offset < end_of_previous_object {
1140                     pr_warn!("Got transaction with invalid offset.");
1141                     return Err(EINVAL.into());
1142                 }
1143 
1144                 // Copy data between two objects.
1145                 if end_of_previous_object < offset {
1146                     view.copy_into(
1147                         &mut buffer_reader,
1148                         end_of_previous_object,
1149                         offset - end_of_previous_object,
1150                     )?;
1151                 }
1152 
1153                 let mut object = BinderObject::read_from(&mut buffer_reader)?;
1154 
1155                 match self.translate_object(
1156                     index,
1157                     offset,
1158                     object.as_ref(),
1159                     &mut view,
1160                     allow_fds,
1161                     sg_state,
1162                 ) {
1163                     Ok(()) => end_of_previous_object = offset + object.size(),
1164                     Err(err) => {
1165                         pr_warn!("Error while translating object.");
1166                         return Err(err);
1167                     }
1168                 }
1169 
1170                 // Update the indexes containing objects to clean up.
1171                 let offset_after_object = index_offset + size_of::<usize>();
1172                 view.alloc
1173                     .set_info_offsets(offsets_start..offset_after_object);
1174             }
1175         }
1176 
1177         // Copy remaining raw data.
1178         alloc.copy_into(
1179             &mut buffer_reader,
1180             end_of_previous_object,
1181             data_size - end_of_previous_object,
1182         )?;
1183 
1184         if let Some(sg_state) = sg_state.as_mut() {
1185             if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
1186                 pr_warn!("Failure in apply_sg: {:?}", err);
1187                 return Err(err);
1188             }
1189         }
1190 
1191         if let Some((off_out, secctx)) = secctx.as_mut() {
1192             if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
1193                 pr_warn!("Failed to write security context: {:?}", err);
1194                 return Err(err.into());
1195             }
1196             **off_out = secctx_off;
1197         }
1198         Ok(alloc)
1199     }
1200 
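    /// Unwinds the transaction stack of a thread that is going away: the thread waiting for a
    /// reply to the current transaction receives `BR_DEAD_REPLY`, and if that thread is itself
    /// dead, its own stack is unwound in the same way.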
1201     fn unwind_transaction_stack(self: &Arc<Self>) {
1202         let mut thread = self.clone();
1203         while let Ok(transaction) = {
1204             let mut inner = thread.inner.lock();
1205             inner.pop_transaction_to_reply(thread.as_ref())
1206         } {
1207             let reply = Err(BR_DEAD_REPLY);
1208             if !transaction.from.deliver_single_reply(reply, &transaction) {
1209                 break;
1210             }
1211 
1212             thread = transaction.from.clone();
1213         }
1214     }
1215 
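    /// Delivers a reply (or an error code) to the thread that started `transaction`, unwinding
    /// that thread's transaction stack if it turns out to be dead.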
1216     pub(crate) fn deliver_reply(
1217         &self,
1218         reply: Result<DLArc<Transaction>, u32>,
1219         transaction: &DArc<Transaction>,
1220     ) {
1221         if self.deliver_single_reply(reply, transaction) {
1222             transaction.from.unwind_transaction_stack();
1223         }
1224     }
1225 
1226     /// Delivers a reply to the thread that started a transaction. The reply can either be a
1227     /// reply-transaction or an error code to be delivered instead.
1228     ///
1229     /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
1230     /// transaction stack by completing transactions for threads that are dead.
1231     fn deliver_single_reply(
1232         &self,
1233         reply: Result<DLArc<Transaction>, u32>,
1234         transaction: &DArc<Transaction>,
1235     ) -> bool {
1236         if let Ok(transaction) = &reply {
1237             crate::trace::trace_transaction(true, &transaction);
1238 
1239             transaction.set_outstanding(&mut self.process.inner.lock());
1240         }
1241 
1242         {
1243             let mut inner = self.inner.lock();
1244             if !inner.pop_transaction_replied(transaction) {
1245                 return false;
1246             }
1247 
1248             if inner.is_dead {
1249                 return true;
1250             }
1251 
1252             match reply {
1253                 Ok(work) => {
1254                     inner.push_work(work);
1255                 }
1256                 Err(code) => inner.push_reply_work(code),
1257             }
1258         }
1259 
1260         // Notify the thread now that we've released the inner lock.
1261         self.work_condvar.notify_sync();
1262         false
1263     }
1264 
1265     /// Determines if the given transaction is the current transaction for this thread.
1266     fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
1267         let inner = self.inner.lock();
1268         match &inner.current_transaction {
1269             None => false,
1270             Some(current) => Arc::ptr_eq(current, transaction),
1271         }
1272     }
1273 
1274     /// Determines the current top of the transaction stack. It fails if the top is in another
1275     /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
1276     /// [`None`] if the thread is not currently participating in a transaction stack.
1277     fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
1278         let inner = self.inner.lock();
1279         if let Some(cur) = &inner.current_transaction {
1280             if core::ptr::eq(self, cur.from.as_ref()) {
1281                 pr_warn!("got new transaction with bad transaction stack");
1282                 return Err(EINVAL);
1283             }
1284             Ok(Some(cur.clone()))
1285         } else {
1286             Ok(None)
1287         }
1288     }
1289 
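         /// Runs the transaction handler `inner` and, if it fails, logs the failure (when warranted)
         /// and pushes the error code back to this thread as return work.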
1290     fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
1291     where
1292         T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
1293     {
1294         if let Err(err) = inner(self, tr) {
1295             if err.should_pr_warn() {
1296                 let mut ee = self.inner.lock().extended_error;
1297                 ee.command = err.reply;
1298                 ee.param = err.as_errno();
1299                 pr_warn!(
1300                     "Transaction failed: {:?} my_pid:{}",
1301                     err,
1302                     self.process.pid_in_current_ns()
1303                 );
1304             }
1305 
1306             self.push_return_work(err.reply);
1307         }
1308     }
1309 
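         /// Creates and submits a synchronous (two-way) transaction for `BC_TRANSACTION` and
         /// `BC_TRANSACTION_SG`, pushing it onto this thread's transaction stack.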
1310     fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
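             // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
             // union is okay.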
1311         let handle = unsafe { tr.transaction_data.target.handle };
1312         let node_ref = self.process.get_transaction_node(handle)?;
1313         security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1314         // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
1315         // could this happen?
1316         let top = self.top_of_transaction_stack()?;
1317         let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
1318         let completion = list_completion.clone_arc();
1319         let transaction = Transaction::new(node_ref, top, self, tr)?;
1320 
1321         // Check that the transaction stack hasn't changed while the lock was released, then update
1322         // it with the new transaction.
1323         {
1324             let mut inner = self.inner.lock();
1325             if !transaction.is_stacked_on(&inner.current_transaction) {
1326                 pr_warn!("Transaction stack changed during transaction!");
1327                 return Err(EINVAL.into());
1328             }
1329             inner.current_transaction = Some(transaction.clone_arc());
1330             // We push the completion as deferred work so that we wait for the reply before returning
1331             // to userland.
1332             inner.push_work_deferred(list_completion);
1333         }
1334 
1335         if let Err(e) = transaction.submit() {
1336             completion.skip();
1337             // Define `transaction` first to drop it after `inner`.
1338             let transaction;
1339             let mut inner = self.inner.lock();
1340             transaction = inner.current_transaction.take().unwrap();
1341             inner.current_transaction = transaction.clone_next();
1342             Err(e)
1343         } else {
1344             Ok(())
1345         }
1346     }
1347 
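         /// Handles `BC_REPLY` and `BC_REPLY_SG` by building the reply and delivering it to the
         /// thread that sent the transaction being replied to.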
1348     fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1349         let orig = self.inner.lock().pop_transaction_to_reply(self)?;
1350         if !orig.from.is_current_transaction(&orig) {
1351             return Err(EINVAL.into());
1352         }
1353 
1354         // We need to complete the transaction even if we cannot complete building the reply.
1355         let out = (|| -> BinderResult<_> {
1356             let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
1357             let process = orig.from.process.clone();
1358             let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
1359             let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
1360             self.inner.lock().push_work(completion);
1361             orig.from.deliver_reply(Ok(reply), &orig);
1362             Ok(())
1363         })()
1364         .map_err(|mut err| {
1365             // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
1366             // the sender know that the transaction has completed (with an error in this case).
1367             pr_warn!(
1368                 "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
1369                 err
1370             );
1371             let reply = Err(BR_FAILED_REPLY);
1372             orig.from.deliver_reply(reply, &orig);
1373             err.reply = BR_TRANSACTION_COMPLETE;
1374             err
1375         });
1376 
1377         // Restore the priority even on failure.
1378         self.restore_priority(&orig.saved_priority());
1379         out
1380     }
1381 
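         /// Creates and submits a one-way (`TF_ONE_WAY`) transaction for `BC_TRANSACTION` and
         /// `BC_TRANSACTION_SG`; no reply is expected, so only a completion code is queued locally.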
1382     fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
1383         // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
1384         // union is okay.
1385         let handle = unsafe { tr.transaction_data.target.handle };
1386         let node_ref = self.process.get_transaction_node(handle)?;
1387         security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
1388         let transaction = Transaction::new(node_ref, None, self, tr)?;
1389         let code = if self.process.is_oneway_spam_detection_enabled()
1390             && transaction.oneway_spam_detected
1391         {
1392             BR_ONEWAY_SPAM_SUSPECT
1393         } else {
1394             BR_TRANSACTION_COMPLETE
1395         };
1396         let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
1397         let completion = list_completion.clone_arc();
1398         self.inner.lock().push_work(list_completion);
1399         match transaction.submit() {
1400             Ok(()) => Ok(()),
1401             Err(err) => {
1402                 completion.skip();
1403                 Err(err)
1404             }
1405         }
1406     }
1407 
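         /// Processes the commands in the write buffer of a `BINDER_WRITE_READ` request, updating
         /// `write_consumed` as each command is handled.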
1408     fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
1409         let write_start = req.write_buffer.wrapping_add(req.write_consumed);
1410         let write_len = req.write_size.saturating_sub(req.write_consumed);
1411         let mut reader = UserSlice::new(write_start as _, write_len as _).reader();
1412 
1413         while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
1414             let before = reader.len();
1415             let cmd = reader.read::<u32>()?;
1416             crate::trace::trace_command(cmd);
1417             GLOBAL_STATS.inc_bc(cmd);
1418             self.process.stats.inc_bc(cmd);
1419             match cmd {
1420                 BC_TRANSACTION => {
1421                     let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
1422                     if tr.transaction_data.flags & TF_ONE_WAY != 0 {
1423                         self.transaction(&tr, Self::oneway_transaction_inner);
1424                     } else {
1425                         self.transaction(&tr, Self::transaction_inner);
1426                     }
1427                 }
1428                 BC_TRANSACTION_SG => {
1429                     let tr = reader.read::<BinderTransactionDataSg>()?;
1430                     if tr.transaction_data.flags & TF_ONE_WAY != 0 {
1431                         self.transaction(&tr, Self::oneway_transaction_inner);
1432                     } else {
1433                         self.transaction(&tr, Self::transaction_inner);
1434                     }
1435                 }
1436                 BC_REPLY => {
1437                     let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
1438                     self.transaction(&tr, Self::reply_inner)
1439                 }
1440                 BC_REPLY_SG => {
1441                     let tr = reader.read::<BinderTransactionDataSg>()?;
1442                     self.transaction(&tr, Self::reply_inner)
1443                 }
1444                 BC_FREE_BUFFER => {
1445                     let buffer = self.process.buffer_get(reader.read()?);
1446                     if let Some(buffer) = &buffer {
1447                         if buffer.looper_need_return_on_free() {
1448                             self.inner.lock().looper_need_return = true;
1449                         }
1450                         crate::trace::trace_transaction_buffer_release(buffer.debug_id);
1451                     }
1452                     drop(buffer);
1453                 }
1454                 BC_INCREFS => {
1455                     self.process
1456                         .as_arc_borrow()
1457                         .update_ref(reader.read()?, true, false)?
1458                 }
1459                 BC_ACQUIRE => {
1460                     self.process
1461                         .as_arc_borrow()
1462                         .update_ref(reader.read()?, true, true)?
1463                 }
1464                 BC_RELEASE => {
1465                     self.process
1466                         .as_arc_borrow()
1467                         .update_ref(reader.read()?, false, true)?
1468                 }
1469                 BC_DECREFS => {
1470                     self.process
1471                         .as_arc_borrow()
1472                         .update_ref(reader.read()?, false, false)?
1473                 }
1474                 BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
1475                 BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
1476                 BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
1477                 BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
1478                 BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
1479                 BC_REGISTER_LOOPER => {
1480                     let valid = self.process.register_thread();
1481                     self.inner.lock().looper_register(valid);
1482                 }
1483                 BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
1484                 BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
1485                 BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
1486                 BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
1487                 BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
1488 
1489                 // Fail if given an unknown command code.
1490                 // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
1491                 _ => return Err(EINVAL),
1492             }
1493             // Update the number of write bytes consumed.
1494             req.write_consumed += (before - reader.len()) as u64;
1495         }
1496 
1497         Ok(())
1498     }
1499 
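         /// Fills the read buffer of a `BINDER_WRITE_READ` request with pending work items, waiting
         /// for work to arrive if `wait` is true and nothing has been written yet.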
1500     fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
1501         let read_start = req.read_buffer.wrapping_add(req.read_consumed);
1502         let read_len = req.read_size.saturating_sub(req.read_consumed);
1503         let mut writer = BinderReturnWriter::new(
1504             UserSlice::new(read_start as _, read_len as _).writer(),
1505             self,
1506         );
1507         let (in_pool, has_transaction, thread_todo, use_proc_queue) = {
1508             let inner = self.inner.lock();
1509             (
1510                 inner.is_looper(),
1511                 inner.current_transaction.is_some(),
1512                 !inner.work_list.is_empty(),
1513                 inner.should_use_process_work_queue(),
1514             )
1515         };
1516 
1517         crate::trace::trace_wait_for_work(use_proc_queue, has_transaction, thread_todo);
1518 
1519         let getter = if use_proc_queue {
1520             Self::get_work
1521         } else {
1522             Self::get_work_local
1523         };
1524 
1525         // Reserve some room at the beginning of the read buffer so that we can send a
1526         // BR_SPAWN_LOOPER if we need to.
1527         let mut has_noop_placeholder = false;
1528         if req.read_consumed == 0 {
1529             if let Err(err) = writer.write_code(BR_NOOP) {
1530                 pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
1531                 return Err(err);
1532             }
1533             has_noop_placeholder = true;
1534         }
1535 
1536         // Loop doing work while there is room in the buffer.
1537         let initial_len = writer.len();
1538         while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
1539             match getter(self, wait && initial_len == writer.len()) {
1540                 Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
1541                     Ok(true) => {}
1542                     Ok(false) => break,
1543                     Err(err) => {
1544                         return Err(err);
1545                     }
1546                 },
1547                 Ok(None) => {
1548                     break;
1549                 }
1550                 Err(err) => {
1551                     // Propagate the error if we haven't written anything else.
1552                     if err != EINTR && err != EAGAIN {
1553                         pr_warn!("Failure in work getter: {:?}", err);
1554                     }
1555                     if initial_len == writer.len() {
1556                         return Err(err);
1557                     } else {
1558                         break;
1559                     }
1560                 }
1561             }
1562         }
1563 
1564         req.read_consumed += read_len - writer.len() as u64;
1565 
1566         // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
1567         if has_noop_placeholder && in_pool && self.process.needs_thread() {
1568             let mut writer = UserSlice::new(req.read_buffer as _, req.read_size as _).writer();
1569             writer.write(&BR_SPAWN_LOOPER)?;
1570         }
1571         Ok(())
1572     }
1573 
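         /// Handles the `BINDER_WRITE_READ` ioctl: the write buffer is processed first, then the
         /// read buffer, and the consumed counts are written back so the caller can see them.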
1574     pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
1575         let (mut reader, mut writer) = data.reader_writer();
1576         let mut req = reader.read::<BinderWriteRead>()?;
1577 
1578         // Go through the write buffer.
1579         let mut ret = Ok(());
1580         if req.write_size > 0 {
1581             ret = self.write(&mut req);
1582             crate::trace::trace_write_done(ret);
1583             if let Err(err) = ret {
1584                 pr_warn!(
1585                     "Write failure {:?} in pid:{}",
1586                     err,
1587                     self.process.pid_in_current_ns()
1588                 );
1589                 req.read_consumed = 0;
1590                 writer.write(&req)?;
1591                 self.inner.lock().looper_need_return = false;
1592                 return ret;
1593             }
1594         }
1595 
1596         // Go through the work queue.
1597         if req.read_size > 0 {
1598             ret = self.read(&mut req, wait);
1599             crate::trace::trace_read_done(ret);
1600             if ret.is_err() && ret != Err(EINTR) {
1601                 pr_warn!(
1602                     "Read failure {:?} in pid:{}",
1603                     ret,
1604                     self.process.pid_in_current_ns()
1605                 );
1606             }
1607         }
1608 
1609         // Write the request back so that the consumed fields are visible to the caller.
1610         writer.write(&req)?;
1611 
1612         self.inner.lock().looper_need_return = false;
1613 
1614         ret
1615     }
1616 
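         /// Registers this thread's work condvar in the poll table and returns whether the thread
         /// takes work from the process-wide queue, together with its current poll events.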
1617     pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
1618         table.register_wait(file, &self.work_condvar);
1619         let mut inner = self.inner.lock();
1620         (inner.should_use_process_work_queue(), inner.poll())
1621     }
1622 
1623     /// Makes any in-progress call to `get_work` or `get_work_local` return immediately.
1624     pub(crate) fn exit_looper(&self) {
1625         let mut inner = self.inner.lock();
1626         let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
1627         if should_notify {
1628             inner.looper_need_return = true;
1629         }
1630         drop(inner);
1631 
1632         if should_notify {
1633             self.work_condvar.notify_one();
1634         }
1635     }
1636 
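         /// Wakes this thread up if it is polling and is currently allowed to take work from the
         /// process-wide work queue.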
1637     pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
1638         // Determine if we need to notify. This requires the lock.
1639         let inner = self.inner.lock();
1640         let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
1641         drop(inner);
1642 
1643         // Now that the lock is no longer held, notify the waiters if we have to.
1644         if notify {
1645             if sync {
1646                 self.work_condvar.notify_sync();
1647             } else {
1648                 self.work_condvar.notify_one();
1649             }
1650         }
1651     }
1652 
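         /// Marks the thread as dead, unwinds its transaction stack, and cancels any work items
         /// that are still queued on it.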
1653     pub(crate) fn release(self: &Arc<Self>) {
1654         self.inner.lock().is_dead = true;
1655 
1656         self.work_condvar.clear();
1657         self.unwind_transaction_stack();
1658 
1659         // Cancel all pending work items.
1660         while let Ok(Some(work)) = self.get_work_local(false) {
1661             work.into_arc().cancel();
1662         }
1663     }
1664 }
1665 
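     /// An error code that is delivered to a thread as a work item. A stored value of `BR_OK`
     /// means that the slot is currently unused.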
1666 #[pin_data]
1667 struct ThreadError {
1668     error_code: AtomicU32,
1669     #[pin]
1670     links_track: AtomicTracker,
1671 }
1672 
1673 impl ThreadError {
1674     fn try_new() -> Result<DArc<Self>> {
1675         DTRWrap::arc_pin_init(pin_init!(Self {
1676             error_code: AtomicU32::new(BR_OK),
1677             links_track <- AtomicTracker::new(),
1678         }))
1679         .map(ListArc::into_arc)
1680     }
1681 
1682     fn set_error_code(&self, code: u32) {
1683         self.error_code.store(code, Ordering::Relaxed);
1684     }
1685 
1686     fn is_unused(&self) -> bool {
1687         self.error_code.load(Ordering::Relaxed) == BR_OK
1688     }
1689 }
1690 
1691 impl DeliverToRead for ThreadError {
1692     fn do_work(
1693         self: DArc<Self>,
1694         _thread: &Thread,
1695         writer: &mut BinderReturnWriter<'_>,
1696     ) -> Result<bool> {
1697         let code = self.error_code.load(Ordering::Relaxed);
1698         self.error_code.store(BR_OK, Ordering::Relaxed);
1699         writer.write_code(code)?;
1700         Ok(true)
1701     }
1702 
1703     fn cancel(self: DArc<Self>) {}
1704     fn on_thread_selected(&self, _thread: &Thread) {}
1705 
1706     fn should_sync_wakeup(&self) -> bool {
1707         false
1708     }
1709 
1710     fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
1711         seq_print!(
1712             m,
1713             "{}transaction error: {}\n",
1714             prefix,
1715             self.error_code.load(Ordering::Relaxed)
1716         );
1717         Ok(())
1718     }
1719 }
1720 
1721 kernel::list::impl_list_arc_safe! {
1722     impl ListArcSafe<0> for ThreadError {
1723         tracked_by links_track: AtomicTracker;
1724     }
1725 }
1726