// SPDX-License-Identifier: GPL-2.0

// Copyright (C) 2024 Google LLC.

//! Keeps track of unpinned ranges in an ashmem file.

use crate::{
    ashmem_shrinker::{
        self, CountObjects, ScanObjects, ShrinkControl, Shrinker, ShrinkerBuilder,
        ShrinkerRegistration,
    },
    shmem::ShmemFile,
    AshmemModule,
};
use core::{
    mem::MaybeUninit,
    pin::Pin,
    sync::atomic::{AtomicUsize, Ordering},
};
use kernel::{
    c_str,
    list::{List, ListArc, ListLinks},
    page::PAGE_SIZE,
    prelude::*,
    sync::{GlobalGuard, GlobalLockedBy, UniqueArc},
};

// Only updated with ASHMEM_MUTEX held, but the shrinker will read it without the mutex.
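// The count is in pages, not ranges: every `Range` on the LRU list contributes `size()` pages,
// and this is the figure the shrinker reports back via `count_objects`.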
pub(crate) static LRU_COUNT: AtomicUsize = AtomicUsize::new(0);

pub(crate) struct AshmemLru {
    lru_list: List<Range, 0>,
}

/// Represents ownership of the `ASHMEM_MUTEX` lock.
///
/// Using a wrapper struct around `GlobalGuard` so we can add our own methods to the guard.
pub(crate) struct AshmemGuard(pub(crate) GlobalGuard<ASHMEM_MUTEX>);

// These make `AshmemGuard` inherit the behavior of `GlobalGuard`.
impl core::ops::Deref for AshmemGuard {
    type Target = GlobalGuard<ASHMEM_MUTEX>;
    fn deref(&self) -> &GlobalGuard<ASHMEM_MUTEX> {
        &self.0
    }
}
impl core::ops::DerefMut for AshmemGuard {
    fn deref_mut(&mut self) -> &mut GlobalGuard<ASHMEM_MUTEX> {
        &mut self.0
    }
}

impl AshmemGuard {
    fn shrink_range(&mut self, range: &Range, pgstart: usize, pgend: usize) {
        let old_size = range.size(self);
        {
            let inner = range.inner.as_mut(self);
            inner.pgstart = pgstart;
            inner.pgend = pgend;
        }
        let new_size = range.size(self);

        // Only change the counter if the range is on the lru list.
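        // A plain load/store pair (rather than `fetch_add`/`fetch_sub`) is enough here: all
        // updates to `LRU_COUNT` happen with `ASHMEM_MUTEX` held, so there is no concurrent
        // writer to race with; only the shrinker reads the counter without the lock.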
        if !range.purged(self) {
            let mut lru_count = LRU_COUNT.load(Ordering::Relaxed);
            lru_count -= old_size;
            lru_count += new_size;
            LRU_COUNT.store(lru_count, Ordering::Relaxed);
        }
    }

    fn insert_lru(&mut self, range: ListArc<Range>) {
        // Don't insert the range if it's already purged.
        if !range.purged(self) {
            let mut lru_count = LRU_COUNT.load(Ordering::Relaxed);
            lru_count += range.size(self);
            LRU_COUNT.store(lru_count, Ordering::Relaxed);
            self.lru_list.push_front(range);
        }
    }

    fn remove_lru(&mut self, range: &Range) -> Option<ListArc<Range>> {
        // SAFETY: The only list with ID 0 is this list, so the range can't be in some other list
        // with the same ID.
        let ret = unsafe { self.lru_list.remove(range) };

        // Only decrement lru_count if the range was actually in the list.
        if ret.is_some() {
            let mut lru_count = LRU_COUNT.load(Ordering::Relaxed);
            lru_count -= range.size(self);
            LRU_COUNT.store(lru_count, Ordering::Relaxed);
        }

        ret
    }
}

kernel::sync::global_lock! {
    // SAFETY: We call `init` as the very first thing in the initialization of this module, so
    // there are no calls to `lock` before `init` is called.
    pub(crate) unsafe(uninit) static ASHMEM_MUTEX: Mutex<AshmemLru> = AshmemLru {
        lru_list: List::new(),
    };
}

#[pin_data]
pub(crate) struct Range {
    /// prev/next pointers for the global LRU list in `AshmemLru`.
    #[pin]
    lru: ListLinks<0>,
    /// prev/next pointers for `Area::unpinned_list`.
    ///
    /// Note that "unpinned" here refers to the ASHMEM_PIN/UNPIN ioctls, which is unrelated to
    /// Rust's concept of pinning.
    #[pin]
    unpinned: ListLinks<1>,
    file: ShmemFile,
    pub(crate) inner: GlobalLockedBy<RangeInner, ASHMEM_MUTEX>,
}

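// `pgstart` and `pgend` are inclusive page indices into the ashmem file: a range covering a
// single page has `pgstart == pgend`, so `Range::size` is `pgend - pgstart + 1`.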
pub(crate) struct RangeInner {
    pub(crate) pgstart: usize,
    pub(crate) pgend: usize,
    pub(crate) purged: bool,
}

impl Range {
    pub(crate) fn set_purged(&self, guard: &mut AshmemGuard) {
        self.inner.as_mut(guard).purged = true;
    }

    pub(crate) fn purged(&self, guard: &AshmemGuard) -> bool {
        self.inner.as_ref(guard).purged
    }

    pub(crate) fn pgstart(&self, guard: &AshmemGuard) -> usize {
        self.inner.as_ref(guard).pgstart
    }

    pub(crate) fn pgend(&self, guard: &AshmemGuard) -> usize {
        self.inner.as_ref(guard).pgend
    }

    pub(crate) fn size(&self, guard: &AshmemGuard) -> usize {
        let inner = self.inner.as_ref(guard);
        inner.pgend - inner.pgstart + 1
    }

    pub(crate) fn is_before_page(&self, page: usize, guard: &AshmemGuard) -> bool {
        let inner = self.inner.as_ref(guard);
        inner.pgend < page
    }

    pub(crate) fn contains_page(&self, page: usize, guard: &AshmemGuard) -> bool {
        let inner = self.inner.as_ref(guard);
        inner.pgstart <= page && inner.pgend >= page
    }

    pub(crate) fn is_superset_of_range(
        &self,
        pgstart: usize,
        pgend: usize,
        guard: &AshmemGuard,
    ) -> bool {
        let inner = self.inner.as_ref(guard);
        inner.pgstart <= pgstart && inner.pgend >= pgend
    }

    pub(crate) fn is_subset_of_range(
        &self,
        pgstart: usize,
        pgend: usize,
        guard: &AshmemGuard,
    ) -> bool {
        let inner = self.inner.as_ref(guard);
        inner.pgstart >= pgstart && inner.pgend <= pgend
    }

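    // Worked example for the overlap check below, assuming this range covers pages [4, 8]:
    // a request for [6, 10] is caught by the first clause (we contain its start), [1, 5] by the
    // second clause (we contain its end), and [1, 10] only by the third clause, since neither of
    // its endpoints lies inside [4, 8] even though it covers the whole range.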
    pub(crate) fn overlaps_with_range(
        &self,
        pgstart: usize,
        pgend: usize,
        guard: &AshmemGuard,
    ) -> bool {
        self.contains_page(pgstart, guard)
            || self.contains_page(pgend, guard)
            || self.is_subset_of_range(pgstart, pgend, guard)
    }
}

kernel::list::impl_has_list_links! {
    impl HasListLinks<0> for Range { self.lru }
    impl HasListLinks<1> for Range { self.unpinned }
}

kernel::list::impl_list_arc_safe! {
    impl ListArcSafe<0> for Range { untracked; }
    impl ListArcSafe<1> for Range { untracked; }
}

kernel::list::impl_list_item! {
    impl ListItem<0> for Range { using ListLinks; }
    impl ListItem<1> for Range { using ListLinks; }
}
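
// List ID 0 is the global LRU list owned by `AshmemLru`, and list ID 1 is the per-file
// `Area::unpinned_list`. A `Range` can be on both lists at the same time, which is why it
// carries two sets of list links and two `ListArc` references.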

pub(crate) struct Area {
    /// List of page ranges that have been unpinned by `ASHMEM_UNPIN`.
    ///
    /// The ranges are sorted in descending order.
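    ///
    /// "Descending" means highest page numbers first, so a walk from the front of the list can
    /// stop early once `is_before_page` reports that every remaining entry lies entirely below
    /// the range being processed.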
    unpinned_list: List<Range, 1>,
}

impl Drop for Area {
    fn drop(&mut self) {
        let mut guard = AshmemGuard(super::ASHMEM_MUTEX.lock());
        for range in &self.unpinned_list {
            guard.remove_lru(&range);
        }
    }
}

impl Area {
    pub(crate) fn new() -> Self {
        Self {
            unpinned_list: List::new(),
        }
    }

    /// Mark the given range of pages as unpinned so they can be reclaimed.
    ///
    /// The `new_range` argument must be `Some` when calling this method. If this call needs an
    /// allocation, it will take it from the option. Otherwise, the allocation is left in the
    /// option so that the caller can free it after releasing the mutex.
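    // A sketch of the expected caller pattern, as exercised by `range_test` below (the names
    // `file` and `area` are illustrative):
    //
    //     let mut new_range = Some(NewRange {
    //         file: &file,
    //         alloc: UniqueArc::new_uninit(GFP_KERNEL)?,
    //     });
    //     let mut guard = AshmemGuard(ASHMEM_MUTEX.lock());
    //     area.unpin(pgstart, pgend, &mut new_range, &mut guard);
    //     drop(guard);
    //     drop(new_range); // Any unused allocation is freed outside the mutex.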
    pub(crate) fn unpin(
        &mut self,
        mut pgstart: usize,
        mut pgend: usize,
        new_range: &mut Option<NewRange<'_>>,
        guard: &mut AshmemGuard,
    ) {
        let mut purged = false;
        let mut cursor = self.unpinned_list.cursor_front();
        while let Some(next) = cursor.peek_next() {
            // Short-circuit: this is our insertion point.
            if next.is_before_page(pgstart, guard) {
                break;
            }

            // If the entire range is already unpinned, just return.
            if next.is_superset_of_range(pgstart, pgend, guard) {
                return;
            }

            if next.overlaps_with_range(pgstart, pgend, guard) {
                pgstart = usize::min(pgstart, next.pgstart(guard));
                pgend = usize::max(pgend, next.pgend(guard));
                purged |= next.purged(guard);
                guard.remove_lru(&next.remove());

                // Restart the scan from the front: merging may have grown the range upwards, so
                // it can now overlap entries that were already walked past.
                cursor = self.unpinned_list.cursor_front();
                continue;
            }

            cursor.move_next();
        }

        let new_range = new_range.take().unwrap().init(RangeInner {
            pgstart,
            pgend,
            purged,
        });

        let (range_lru, new_range) = ListArc::<Range, 0>::pair_from_pin_unique::<1>(new_range);
        guard.insert_lru(range_lru);
        cursor.insert(new_range);
    }

    /// Mark the given range of pages as pinned so they can't be reclaimed.
    ///
    /// Returns whether any of the pages have been reclaimed.
    ///
    /// The `new_range` argument must be `Some` when calling this method. If this call needs an
    /// allocation, it will take it from the option. Otherwise, the allocation is left in the
    /// option so that the caller can free it after releasing the mutex.
    pub(crate) fn pin(
        &mut self,
        pgstart: usize,
        pgend: usize,
        new_range: &mut Option<NewRange<'_>>,
        guard: &mut AshmemGuard,
    ) -> bool {
        let mut purged = false;
        let mut cursor = self.unpinned_list.cursor_front();
        while let Some(next) = cursor.peek_next() {
            // moved past last applicable page; we can short circuit
            if next.is_before_page(pgstart, guard) {
                break;
            }

            // The user can ask us to pin pages that span multiple ranges,
            // or to pin pages that aren't even unpinned, so this is messy.
            //
            // Four cases:
            // 1. The requested range subsumes an existing range, so we
            //    just remove the entire matching range.
            // 2. The requested range overlaps the start of an existing
            //    range, so we just update that range.
            // 3. The requested range overlaps the end of an existing
            //    range, so we just update that range.
            // 4. The requested range punches a hole in an existing range,
            //    so we have to update one side of the range and then
            //    create a new range for the other side.
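            //
            // For example, with an existing unpinned range [2, 9]: pinning [0, 10] is case 1
            // (the whole range is removed), pinning [0, 4] is case 2 (leaving [5, 9]), pinning
            // [7, 10] is case 3 (leaving [2, 6]), and pinning [4, 6] is case 4 (leaving [2, 3]
            // and a newly allocated [7, 9]).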
            if next.overlaps_with_range(pgstart, pgend, guard) {
                purged |= next.purged(guard);

                let curr_pgstart = next.pgstart(guard);
                let curr_pgend = next.pgend(guard);

                if next.is_subset_of_range(pgstart, pgend, guard) {
                    // Case #1: Easy. Just nuke the whole thing.
                    let removed = next.remove();
                    guard.remove_lru(&removed);
                    continue;
                } else if curr_pgstart >= pgstart {
                    // Case #2: We overlap from the start, so adjust it.
                    guard.shrink_range(&next, pgend + 1, curr_pgend);
                } else if curr_pgend <= pgend {
                    // Case #3: We overlap from the rear, so adjust it.
                    guard.shrink_range(&next, curr_pgstart, pgstart - 1);
                } else {
                    // Case #4: We eat a chunk out of the middle. A bit
                    // more complicated, we allocate a new range for the
                    // second half and adjust the first chunk's endpoint.
                    guard.shrink_range(&next, curr_pgstart, pgstart - 1);
                    let purged = next.purged(guard);

                    let new_range = new_range.take().unwrap().init(RangeInner {
                        pgstart: pgend + 1,
                        pgend: curr_pgend,
                        purged,
                    });

                    let (range_lru, new_range) =
                        ListArc::<Range, 0>::pair_from_pin_unique::<1>(new_range);
                    guard.insert_lru(range_lru);
                    cursor.insert(new_range);
                    break;
                }
            }

            cursor.move_next();
        }
        purged
    }

    pub(crate) fn range_has_unpinned_page(
        &self,
        pgstart: usize,
        pgend: usize,
        guard: &mut AshmemGuard,
    ) -> bool {
        for range in &self.unpinned_list {
            if range.overlaps_with_range(pgstart, pgend, guard) {
                return true;
            }
        }
        false
    }
}

pub(crate) struct NewRange<'a> {
    pub(crate) file: &'a ShmemFile,
    pub(crate) alloc: UniqueArc<MaybeUninit<Range>>,
}

impl<'a> NewRange<'a> {
    fn init(self, inner: RangeInner) -> Pin<UniqueArc<Range>> {
        let new_range = self.alloc.pin_init_with(pin_init!(Range {
            lru <- ListLinks::new(),
            unpinned <- ListLinks::new(),
            file: self.file.clone(),
            inner: GlobalLockedBy::new(inner),
        }));

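        // The initializer above cannot fail, so the `Err` arm is unreachable; matching on the
        // uninhabited error value expresses that without needing an `unwrap`.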
        match new_range {
            Ok(new_range) => new_range,
            Err(infallible) => match infallible {},
        }
    }
}

impl AshmemGuard {
    pub(crate) fn free_lru(&mut self, stop_after: usize) -> usize {
        let mut freed = 0;
        while let Some(range) = self.lru_list.pop_back() {
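            // Translate the inclusive page range into the byte offset and length expected by the
            // hole punch on the backing shmem file.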
            let start = range.pgstart(self) * PAGE_SIZE;
            let end = (range.pgend(self) + 1) * PAGE_SIZE;
            range.set_purged(self);
            self.remove_lru(&range);
            freed += range.size(self);

            // C ashmem releases the mutex and uses a different mechanism to ensure mutual
            // exclusion with `pin_unpin` operations, but we only hold `ASHMEM_MUTEX` here and in
            // `pin_unpin`, so we don't need to release the mutex. A different mutex is used for
            // all of the other ashmem operations.
            range.file.punch_hole(start, end - start);

            if freed >= stop_after {
                break;
            }

            if super::shrinker_should_stop() {
                break;
            }
        }
        freed
    }
}

impl Shrinker for super::AshmemModule {
    // Our shrinker data is in a global, so we don't need to set the private data.
    type Ptr = ();

    fn count_objects(_: (), _sc: ShrinkControl<'_>) -> CountObjects {
        let count = LRU_COUNT.load(super::Ordering::Relaxed);
        if count == 0 {
            CountObjects::EMPTY
        } else {
            CountObjects::new(count)
        }
    }

    fn scan_objects(_: (), sc: ShrinkControl<'_>) -> ScanObjects {
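        // Punching holes in the backing shmem file can recurse into filesystem code, so give up
        // immediately if the current reclaim context does not allow FS reclaim.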
        if !sc.reclaim_fs_allowed() {
            return ScanObjects::STOP;
        }

        let Some(guard) = super::ASHMEM_MUTEX.try_lock() else {
            return ScanObjects::STOP;
        };
        let mut guard = AshmemGuard(guard);

        let num_freed = guard.free_lru(sc.nr_to_scan());
        ScanObjects::from_count(num_freed)
    }
}

/// Type alias that keeps the `global_lock!` invocation below within the line length limit.
type AshmemShrinkerType = Option<ShrinkerRegistration<AshmemModule>>;

kernel::sync::global_lock! {
    // SAFETY: We call `init` as the very first thing in the initialization of this module, so
    // there are no calls to `lock` before `init` is called.
    pub(crate) unsafe(uninit) static ASHMEM_SHRINKER: Mutex<AshmemShrinkerType> = None;
}

pub(crate) fn set_shrinker_enabled(enabled: bool) -> Result<()> {
    let mut shrinker = ASHMEM_SHRINKER.lock();
    if enabled {
        if shrinker.is_none() {
            let mut builder = ShrinkerBuilder::new(c_str!("android-ashmem"))?;
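            // A `seeks` value above the default tells the shrinker core that these objects are
            // comparatively expensive to recreate, so ashmem ranges are reclaimed less
            // aggressively than the default shrinker behaviour.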
            builder.set_seeks(4 * ashmem_shrinker::DEFAULT_SEEKS);
            *shrinker = Some(builder.register(()));
        }
    } else {
        *shrinker = None;
    }
    Ok(())
}

pub(crate) fn get_shrinker_enabled() -> bool {
    ASHMEM_SHRINKER.lock().is_some()
}

#[cfg(test)]
fn range_test() -> Result {
    fn get_random(max: usize) -> usize {
        let rng = unsafe { kernel::bindings::get_random_u64() };
        (rng % max as u64) as usize
    }

    fn memset(slice: &mut [bool], value: bool) {
        for ptr in slice {
            *ptr = value;
        }
    }

    const SIZE: usize = 16;

    let file = ShmemFile::new(c_str!("test_file"), SIZE * PAGE_SIZE, 0)?;
    let mut area = Area::new();
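
    // `unpinned` is the oracle: it mirrors the expected pin state of every page, and the
    // contents of `unpinned_list` are checked against it after each random operation.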
    let mut unpinned = [false; SIZE];

    let mut new_range = None;
    for _ in 0..SIZE {
        let start = get_random(SIZE);
        let end = get_random(SIZE - start) + start;
        let op = get_random(2) == 0;

        if new_range.is_none() {
            new_range = Some(NewRange {
                file: &file,
                alloc: UniqueArc::new_uninit(GFP_KERNEL)?,
            });
        }
        let mut lock = AshmemGuard(ASHMEM_MUTEX.lock());
        if op {
            pr_err!("Unpinning {start} to {end}.");
            area.unpin(start, end, &mut new_range, &mut lock);
            memset(&mut unpinned[start..=end], true);
        } else {
            pr_err!("Pinning {start} to {end}.");
            area.pin(start, end, &mut new_range, &mut lock);
            memset(&mut unpinned[start..=end], false);
        }

        for item in &area.unpinned_list {
            pr_err!(
                "Seeing range {} to {}.",
                item.pgstart(&lock),
                item.pgend(&lock)
            );
        }

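        // Walk the list from the back (lowest pages first) and derive, for each page `i`, whether
        // it should be unpinned according to the list, then compare that against the oracle.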
        let mut cursor = area.unpinned_list.cursor_back();
        let mut fail = false;
        for i in 0..SIZE {
            let mut target = false;
            while let Some(prev) = cursor.peek_prev() {
                if prev.pgend(&lock) < i {
                    cursor.move_prev();
                    continue;
                }
                target = prev.pgstart(&lock) <= i;
                break;
            }
            if target != unpinned[i] {
                pr_err!("Mismatch on {i}!");
                fail = true;
            }
        }
        if fail {
            return Err(EINVAL);
        }
    }
    pr_err!("Test completed successfully!");
    Ok(())
}