// Copyright 2020, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module implements the key garbage collector.
//! The key garbage collector has one public function `notify_gc()`. This will create
//! a thread on demand which will query the database for unreferenced key entries,
//! optionally dispose of sensitive key material appropriately, and then delete
//! the key entry from the database.

use crate::{
    async_task,
    database::{BlobMetaData, KeystoreDB, Uuid},
    super_key::SuperKeyManager,
};
use anyhow::{Context, Result};
use async_task::AsyncTask;
use std::sync::{
    atomic::{AtomicU8, Ordering},
    Arc,
};

pub struct Gc {
    async_task: Arc<AsyncTask>,
    notified: Arc<AtomicU8>,
}

impl Gc {
    /// Creates a garbage collector using the given async_task.
    /// The garbage collector needs a function to invalidate key blobs, a database connection,
    /// and a reference to the `SuperKeyManager`. They are obtained from the init function.
    /// The function is only called if this is the first time a garbage collector was initialized
    /// with the given AsyncTask instance.
    /// Note: It is a logical error to initialize different Gc instances with the same
    /// `AsyncTask`.
    pub fn new_init_with<F>(async_task: Arc<AsyncTask>, init: F) -> Self
    where
        F: FnOnce() -> (
                Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
                KeystoreDB,
                Arc<SuperKeyManager>,
            ) + Send
            + 'static,
    {
        let weak_at = Arc::downgrade(&async_task);
        let notified = Arc::new(AtomicU8::new(0));
        let notified_clone = notified.clone();
        // Initialize the task's shelf.
        async_task.queue_hi(move |shelf| {
            let (invalidate_key, db, super_key) = init();
            let notified = notified_clone;
            shelf.get_or_put_with(|| GcInternal {
                deleted_blob_ids: vec![],
                superseded_blobs: vec![],
                invalidate_key,
                db,
                async_task: weak_at,
                super_key,
                notified,
            });
        });
        Self { async_task, notified }
    }

    /// Notifies the key garbage collector to iterate through orphaned and superseded blobs and
    /// attempt their deletion. We only process one key at a time and then schedule another
    /// attempt by queueing it in the async_task (low priority) queue.
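    ///
    /// A minimal usage sketch follows (marked `ignore` because setting up the database and
    /// `SuperKeyManager` is out of scope here); `invalidate_in_keymint`, `open_db`, `async_task`,
    /// and `super_key` are hypothetical names used for illustration, not part of this module:
    ///
    /// ```ignore
    /// let invalidate: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send> =
    ///     Box::new(|uuid, blob| invalidate_in_keymint(uuid, blob));
    /// let gc = Gc::new_init_with(async_task.clone(), move || (invalidate, open_db(), super_key));
    /// // Poke the collector whenever key entries may have become unreferenced or superseded.
    /// gc.notify_gc();
    /// ```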
    pub fn notify_gc(&self) {
        if let Ok(0) = self.notified.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed) {
            self.async_task.queue_lo(|shelf| shelf.get_downcast_mut::<GcInternal>().unwrap().step())
        }
    }
}

struct GcInternal {
    deleted_blob_ids: Vec<i64>,
    superseded_blobs: Vec<(i64, Vec<u8>, BlobMetaData)>,
    invalidate_key: Box<dyn Fn(&Uuid, &[u8]) -> Result<()> + Send + 'static>,
    db: KeystoreDB,
    async_task: std::sync::Weak<AsyncTask>,
    super_key: Arc<SuperKeyManager>,
    notified: Arc<AtomicU8>,
}

impl GcInternal {
    /// Attempts to process one blob from the database.
    /// We process one key at a time, because deleting a key is a time consuming process which
    /// may involve calling into the KeyMint backend, and we don't want to hog either the backend
    /// or the database for extended periods of time.
    /// To limit the number of database transactions, which are also expensive and compete
    /// with threads on the critical path, deleted blobs are loaded in batches.
    fn process_one_key(&mut self) -> Result<()> {
        if self.superseded_blobs.is_empty() {
            let blobs = self
                .db
                .handle_next_superseded_blobs(&self.deleted_blob_ids, 20)
                .context("In process_one_key: Trying to handle superseded blob.")?;
            self.deleted_blob_ids = vec![];
            self.superseded_blobs = blobs;
        }

        if let Some((blob_id, blob, blob_metadata)) = self.superseded_blobs.pop() {
            // Add the next blob_id to the deleted blob ids list, so that it gets removed from
            // the database regardless of whether the following succeeds or not.
            self.deleted_blob_ids.push(blob_id);

            // If the key has a km_uuid we try to get the corresponding device
            // and delete the key, unwrapping if necessary and possible.
            // (At this time keys may get deleted without having the super encryption key;
            // in this case we can only delete the key from the database.)
            if let Some(uuid) = blob_metadata.km_uuid() {
                let blob = self
                    .super_key
                    .unwrap_key_if_required(&blob_metadata, &blob)
                    .context("In process_one_key: Trying to unwrap to-be-deleted blob.")?;
                (self.invalidate_key)(&uuid, &*blob)
                    .context("In process_one_key: Trying to invalidate key.")?;
            }
        }
        Ok(())
    }

    /// Processes one key and then schedules another attempt until it runs out of blobs to
    /// delete.
    fn step(&mut self) {
        self.notified.store(0, Ordering::Relaxed);
        if let Err(e) = self.process_one_key() {
            log::error!("Error trying to delete blob entry. {:?}", e);
        }
        // Schedule the next step. This gives high priority requests a chance to interleave.
        if !self.deleted_blob_ids.is_empty() {
            if let Some(at) = self.async_task.upgrade() {
                if let Ok(0) =
                    self.notified.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed)
                {
                    at.queue_lo(move |shelf| {
                        shelf.get_downcast_mut::<GcInternal>().unwrap().step()
                    });
                }
            }
        }
    }
}
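
// Illustrative sketch, not part of the original module: the test below exercises only the
// AtomicU8 `compare_exchange` pattern that `notify_gc` and `step` use to coalesce
// notifications, without requiring a KeystoreDB, a SuperKeyManager, or a KeyMint backend.
// The module and test names are made up for this example.
#[cfg(test)]
mod notified_flag_tests {
    use std::sync::atomic::{AtomicU8, Ordering};

    #[test]
    fn only_first_notification_schedules_work() {
        let notified = AtomicU8::new(0);

        // The first caller flips the flag from 0 to 1 and would queue a GC step.
        assert_eq!(notified.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed), Ok(0));

        // Subsequent callers see the flag already set and do not queue duplicate work.
        assert_eq!(notified.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed), Err(1));

        // `step` clears the flag before processing, so the next notification schedules again.
        notified.store(0, Ordering::Relaxed);
        assert_eq!(notified.compare_exchange(0, 1, Ordering::Relaxed, Ordering::Relaxed), Ok(0));
    }
}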