// Copyright 2021, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! This module acts as a bridge between the legacy key database and the keystore2 database.

use crate::key_parameter::KeyParameterValue;
use crate::legacy_blob::BlobValue;
use crate::utils::{uid_to_android_user, watchdog as wd};
use crate::{async_task::AsyncTask, legacy_blob::LegacyBlobLoader};
use crate::{database::KeyType, error::Error};
use crate::{
    database::{
        BlobMetaData, BlobMetaEntry, CertificateInfo, DateTime, EncryptedBy, KeyMetaData,
        KeyMetaEntry, KeystoreDB, Uuid, KEYSTORE_UUID,
    },
    super_key::USER_SUPER_KEY,
};
use android_hardware_security_keymint::aidl::android::hardware::security::keymint::SecurityLevel::SecurityLevel;
use android_system_keystore2::aidl::android::system::keystore2::{
    Domain::Domain, KeyDescriptor::KeyDescriptor, ResponseCode::ResponseCode,
};
use anyhow::{Context, Result};
use core::ops::Deref;
use keystore2_crypto::{Password, ZVec};
use std::collections::{HashMap, HashSet};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::mpsc::channel;
use std::sync::{Arc, Mutex};

/// The LegacyMigrator bridges the legacy key blob database and the keystore2 database.
/// Keys are migrated lazily, on first access, by requests serialized onto its async task.
pub struct LegacyMigrator {
    async_task: Arc<AsyncTask>,
    initializer: Mutex<
        Option<
            Box<
                dyn FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
                    + Send
                    + 'static,
            >,
        >,
    >,
    /// This atomic is used for cheap interior mutability. It is intended to prevent
    /// expensive calls into the legacy migrator when the legacy database is empty.
    /// When transitioning from READY to EMPTY, spurious calls may occur for a brief period
    /// of time. This is tolerable in favor of the common case.
    state: AtomicU8,
}

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
struct RecentMigration {
    uid: u32,
    alias: String,
}

impl RecentMigration {
    fn new(uid: u32, alias: String) -> Self {
        Self { uid, alias }
    }
}

enum BulkDeleteRequest {
    Uid(u32),
    User(u32),
}

struct LegacyMigratorState {
    recently_migrated: HashSet<RecentMigration>,
    recently_migrated_super_key: HashSet<u32>,
    legacy_loader: Arc<LegacyBlobLoader>,
    sec_level_to_km_uuid: HashMap<SecurityLevel, Uuid>,
    db: KeystoreDB,
}

impl LegacyMigrator {
    const WIFI_NAMESPACE: i64 = 102;
    const AID_WIFI: u32 = 1010;

    const STATE_UNINITIALIZED: u8 = 0;
    const STATE_READY: u8 = 1;
    const STATE_EMPTY: u8 = 2;

    /// Constructs a new LegacyMigrator using the given AsyncTask object as migration
    /// worker.
    pub fn new(async_task: Arc<AsyncTask>) -> Self {
        Self {
            async_task,
            initializer: Default::default(),
            state: AtomicU8::new(Self::STATE_UNINITIALIZED),
        }
    }

    /// The legacy migrator must be initialized deferred, because keystore starts very early.
    /// At this time the data partition may not be mounted. So we cannot open database connections
    /// until we get actual key load requests. This sets the function that the legacy loader
    /// uses to connect to the database.
    pub fn set_init<F>(&self, f_init: F) -> Result<()>
    where
        F: FnOnce() -> (KeystoreDB, HashMap<SecurityLevel, Uuid>, Arc<LegacyBlobLoader>)
            + Send
            + 'static,
    {
        let mut initializer = self.initializer.lock().expect("Failed to lock initializer.");

        // If we are not uninitialized we have no business setting the initializer.
        if self.state.load(Ordering::Relaxed) != Self::STATE_UNINITIALIZED {
            return Ok(());
        }

        // Only set the initializer if it hasn't been set before.
        if initializer.is_none() {
            *initializer = Some(Box::new(f_init))
        }

        Ok(())
    }

    /// This function is called by the migration requestor to check if it is worth
    /// making a migration request. It also transitions the state from UNINITIALIZED
    /// to READY or EMPTY on first use. The deferred initialization is necessary, because
    /// Keystore 2.0 runs early during boot, where data may not yet be mounted.
    /// Returns Ok(STATE_READY) if a migration request is worth undertaking and
    /// Ok(STATE_EMPTY) if the database is empty. An error is returned if the loader
    /// was not initialized and cannot be initialized.
    fn check_state(&self) -> Result<u8> {
        let mut first_try = true;
        loop {
            match (self.state.load(Ordering::Relaxed), first_try) {
                (Self::STATE_EMPTY, _) => {
                    return Ok(Self::STATE_EMPTY);
                }
                (Self::STATE_UNINITIALIZED, true) => {
                    // If we find the legacy loader uninitialized, we grab the initializer lock,
                    // check if the legacy database is empty, and if not, schedule an initialization
                    // request. Coming out of the initializer lock, the state is either EMPTY or
                    // READY.
                    let mut initializer = self.initializer.lock().unwrap();

                    if let Some(initializer) = initializer.take() {
                        let (db, sec_level_to_km_uuid, legacy_loader) = (initializer)();

                        if legacy_loader.is_empty().context(
                            "In check_state: Trying to check if the legacy database is empty.",
                        )? {
                            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed);
                            return Ok(Self::STATE_EMPTY);
                        }

                        self.async_task.queue_hi(move |shelf| {
                            shelf.get_or_put_with(|| LegacyMigratorState {
                                recently_migrated: Default::default(),
                                recently_migrated_super_key: Default::default(),
                                legacy_loader,
                                sec_level_to_km_uuid,
                                db,
                            });
                        });

                        // It is safe to set this here even though the async task may not yet have
                        // run because any thread observing this will not be able to schedule a
                        // task that can run before the initialization.
                        // Also we can only transition out of this state while having the
                        // initializer lock and having found an initializer.
                        self.state.store(Self::STATE_READY, Ordering::Relaxed);
                        return Ok(Self::STATE_READY);
                    } else {
                        // There is a chance that we just lost the race from state.load() to
                        // grabbing the initializer mutex. If that is the case the state must
                        // be EMPTY or READY after coming out of the lock. So we can give it
                        // one more try.
                        first_try = false;
                        continue;
                    }
                }
                (Self::STATE_UNINITIALIZED, false) => {
                    // Okay, tough luck. The legacy loader was really completely uninitialized.
                    return Err(Error::sys()).context(
                        "In check_state: Legacy loader should not be called uninitialized.",
                    );
                }
                (Self::STATE_READY, _) => return Ok(Self::STATE_READY),
                (s, _) => panic!("Unknown legacy migrator state. {} ", s),
            }
        }
    }

    /// List all aliases for uid in the legacy database.
    pub fn list_uid(&self, domain: Domain, namespace: i64) -> Result<Vec<KeyDescriptor>> {
        let _wp = wd::watch_millis("LegacyMigrator::list_uid", 500);

        let uid = match (domain, namespace) {
            (Domain::APP, namespace) => namespace as u32,
            (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
            _ => return Ok(Vec::new()),
        };
        self.do_serialized(move |state| state.list_uid(uid)).unwrap_or_else(|| Ok(Vec::new())).map(
            |v| {
                v.into_iter()
                    .map(|alias| KeyDescriptor {
                        domain,
                        nspace: namespace,
                        alias: Some(alias),
                        blob: None,
                    })
                    .collect()
            },
        )
    }

    /// Sends the given closure to the migrator thread for execution after calling check_state.
    /// Returns None if the database was empty and the request was not executed.
    /// Otherwise returns Some with the result produced by the migration request.
    /// The loader state may transition to STATE_EMPTY during the execution of this function.
    fn do_serialized<F, T: Send + 'static>(&self, f: F) -> Option<Result<T>>
    where
        F: FnOnce(&mut LegacyMigratorState) -> Result<T> + Send + 'static,
    {
        // Short circuit if the database is empty or not initialized (error case).
        match self.check_state().context("In do_serialized: Checking state.") {
            Ok(LegacyMigrator::STATE_EMPTY) => return None,
            Ok(LegacyMigrator::STATE_READY) => {}
            Err(e) => return Some(Err(e)),
            Ok(s) => panic!("Unknown legacy migrator state. {} ", s),
        }

        // We have established that there may be a key in the legacy database.
        // Now we schedule a migration request.
        let (sender, receiver) = channel();
        self.async_task.queue_hi(move |shelf| {
            // Get the migrator state from the shelf.
            // There may not be a state. This can happen if this migration request was scheduled
            // before a previous request established that the legacy database was empty
            // and removed the state from the shelf. Since we know now that the database
            // is empty, we can return None here.
            let (new_state, result) = if let Some(legacy_migrator_state) =
                shelf.get_downcast_mut::<LegacyMigratorState>()
            {
                let result = f(legacy_migrator_state);
                (legacy_migrator_state.check_empty(), Some(result))
            } else {
                (Self::STATE_EMPTY, None)
            };

            // If the migration request determined that the database is now empty, we discard
            // the state from the shelf to free up the resources we won't need any longer.
            if result.is_some() && new_state == Self::STATE_EMPTY {
                shelf.remove_downcast_ref::<LegacyMigratorState>();
            }

            // Send the result to the requester.
            if let Err(e) = sender.send((new_state, result)) {
                log::error!("In do_serialized. Error in sending the result. {:?}", e);
            }
        });

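        // Block until the migrator thread has processed the request and reported back.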
        let (new_state, result) = match receiver.recv() {
            Err(e) => {
                return Some(Err(e).context("In do_serialized. Failed to receive from the sender."))
            }
            Ok(r) => r,
        };

        // We can only transition to EMPTY but never back.
        // The migrator never creates any legacy blobs.
        if new_state == Self::STATE_EMPTY {
            self.state.store(Self::STATE_EMPTY, Ordering::Relaxed)
        }

        result
    }

    /// Runs the key_accessor function and returns its result. If it returns an error and the
    /// root cause was KEY_NOT_FOUND, tries to migrate a key with the given parameters from
    /// the legacy database to the new database and runs the key_accessor function again if
    /// the migration request was successful.
    pub fn with_try_migrate<F, T>(
        &self,
        key: &KeyDescriptor,
        caller_uid: u32,
        key_accessor: F,
    ) -> Result<T>
    where
        F: Fn() -> Result<T>,
    {
        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate", 500);

        // Access the key and return on success.
        match key_accessor() {
            Ok(result) => return Ok(result),
            Err(e) => match e.root_cause().downcast_ref::<Error>() {
                Some(&Error::Rc(ResponseCode::KEY_NOT_FOUND)) => {}
                _ => return Err(e),
            },
        }

        // Filter inputs. We can only load legacy app domain keys, plus a few special
        // SELINUX-domain keys that we migrate transparently (currently only the Wi-Fi namespace).
        let uid = match key {
            KeyDescriptor { domain: Domain::APP, alias: Some(_), .. } => caller_uid,
            KeyDescriptor { domain: Domain::SELINUX, nspace, alias: Some(_), .. } => {
                match *nspace {
                    Self::WIFI_NAMESPACE => Self::AID_WIFI,
                    _ => {
                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                            .context(format!("No legacy keys for namespace {}", nspace))
                    }
                }
            }
            _ => {
                return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                    .context("No legacy keys for key descriptor.")
            }
        };

        let key_clone = key.clone();
        let result = self
            .do_serialized(move |migrator_state| migrator_state.check_and_migrate(uid, key_clone));

        if let Some(result) = result {
            result?;
            // After successful migration try again.
            key_accessor()
        } else {
            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND)).context("Legacy database is empty.")
        }
    }

    /// Calls key_accessor and returns the result on success. If the accessor returns Ok(None),
    /// this function makes a migration request and, on success, retries the key_accessor.
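    /// Accessor errors are returned unchanged, and an empty legacy database yields Ok(None).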
    pub fn with_try_migrate_super_key<F, T>(
        &self,
        user_id: u32,
        pw: &Password,
        mut key_accessor: F,
    ) -> Result<Option<T>>
    where
        F: FnMut() -> Result<Option<T>>,
    {
        let _wp = wd::watch_millis("LegacyMigrator::with_try_migrate_super_key", 500);

        match key_accessor() {
            Ok(Some(result)) => return Ok(Some(result)),
            Ok(None) => {}
            Err(e) => return Err(e),
        }
        let pw = pw.try_clone().context("In with_try_migrate_super_key: Cloning password.")?;
        let result = self.do_serialized(move |migrator_state| {
            migrator_state.check_and_migrate_super_key(user_id, &pw)
        });

        if let Some(result) = result {
            result?;
            // After successful migration try again.
            key_accessor()
        } else {
            Ok(None)
        }
    }

    /// Deletes all keys belonging to the given namespace, migrating them into the database
    /// for subsequent garbage collection if necessary.
    pub fn bulk_delete_uid(&self, domain: Domain, nspace: i64) -> Result<()> {
        let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_uid", 500);

        let uid = match (domain, nspace) {
            (Domain::APP, nspace) => nspace as u32,
            (Domain::SELINUX, Self::WIFI_NAMESPACE) => Self::AID_WIFI,
            // Nothing to do.
            _ => return Ok(()),
        };

        let result = self.do_serialized(move |migrator_state| {
            migrator_state.bulk_delete(BulkDeleteRequest::Uid(uid), false)
        });

        result.unwrap_or(Ok(()))
    }

    /// Deletes all keys belonging to the given android user, migrating them into the database
    /// for subsequent garbage collection if necessary.
    pub fn bulk_delete_user(
        &self,
        user_id: u32,
        keep_non_super_encrypted_keys: bool,
    ) -> Result<()> {
        let _wp = wd::watch_millis("LegacyMigrator::bulk_delete_user", 500);

        let result = self.do_serialized(move |migrator_state| {
            migrator_state
                .bulk_delete(BulkDeleteRequest::User(user_id), keep_non_super_encrypted_keys)
        });

        result.unwrap_or(Ok(()))
    }

    /// Queries the legacy database for the presence of a super key for the given user.
    pub fn has_super_key(&self, user_id: u32) -> Result<bool> {
        let result =
            self.do_serialized(move |migrator_state| migrator_state.has_super_key(user_id));
        result.unwrap_or(Ok(false))
    }
}

impl LegacyMigratorState {
    fn get_km_uuid(&self, is_strongbox: bool) -> Result<Uuid> {
        let sec_level = if is_strongbox {
            SecurityLevel::STRONGBOX
        } else {
            SecurityLevel::TRUSTED_ENVIRONMENT
        };

        self.sec_level_to_km_uuid.get(&sec_level).copied().ok_or_else(|| {
            anyhow::anyhow!(Error::sys()).context("In get_km_uuid: No KM instance for blob.")
        })
    }

    fn list_uid(&mut self, uid: u32) -> Result<Vec<String>> {
        self.legacy_loader
            .list_keystore_entries_for_uid(uid)
            .context("In list_uid: Trying to list legacy entries.")
    }

    /// This is a key migration request that must run in the migrator thread. This must
    /// be passed to do_serialized.
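    /// On success the key is recorded in `recently_migrated` and its legacy entry is removed
    /// from the file system.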
    fn check_and_migrate(&mut self, uid: u32, mut key: KeyDescriptor) -> Result<()> {
        let alias = key.alias.clone().ok_or_else(|| {
            anyhow::anyhow!(Error::sys()).context(concat!(
                "In check_and_migrate: Must be Some because ",
                "our caller must not have called us otherwise."
            ))
        })?;

        if self.recently_migrated.contains(&RecentMigration::new(uid, alias.clone())) {
            return Ok(());
        }

        if key.domain == Domain::APP {
            key.nspace = uid as i64;
        }

        // If the key is not found in the cache, try to load from the legacy database.
        let (km_blob_params, user_cert, ca_cert) = self
            .legacy_loader
            .load_by_uid_alias(uid, &alias, None)
            .context("In check_and_migrate: Trying to load legacy blob.")?;
        let result = match km_blob_params {
            Some((km_blob, params)) => {
                let is_strongbox = km_blob.is_strongbox();
                let (blob, mut blob_metadata) = match km_blob.take_value() {
                    BlobValue::Encrypted { iv, tag, data } => {
                        // Get super key id for user id.
                        let user_id = uid_to_android_user(uid as u32);

                        let super_key_id = match self
                            .db
                            .load_super_key(&USER_SUPER_KEY, user_id)
                            .context("In check_and_migrate: Failed to load super key")?
                        {
                            Some((_, entry)) => entry.id(),
                            None => {
                                // This might be the first time we access the super key,
                                // and it may not have been migrated yet. We cannot import
                                // the legacy super key now, because it would have to be
                                // re-encrypted, and that is only possible while the user is
                                // unlocked. If the user were unlocked, the super key would
                                // already have been migrated. We can, however, check whether
                                // the key exists. If it does, we return Locked. Otherwise,
                                // we delete the key and return NotFound, because the key
                                // will never be unlocked again.
                                if self.legacy_loader.has_super_key(user_id) {
                                    return Err(Error::Rc(ResponseCode::LOCKED)).context(concat!(
                                        "In check_and_migrate: Cannot migrate super key of this ",
                                        "key while user is locked."
                                    ));
                                } else {
                                    self.legacy_loader.remove_keystore_entry(uid, &alias).context(
                                        concat!(
                                            "In check_and_migrate: ",
                                            "Trying to remove obsolete key."
                                        ),
                                    )?;
                                    return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                                        .context("In check_and_migrate: Obsolete key.");
                                }
                            }
                        };

                        let mut blob_metadata = BlobMetaData::new();
                        blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
                        blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
                        blob_metadata
                            .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
                        (LegacyBlob::Vec(data), blob_metadata)
                    }
                    BlobValue::Decrypted(data) => (LegacyBlob::ZVec(data), BlobMetaData::new()),
                    _ => {
                        return Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                            .context("In check_and_migrate: Legacy key has unexpected type.")
                    }
                };

                let km_uuid = self
                    .get_km_uuid(is_strongbox)
                    .context("In check_and_migrate: Trying to get KM UUID")?;
                blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));

                let mut metadata = KeyMetaData::new();
                let creation_date = DateTime::now()
                    .context("In check_and_migrate: Trying to make creation time.")?;
                metadata.add(KeyMetaEntry::CreationDate(creation_date));

                // Store legacy key in the database.
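                // The blob, its metadata, the key parameters, and any certificates are inserted
                // under the caller's descriptor, so subsequent lookups are served from the new
                // database.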
                self.db
                    .store_new_key(
                        &key,
                        KeyType::Client,
                        &params,
                        &(&blob, &blob_metadata),
                        &CertificateInfo::new(user_cert, ca_cert),
                        &metadata,
                        &km_uuid,
                    )
                    .context("In check_and_migrate.")?;
                Ok(())
            }
            None => {
                if let Some(ca_cert) = ca_cert {
                    self.db
                        .store_new_certificate(&key, KeyType::Client, &ca_cert, &KEYSTORE_UUID)
                        .context("In check_and_migrate: Failed to insert new certificate.")?;
                    Ok(())
                } else {
                    Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                        .context("In check_and_migrate: Legacy key not found.")
                }
            }
        };

        match result {
            Ok(()) => {
                // Add the key to the migrated_keys list.
                self.recently_migrated.insert(RecentMigration::new(uid, alias.clone()));
                // Delete legacy key from the file system.
                self.legacy_loader
                    .remove_keystore_entry(uid, &alias)
                    .context("In check_and_migrate: Trying to remove migrated key.")?;
                Ok(())
            }
            Err(e) => Err(e),
        }
    }

    fn check_and_migrate_super_key(&mut self, user_id: u32, pw: &Password) -> Result<()> {
        if self.recently_migrated_super_key.contains(&user_id) {
            return Ok(());
        }

        if let Some(super_key) = self
            .legacy_loader
            .load_super_key(user_id, &pw)
            .context("In check_and_migrate_super_key: Trying to load legacy super key.")?
        {
            let (blob, blob_metadata) =
                crate::super_key::SuperKeyManager::encrypt_with_password(&super_key, pw)
                    .context("In check_and_migrate_super_key: Trying to encrypt super key.")?;

            self.db
                .store_super_key(
                    user_id,
                    &USER_SUPER_KEY,
                    &blob,
                    &blob_metadata,
                    &KeyMetaData::new(),
                )
                .context(concat!(
                    "In check_and_migrate_super_key: ",
                    "Trying to insert legacy super_key into the database."
                ))?;
            self.legacy_loader.remove_super_key(user_id);
            self.recently_migrated_super_key.insert(user_id);
            Ok(())
        } else {
            Err(Error::Rc(ResponseCode::KEY_NOT_FOUND))
                .context("In check_and_migrate_super_key: No key found to migrate.")
        }
    }

    /// Key migrator request to be run by do_serialized.
    /// See LegacyMigrator::bulk_delete_uid and LegacyMigrator::bulk_delete_user.
    fn bulk_delete(
        &mut self,
        bulk_delete_request: BulkDeleteRequest,
        keep_non_super_encrypted_keys: bool,
    ) -> Result<()> {
        let (aliases, user_id) = match bulk_delete_request {
            BulkDeleteRequest::Uid(uid) => (
                self.legacy_loader
                    .list_keystore_entries_for_uid(uid)
                    .context("In bulk_delete: Trying to get aliases for uid.")
                    .map(|aliases| {
                        let mut h = HashMap::<u32, HashSet<String>>::new();
                        h.insert(uid, aliases.into_iter().collect());
                        h
                    })?,
                uid_to_android_user(uid),
            ),
            BulkDeleteRequest::User(user_id) => (
                self.legacy_loader
                    .list_keystore_entries_for_user(user_id)
                    .context("In bulk_delete: Trying to get aliases for user_id.")?,
                user_id,
            ),
        };

        let super_key_id = self
            .db
            .load_super_key(&USER_SUPER_KEY, user_id)
            .context("In bulk_delete: Failed to load super key")?
            .map(|(_, entry)| entry.id());

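        // For each remaining legacy entry, load its blob to decide whether it must be handed to
        // the garbage collector (e.g. rollback-resistant keys) before its file is removed.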
        for (uid, alias) in aliases
            .into_iter()
            .map(|(uid, aliases)| aliases.into_iter().map(move |alias| (uid, alias)))
            .flatten()
        {
            let (km_blob_params, _, _) = self
                .legacy_loader
                .load_by_uid_alias(uid, &alias, None)
                .context("In bulk_delete: Trying to load legacy blob.")?;

            // Determine if the key needs special handling to be deleted.
            let (need_gc, is_super_encrypted) = km_blob_params
                .as_ref()
                .map(|(blob, params)| {
                    (
                        params.iter().any(|kp| {
                            KeyParameterValue::RollbackResistance == *kp.key_parameter_value()
                        }),
                        blob.is_encrypted(),
                    )
                })
                .unwrap_or((false, false));

            if keep_non_super_encrypted_keys && !is_super_encrypted {
                continue;
            }

            if need_gc {
                let mark_deleted = match km_blob_params
                    .map(|(blob, _)| (blob.is_strongbox(), blob.take_value()))
                {
                    Some((is_strongbox, BlobValue::Encrypted { iv, tag, data })) => {
                        let mut blob_metadata = BlobMetaData::new();
                        if let (Ok(km_uuid), Some(super_key_id)) =
                            (self.get_km_uuid(is_strongbox), super_key_id)
                        {
                            blob_metadata.add(BlobMetaEntry::KmUuid(km_uuid));
                            blob_metadata.add(BlobMetaEntry::Iv(iv.to_vec()));
                            blob_metadata.add(BlobMetaEntry::AeadTag(tag.to_vec()));
                            blob_metadata
                                .add(BlobMetaEntry::EncryptedBy(EncryptedBy::KeyId(super_key_id)));
                            Some((LegacyBlob::Vec(data), blob_metadata))
                        } else {
                            // Oh well - we tried our best, but if we cannot determine which
                            // KeyMint instance we have to send this blob to, we cannot
                            // do more than delete the key from the file system.
                            // And if we don't know which key wraps this key we cannot
                            // unwrap it for KeyMint either.
                            None
                        }
                    }
                    Some((_, BlobValue::Decrypted(data))) => {
                        Some((LegacyBlob::ZVec(data), BlobMetaData::new()))
                    }
                    _ => None,
                };

                if let Some((blob, blob_metadata)) = mark_deleted {
                    self.db.set_deleted_blob(&blob, &blob_metadata).context(concat!(
                        "In bulk_delete: Trying to insert deleted ",
                        "blob into the database for garbage collection."
                    ))?;
                }
            }

            self.legacy_loader
                .remove_keystore_entry(uid, &alias)
                .context("In bulk_delete: Trying to remove migrated key.")?;
        }
        Ok(())
    }

    fn has_super_key(&mut self, user_id: u32) -> Result<bool> {
        Ok(self.recently_migrated_super_key.contains(&user_id)
            || self.legacy_loader.has_super_key(user_id))
    }

    fn check_empty(&self) -> u8 {
        if self.legacy_loader.is_empty().unwrap_or(false) {
            LegacyMigrator::STATE_EMPTY
        } else {
            LegacyMigrator::STATE_READY
        }
    }
}

enum LegacyBlob {
    Vec(Vec<u8>),
    ZVec(ZVec),
}

impl Deref for LegacyBlob {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        match self {
            Self::Vec(v) => &v,
            Self::ZVec(v) => &v,
        }
    }
}
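
// The module below is an illustrative sketch, not part of the original module. It isolates the
// deferred-initialization pattern used by LegacyMigrator::set_init/check_state above: an
// expensive resource is produced by a boxed FnOnce behind a Mutex, and a cheap AtomicU8 answers
// all later calls once the state has been resolved. All names here are hypothetical and only std
// types are used; the real migrator additionally hands the resources to its async task.
#[cfg(test)]
mod deferred_init_sketch {
    use std::sync::atomic::{AtomicU8, Ordering};
    use std::sync::Mutex;

    const UNINITIALIZED: u8 = 0;
    const READY: u8 = 1;
    const EMPTY: u8 = 2;

    struct Lazy {
        // Boxed FnOnce so the (possibly expensive) initializer runs at most once.
        // It returns true if there is work to do (READY) and false otherwise (EMPTY).
        init: Mutex<Option<Box<dyn FnOnce() -> bool + Send>>>,
        state: AtomicU8,
    }

    impl Lazy {
        fn new<F: FnOnce() -> bool + Send + 'static>(f: F) -> Self {
            Self { init: Mutex::new(Some(Box::new(f))), state: AtomicU8::new(UNINITIALIZED) }
        }

        // Mirrors LegacyMigrator::check_state: resolve the state under the lock on first use,
        // then answer from the atomic without taking the lock again.
        fn check_state(&self) -> u8 {
            if self.state.load(Ordering::Relaxed) != UNINITIALIZED {
                return self.state.load(Ordering::Relaxed);
            }
            let mut guard = self.init.lock().unwrap();
            if let Some(f) = guard.take() {
                let s = if f() { READY } else { EMPTY };
                self.state.store(s, Ordering::Relaxed);
            }
            // If another thread won the race, it already stored READY or EMPTY.
            self.state.load(Ordering::Relaxed)
        }
    }

    #[test]
    fn initializer_runs_once() {
        // Pretend the legacy database is non-empty; repeated calls stay READY.
        let lazy = Lazy::new(|| true);
        assert_eq!(lazy.check_state(), READY);
        assert_eq!(lazy.check_state(), READY);
    }

    #[test]
    fn empty_database_is_remembered() {
        // Pretend the legacy database is empty; the cheap path returns EMPTY thereafter.
        let lazy = Lazy::new(|| false);
        assert_eq!(lazy.check_state(), EMPTY);
        assert_eq!(lazy.check_state(), EMPTY);
    }
}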