/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.android.server.healthconnect.backuprestore;

import static android.health.connect.Constants.DEFAULT_INT;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_FETCHING_DATA;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_NONE;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_UNKNOWN;
import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_VERSION_DIFF;
import static android.health.connect.HealthConnectDataState.RESTORE_STATE_IDLE;
import static android.health.connect.HealthConnectDataState.RESTORE_STATE_IN_PROGRESS;
import static android.health.connect.HealthConnectDataState.RESTORE_STATE_PENDING;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_COMPLETE;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_FAILED;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_RETRY;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_STARTED;
import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_STATE_UNKNOWN;

import static com.android.server.healthconnect.backuprestore.BackupRestore.BackupRestoreJobService.EXTRA_JOB_NAME_KEY;
import static com.android.server.healthconnect.backuprestore.BackupRestore.BackupRestoreJobService.EXTRA_USER_ID;

import static java.util.Objects.requireNonNull;

import android.annotation.IntDef;
import android.app.job.JobInfo;
import android.app.job.JobParameters;
import android.app.job.JobScheduler;
import android.app.job.JobService;
import android.content.ComponentName;
import android.content.Context;
import android.database.sqlite.SQLiteDatabase;
import android.health.connect.HealthConnectDataState;
import android.health.connect.HealthConnectException;
import android.health.connect.HealthConnectManager.DataDownloadState;
import android.health.connect.aidl.IDataStagingFinishedCallback;
import android.health.connect.restore.BackupFileNamesSet;
import android.health.connect.restore.StageRemoteDataException;
import android.health.connect.restore.StageRemoteDataRequest;
import android.os.Binder;
import android.os.ParcelFileDescriptor;
import android.os.PersistableBundle;
import android.os.RemoteException;
import android.os.UserHandle;
import android.text.format.DateUtils;
import android.util.ArrayMap;
import android.util.ArraySet;
import android.util.Log;
import android.util.Slog;

import com.android.healthfitness.flags.Flags;
import com.android.internal.annotations.VisibleForTesting;
import com.android.server.healthconnect.HealthConnectThreadScheduler;
import com.android.server.healthconnect.exportimport.DatabaseMerger;
import com.android.server.healthconnect.fitness.FitnessRecordReadHelper;
import com.android.server.healthconnect.fitness.FitnessRecordUpsertHelper;
import com.android.server.healthconnect.migration.MigrationStateManager;
import com.android.server.healthconnect.permission.FirstGrantTimeManager;
import com.android.server.healthconnect.permission.GrantTimeXmlHelper;
import com.android.server.healthconnect.permission.UserGrantTimeState;
import com.android.server.healthconnect.storage.HealthConnectContext;
import com.android.server.healthconnect.storage.HealthConnectDatabase;
import com.android.server.healthconnect.storage.TransactionManager;
import com.android.server.healthconnect.storage.datatypehelpers.AppInfoHelper;
import com.android.server.healthconnect.storage.datatypehelpers.DeviceInfoHelper;
import com.android.server.healthconnect.storage.datatypehelpers.HealthDataCategoryPriorityHelper;
import com.android.server.healthconnect.storage.datatypehelpers.MedicalDataSourceHelper;
import com.android.server.healthconnect.storage.datatypehelpers.MedicalResourceHelper;
import com.android.server.healthconnect.storage.datatypehelpers.MedicalResourceIndicesHelper;
import com.android.server.healthconnect.storage.datatypehelpers.PreferenceHelper;
import com.android.server.healthconnect.utils.FilesUtil;
import com.android.server.healthconnect.utils.RunnableWithThrowable;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.time.Instant;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Class responsible for performing backup / restore related tasks.
 *
 * @hide
 */
public final class BackupRestore {
    // Key for storing the current data download state
    @VisibleForTesting
    public static final String DATA_DOWNLOAD_STATE_KEY = "data_download_state_key";

    // The values below for the IntDef are defined in chronological order of the restore process.
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_UNKNOWN = 0;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING = 1;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS = 2;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_STAGING_DONE = 3;
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS = 4;
    // See b/290172311 for details.
    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_MERGING_DONE_OLD_CODE = 5;

    @VisibleForTesting public static final int INTERNAL_RESTORE_STATE_MERGING_DONE = 6;
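
    // Illustrative walkthrough of the internal state machine above (derived from the constants'
    // ordering and the transitions in this class; not an exhaustive list): a cloud restore
    // typically moves through
    //   DATA_DOWNLOAD_STARTED/RETRY -> DATA_DOWNLOAD_COMPLETE -> WAITING_FOR_STAGING
    //   -> STAGING_IN_PROGRESS -> STAGING_DONE -> MERGING_IN_PROGRESS -> MERGING_DONE,
    // while a failed download jumps straight to MERGING_DONE with RESTORE_ERROR_FETCHING_DATA.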

    @VisibleForTesting
    static final long DATA_DOWNLOAD_TIMEOUT_INTERVAL_MILLIS = 14 * DateUtils.DAY_IN_MILLIS;

    @VisibleForTesting
    static final long DATA_STAGING_TIMEOUT_INTERVAL_MILLIS = DateUtils.DAY_IN_MILLIS;

    @VisibleForTesting
    static final long DATA_MERGING_TIMEOUT_INTERVAL_MILLIS = 5 * DateUtils.DAY_IN_MILLIS;

    @VisibleForTesting
    static final long DATA_MERGING_RETRY_DELAY_MILLIS = 12 * DateUtils.HOUR_IN_MILLIS;

    // Used in #setOverrideDeadline to set a minimum window of 24 hours. See b/311402873,
    // b/319721118.
    @VisibleForTesting
    static final long MINIMUM_LATENCY_WINDOW_MILLIS = 24 * DateUtils.HOUR_IN_MILLIS;

    @VisibleForTesting static final String DATA_DOWNLOAD_TIMEOUT_KEY = "data_download_timeout_key";

    @VisibleForTesting static final String DATA_STAGING_TIMEOUT_KEY = "data_staging_timeout_key";
    @VisibleForTesting static final String DATA_MERGING_TIMEOUT_KEY = "data_merging_timeout_key";

    @VisibleForTesting
    static final String DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY = "data_download_timeout_cancelled_key";

    @VisibleForTesting
    static final String DATA_STAGING_TIMEOUT_CANCELLED_KEY = "data_staging_timeout_cancelled_key";

    @VisibleForTesting
    static final String DATA_MERGING_TIMEOUT_CANCELLED_KEY = "data_merging_timeout_cancelled_key";

    @VisibleForTesting static final String DATA_MERGING_RETRY_KEY = "data_merging_retry_key";
    private static final String DATA_MERGING_RETRY_CANCELLED_KEY =
            "data_merging_retry_cancelled_key";

    @Retention(RetentionPolicy.SOURCE)
    @IntDef({
        INTERNAL_RESTORE_STATE_UNKNOWN,
        INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING,
        INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS,
        INTERNAL_RESTORE_STATE_STAGING_DONE,
        INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS,
        INTERNAL_RESTORE_STATE_MERGING_DONE_OLD_CODE,
        INTERNAL_RESTORE_STATE_MERGING_DONE
    })
    public @interface InternalRestoreState {}

    // Key for storing the current data restore state on disk.
    public static final String DATA_RESTORE_STATE_KEY = "data_restore_state_key";
    // Key for storing the error restoring HC data.
    public static final String DATA_RESTORE_ERROR_KEY = "data_restore_error_key";

    @VisibleForTesting
    static final String GRANT_TIME_FILE_NAME = "health-permissions-first-grant-times.xml";

    @VisibleForTesting static final String STAGED_DATABASE_DIR = "remote_staged";

    @VisibleForTesting static final String STAGED_DATABASE_NAME = "healthconnect_staged.db";

    private static final String DATABASE_BACKUP_FILE_NAME = "healthconnect_backup.db";
    private static final String BACKUP_DIR = "backup";
    private static final List<String> PHR_TABLES_TO_CLEAR =
            List.of(
                    MedicalDataSourceHelper.getMainTableName(),
                    MedicalResourceHelper.getMainTableName(),
                    MedicalResourceIndicesHelper.getTableName());

    private static final String TAG = "HealthConnectBackupRestore";
    private final ReentrantReadWriteLock mStatesLock = new ReentrantReadWriteLock(true);
    private final FirstGrantTimeManager mFirstGrantTimeManager;
    private final MigrationStateManager mMigrationStateManager;

    private final Context mContext;
    private final Object mMergingLock = new Object();

    private final DatabaseMerger mDatabaseMerger;

    private final PreferenceHelper mPreferenceHelper;
    private final TransactionManager mTransactionManager;
    private final File mEnvironmentDataDirectory;

    private boolean mActivelyStagingRemoteData = false;

    private volatile UserHandle mCurrentForegroundUser;
    private final HealthConnectThreadScheduler mThreadScheduler;
    private final BackupRestoreJobScheduler mJobScheduler;

    @SuppressWarnings("NullAway.Init") // TODO(b/317029272): fix this suppression
    public BackupRestore(
            AppInfoHelper appInfoHelper,
            FirstGrantTimeManager firstGrantTimeManager,
            MigrationStateManager migrationStateManager,
            PreferenceHelper preferenceHelper,
            TransactionManager transactionManager,
            FitnessRecordUpsertHelper fitnessRecordUpsertHelper,
            FitnessRecordReadHelper fitnessRecordReadHelper,
            Context context,
            DeviceInfoHelper deviceInfoHelper,
            HealthDataCategoryPriorityHelper healthDataCategoryPriorityHelper,
            HealthConnectThreadScheduler threadScheduler,
            File environmentDataDirectory) {
        this(
                appInfoHelper,
                firstGrantTimeManager,
                migrationStateManager,
                preferenceHelper,
                transactionManager,
                fitnessRecordUpsertHelper,
                fitnessRecordReadHelper,
                context,
                deviceInfoHelper,
                healthDataCategoryPriorityHelper,
                threadScheduler,
                environmentDataDirectory,
                new BackupRestoreJobScheduler());
    }

    @VisibleForTesting
    BackupRestore(
            AppInfoHelper appInfoHelper,
            FirstGrantTimeManager firstGrantTimeManager,
            MigrationStateManager migrationStateManager,
            PreferenceHelper preferenceHelper,
            TransactionManager transactionManager,
            FitnessRecordUpsertHelper fitnessRecordUpsertHelper,
            FitnessRecordReadHelper fitnessRecordReadHelper,
            Context context,
            DeviceInfoHelper deviceInfoHelper,
            HealthDataCategoryPriorityHelper healthDataCategoryPriorityHelper,
            HealthConnectThreadScheduler threadScheduler,
            File environmentDataDirectory,
            BackupRestoreJobScheduler jobScheduler) {
        mFirstGrantTimeManager = firstGrantTimeManager;
        mMigrationStateManager = migrationStateManager;
        mContext = context;
        mCurrentForegroundUser = mContext.getUser();
        mDatabaseMerger =
                new DatabaseMerger(
                        appInfoHelper,
                        deviceInfoHelper,
                        healthDataCategoryPriorityHelper,
                        transactionManager,
                        fitnessRecordUpsertHelper,
                        fitnessRecordReadHelper);
        mPreferenceHelper = preferenceHelper;
        mTransactionManager = transactionManager;
        mThreadScheduler = threadScheduler;
        mEnvironmentDataDirectory = environmentDataDirectory;
        mJobScheduler = jobScheduler;
    }

    public void setupForUser(UserHandle currentForegroundUser) {
        Slog.d(TAG, "Performing user switch operations.");
        mCurrentForegroundUser = currentForegroundUser;
        mThreadScheduler.scheduleInternalTask(this::scheduleAllJobs);
    }

    /**
     * Prepares for staging all health connect remote data.
     *
     * @return true if the preparation was successful; false if staging is already in progress or
     *     already done.
     */
    public boolean prepForStagingIfNotAlreadyDone() {
        mStatesLock.writeLock().lock();
        try {
            Slog.d(TAG, "Prepping for staging.");
            setDataDownloadState(DATA_DOWNLOAD_COMPLETE, false /* force */);
            @InternalRestoreState int curDataRestoreState = getInternalRestoreState();
            if (curDataRestoreState >= INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
                if (curDataRestoreState >= INTERNAL_RESTORE_STATE_STAGING_DONE) {
                    Slog.w(TAG, "Staging is already done. Cur state " + curDataRestoreState);
                } else {
                    // Maybe the caller died and is trying to stage the data again.
                    Slog.w(TAG, "Already in the process of staging.");
                }
                return false;
            }
            mActivelyStagingRemoteData = true;
            setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS, false /* force */);
            return true;
        } finally {
            mStatesLock.writeLock().unlock();
        }
    }
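
    // Minimal caller sketch (hypothetical, not part of this class): the staging entry point is
    // expected to be used roughly as follows, with merging kicked off automatically once the
    // staging callback has been delivered:
    //
    //   if (backupRestore.prepForStagingIfNotAlreadyDone()) {
    //       backupRestore.stageAllHealthConnectRemoteData(pfdsByFileName, errors, user, callback);
    //   }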

    /**
     * Stages all health connect remote data for merging later.
     *
     * <p>This should be called on the proper thread.
     */
    public void stageAllHealthConnectRemoteData(
            Map<String, ParcelFileDescriptor> pfdsByFileName,
            Map<String, HealthConnectException> exceptionsByFileName,
            UserHandle userHandle,
            IDataStagingFinishedCallback callback) {
        HealthConnectContext dbContext =
                HealthConnectContext.create(
                        mContext, userHandle, STAGED_DATABASE_DIR, mEnvironmentDataDirectory);
        File stagedRemoteDataDir = dbContext.getDataDir();
        try {
            stagedRemoteDataDir.mkdirs();

            // Now that we have the dir we can try to copy all the data.
            // Any exceptions we face will be collected and shared with the caller.
            pfdsByFileName.forEach(
                    (fileName, pfd) -> {
                        File destination = new File(stagedRemoteDataDir, fileName);
                        try (FileInputStream inputStream =
                                new FileInputStream(pfd.getFileDescriptor())) {
                            Path destinationPath =
                                    FileSystems.getDefault().getPath(destination.getAbsolutePath());
                            Files.copy(
                                    inputStream,
                                    destinationPath,
                                    StandardCopyOption.REPLACE_EXISTING);
                        } catch (IOException e) {
                            Slog.e(
                                    TAG,
                                    "Failed to copy to destination: " + destination.getName(),
                                    e);
                            destination.delete();
                            exceptionsByFileName.put(
                                    fileName,
                                    new HealthConnectException(
                                            HealthConnectException.ERROR_IO, e.getMessage()));
                        } catch (SecurityException e) {
                            Slog.e(
                                    TAG,
                                    "Failed to copy to destination: " + destination.getName(),
                                    e);
                            destination.delete();
                            exceptionsByFileName.put(
                                    fileName,
                                    new HealthConnectException(
                                            HealthConnectException.ERROR_SECURITY,
                                            e.getMessage()));
                        } finally {
                            try {
                                pfd.close();
                            } catch (IOException e) {
                                exceptionsByFileName.put(
                                        fileName,
                                        new HealthConnectException(
                                                HealthConnectException.ERROR_IO, e.getMessage()));
                            }
                        }
                    });
        } finally {
            // We are done staging all the remote data, update the data restore state.
            // Even if we encountered any exception we still say that we are "done" as
            // we don't expect the caller to retry and see different results.
            setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_DONE, false);
            mActivelyStagingRemoteData = false;

            // Share the result / exception with the caller.
            try {
                if (exceptionsByFileName.isEmpty()) {
                    callback.onResult();
                    Slog.i(TAG, "Restore response sent successfully to caller.");
                } else {
                    Slog.i(TAG, "Exceptions encountered during staging.");
                    setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
                    callback.onError(new StageRemoteDataException(exceptionsByFileName));
                }
            } catch (RemoteException e) {
                Log.e(TAG, "Restore response could not be sent to the caller.", e);
            } catch (SecurityException e) {
                Log.e(
                        TAG,
                        "Restore response could not be sent due to conflicting AIDL definitions",
                        e);
            } finally {
                // Now that the callback for the stageAllHealthConnectRemoteData API has been
                // called we can start the merging process.
                merge();
            }
        }
    }
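
    // Note on the callback contract above: staging is marked STAGING_DONE even when individual
    // files fail to copy; success is reported via callback.onResult(), while any per-file
    // failures are bundled into a single StageRemoteDataException keyed by file name and
    // delivered via callback.onError(). Either way, merging is attempted immediately afterwards.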

    /** Writes the backup data into files represented by the passed file descriptors. */
    public void getAllDataForBackup(
            StageRemoteDataRequest stageRemoteDataRequest, UserHandle userHandle) {
        Slog.i(
                TAG,
                "getAllDataForBackup, number of files to backup = "
                        + stageRemoteDataRequest.getPfdsByFileName().size());

        Map<String, ParcelFileDescriptor> pfdsByFileName =
                stageRemoteDataRequest.getPfdsByFileName();

        // If PERSONAL_HEALTH_RECORD_DISABLE_D2D is enabled, create a temporary copy of the
        // HC database and delete all the PHR tables' content from it.
        // Default to the original database path; if PERSONAL_HEALTH_RECORD_DISABLE_D2D is
        // enabled, this is updated to point at the database copy instead.
        File databasePath = mTransactionManager.getDatabasePath();
        HealthConnectContext dbContext =
                HealthConnectContext.create(
                        mContext, userHandle, BACKUP_DIR, mEnvironmentDataDirectory);
        File backupDataDir = dbContext.getDataDir();
        if (Flags.personalHealthRecordDisableD2d()) {
            databasePath = new File(backupDataDir, DATABASE_BACKUP_FILE_NAME);
            try {
                // Copies the HC database to the temp file.
                copyDatabase(databasePath);
            } catch (Exception e) {
                Slog.e(TAG, "Failed to create local file for backup", e);
                return;
            }

            try {
                // Deletes the PHR tables' content from the temp file.
                deletePhrTablesContent(dbContext);
            } catch (Exception e) {
                Slog.e(TAG, "Failed to clear PHR tables.", e);
                return;
            }
        }

        var backupFilesByFileNames =
                getBackupFilesByFileNames(userHandle, backupDataDir, databasePath);
        pfdsByFileName.forEach(
                (fileName, pfd) -> {
                    @SuppressWarnings("NullAway") // TODO(b/317029272): fix this suppression
                    Path sourceFilePath = backupFilesByFileNames.get(fileName).toPath();
                    try (FileOutputStream outputStream =
                            new FileOutputStream(pfd.getFileDescriptor())) {
                        Files.copy(sourceFilePath, outputStream);
                    } catch (IOException | SecurityException e) {
                        Slog.e(TAG, "Failed to send " + fileName + " for backup", e);
                    } finally {
                        try {
                            pfd.close();
                        } catch (IOException e) {
                            Slog.e(TAG, "Failed to close " + fileName + " for backup", e);
                        }
                    }
                });

        deleteBackupFiles(backupDataDir);
    }
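
    // What a backup produces (per the helpers below): the database itself, keyed as
    // STAGED_DATABASE_NAME (either the live database path or, with PHR D2D disabled, a temporary
    // copy with the PHR tables cleared), plus the first-grant-time XML snapshot keyed as
    // GRANT_TIME_FILE_NAME. Temporary files are deleted once they have been streamed out to the
    // caller's file descriptors.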

    /** Gets the file names of all the files that are transported during backup / restore. */
    public BackupFileNamesSet getAllBackupFileNames(boolean forDeviceToDevice) {
        Slog.i(TAG, "getAllBackupFileNames, forDeviceToDevice = " + forDeviceToDevice);
        ArraySet<String> backupFileNames = new ArraySet<>();
        if (forDeviceToDevice) {
            backupFileNames.add(STAGED_DATABASE_NAME);
        }
        backupFileNames.add(GRANT_TIME_FILE_NAME);
        return new BackupFileNamesSet(backupFileNames);
    }

    private void copyDatabase(File destination) throws IOException {
        Slog.i(TAG, "Database copying started.");

        // Make sure the destination's parent directory exists before copying the database file.
        File destinationDir = destination.getParentFile();
        if (destinationDir != null && !destinationDir.exists() && !destinationDir.mkdirs()) {
            throw new IOException("Unable to create directory for the database copy.");
        }

        Files.copy(
                mTransactionManager.getDatabasePath().toPath(),
                destination.toPath(),
                StandardCopyOption.REPLACE_EXISTING);

        Slog.i(TAG, "Database copying completed: " + destination.toPath().toAbsolutePath());
    }

    private void deletePhrTablesContent(HealthConnectContext dbContext) {
        // An exception thrown from this method implies that it was not possible to open an HC
        // database from the file and, therefore, most probably the database was corrupted during
        // the file copy.
        try (HealthConnectDatabase exportDatabase =
                new HealthConnectDatabase(dbContext, DATABASE_BACKUP_FILE_NAME)) {
            SQLiteDatabase db = exportDatabase.getWritableDatabase();
            for (String tableName : PHR_TABLES_TO_CLEAR) {
                db.execSQL("DELETE FROM " + tableName + ";");
            }
        }
        Slog.i(TAG, "Clearing PHR tables completed.");
    }

    /** Updates the download state of the remote data. */
    public void updateDataDownloadState(@DataDownloadState int downloadState) {
        setDataDownloadState(downloadState, false /* force */);

        if (downloadState == DATA_DOWNLOAD_COMPLETE) {
            setInternalRestoreState(INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING, false /* force */);
        } else if (downloadState == DATA_DOWNLOAD_FAILED) {
            setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false /* force */);
            setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
        }
    }

    /** Deletes all the staged data and resets all the states. */
    @SuppressWarnings("NullAway") // TODO(b/317029272): fix this suppression
    public void deleteAndResetEverything(UserHandle userHandle) {
        HealthConnectContext dbContext =
                HealthConnectContext.create(
                        mContext, userHandle, STAGED_DATABASE_DIR, mEnvironmentDataDirectory);

        // Don't delete anything while we are in the process of merging staged data.
        synchronized (mMergingLock) {
            dbContext.deleteDatabase(STAGED_DATABASE_NAME);
            FilesUtil.deleteDir(dbContext.getDataDir());
        }
        setDataDownloadState(DATA_DOWNLOAD_STATE_UNKNOWN, true /* force */);
        setInternalRestoreState(INTERNAL_RESTORE_STATE_UNKNOWN, true /* force */);
        setDataRestoreError(RESTORE_ERROR_NONE);
    }
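
    // Mapping implemented by getDataRestoreState() below, for quick reference:
    //   internal UNKNOWN + download UNKNOWN -> RESTORE_STATE_IDLE (nothing started)
    //   internal MERGING_DONE               -> RESTORE_STATE_IDLE (restore finished)
    //   internal MERGING_IN_PROGRESS        -> RESTORE_STATE_IN_PROGRESS
    //   everything else                     -> RESTORE_STATE_PENDING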

    /** Returns the data restore state shared via {@link HealthConnectDataState}. */
    public @HealthConnectDataState.DataRestoreState int getDataRestoreState() {
        @InternalRestoreState int currentRestoreState = getInternalRestoreState();
        @DataDownloadState int currentDownloadState = getDataDownloadState();

        // Return IDLE if neither the download nor the restore has started yet.
        if (currentRestoreState == INTERNAL_RESTORE_STATE_UNKNOWN
                && currentDownloadState == DATA_DOWNLOAD_STATE_UNKNOWN) {
            return RESTORE_STATE_IDLE;
        }

        // Return IDLE if restore is complete.
        if (currentRestoreState == INTERNAL_RESTORE_STATE_MERGING_DONE) {
            return RESTORE_STATE_IDLE;
        }
        // Return IN_PROGRESS if merging is currently in progress.
        if (currentRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            return RESTORE_STATE_IN_PROGRESS;
        }

        // In all other cases, return restore pending.
        return RESTORE_STATE_PENDING;
    }

    /** Gets the current data restore error. */
    public @HealthConnectDataState.DataRestoreError int getDataRestoreError() {
        @HealthConnectDataState.DataRestoreError int dataRestoreError = RESTORE_ERROR_NONE;
        String restoreErrorOnDisk = mPreferenceHelper.getPreference(DATA_RESTORE_ERROR_KEY);

        if (restoreErrorOnDisk == null) {
            return dataRestoreError;
        }
        try {
            dataRestoreError = Integer.parseInt(restoreErrorOnDisk);
        } catch (Exception e) {
            Slog.e(TAG, "Exception parsing restoreErrorOnDisk " + restoreErrorOnDisk, e);
        }
        return dataRestoreError;
    }

    /** Returns the file names of all the staged files. */
    @VisibleForTesting
    public Set<String> getStagedRemoteFileNames(UserHandle userHandle) {
        HealthConnectContext dbContext =
                HealthConnectContext.create(
                        mContext, userHandle, STAGED_DATABASE_DIR, mEnvironmentDataDirectory);
        File[] allFiles = dbContext.getDataDir().listFiles();
        if (allFiles == null) {
            return Collections.emptySet();
        }
        return Stream.of(allFiles)
                .filter(file -> !file.isDirectory())
                .map(File::getName)
                .collect(Collectors.toSet());
    }

    /** Returns true if restore merging is in progress. API calls are blocked when this is true. */
    public boolean isRestoreMergingInProgress() {
        return getInternalRestoreState() == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS;
    }

    /** Schedules any pending jobs. */
    public void scheduleAllJobs() {
        scheduleDownloadStateTimeoutJob();
        scheduleStagingTimeoutJob();
        scheduleMergingTimeoutJob();

        // We can schedule "retry merging" only if we are in the STAGING_DONE state. However, if
        // we are in the STAGING_DONE state, then we should definitely attempt merging now - and
        // that's what we do below. So there's no point in scheduling a "retry merging" job. If
        // Migration is going on, then the merge attempt will take care of that automatically
        // (and schedule the retry job as needed).
        triggerMergingIfApplicable();
    }

    /** Cancels all the jobs and sets the cancelled time. */
    public void cancelAllJobs() {
        mJobScheduler.cancelAllJobs(mContext);
        setJobCancelledTimeIfExists(DATA_DOWNLOAD_TIMEOUT_KEY, DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY);
        setJobCancelledTimeIfExists(DATA_STAGING_TIMEOUT_KEY, DATA_STAGING_TIMEOUT_CANCELLED_KEY);
        setJobCancelledTimeIfExists(DATA_MERGING_TIMEOUT_KEY, DATA_MERGING_TIMEOUT_CANCELLED_KEY);
        setJobCancelledTimeIfExists(DATA_MERGING_RETRY_KEY, DATA_MERGING_RETRY_CANCELLED_KEY);
    }

    public UserHandle getCurrentUserHandle() {
        return mCurrentForegroundUser;
    }

    void setInternalRestoreState(@InternalRestoreState int dataRestoreState, boolean force) {
        @InternalRestoreState int currentRestoreState = getInternalRestoreState();
        mStatesLock.writeLock().lock();
        try {
            if (!force && currentRestoreState >= dataRestoreState) {
                Slog.w(
                        TAG,
                        "Attempt to update data restore state in wrong order from "
                                + currentRestoreState
                                + " to "
                                + dataRestoreState);
                return;
            }
            mPreferenceHelper.insertOrReplacePreference(
                    DATA_RESTORE_STATE_KEY, String.valueOf(dataRestoreState));

            if (dataRestoreState == INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
                    || dataRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
                scheduleStagingTimeoutJob();
            } else if (dataRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
                scheduleMergingTimeoutJob();
            }
        } finally {
            mStatesLock.writeLock().unlock();
        }
    }

    @InternalRestoreState
    int getInternalRestoreState() {
        mStatesLock.readLock().lock();
        try {
            String restoreStateOnDisk = mPreferenceHelper.getPreference(DATA_RESTORE_STATE_KEY);
            @InternalRestoreState int currentRestoreState = INTERNAL_RESTORE_STATE_UNKNOWN;
            if (restoreStateOnDisk == null) {
                return currentRestoreState;
            }
            try {
                currentRestoreState = Integer.parseInt(restoreStateOnDisk);
            } catch (Exception e) {
                Slog.e(TAG, "Exception parsing restoreStateOnDisk: " + restoreStateOnDisk, e);
            }
            // If we are not actively staging the data right now but the disk still reflects that
            // we are, then that means we died in the middle of staging. We should be waiting for
            // the remote data to be staged now.
            if (!mActivelyStagingRemoteData
                    && currentRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
                currentRestoreState = INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING;
            }
            return currentRestoreState;
        } finally {
            mStatesLock.readLock().unlock();
        }
    }

    /** Returns true if this job needs rescheduling; false otherwise. */
    @VisibleForTesting
    boolean handleJob(PersistableBundle extras) {
        String jobName = extras.getString(EXTRA_JOB_NAME_KEY);
        switch (jobName) {
            case DATA_DOWNLOAD_TIMEOUT_KEY -> executeDownloadStateTimeoutJob();
            case DATA_STAGING_TIMEOUT_KEY -> executeStagingTimeoutJob();
            case DATA_MERGING_TIMEOUT_KEY -> executeMergingTimeoutJob();
            case DATA_MERGING_RETRY_KEY -> executeRetryMergingJob();
            default -> Slog.w(TAG, "Unknown job " + jobName + " delivered.");
        }
        // None of the jobs want to reschedule.
        return false;
    }

    @VisibleForTesting
    boolean shouldAttemptMerging() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_DONE
                || internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS
                || internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_DONE_OLD_CODE) {
            Slog.i(TAG, "Should attempt merging now with state = " + internalRestoreState);
            return true;
        }
        return false;
    }
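
    // The merge below is deliberately defensive: it re-checks the internal state, defers to an
    // in-progress migration (scheduling a retry instead), and refuses to merge a staged database
    // whose schema version is newer than the module's own. For example (versions illustrative),
    // a staged version 12 against a current version 10 records RESTORE_ERROR_VERSION_DIFF and
    // waits for a module upgrade rather than merging.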

    @VisibleForTesting
    void merge() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState >= INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            Slog.i(TAG, "Not merging as internalRestoreState is " + internalRestoreState);
            return;
        }

        if (mMigrationStateManager.isMigrationInProgress()) {
            Slog.i(TAG, "Not merging as Migration in progress.");
            scheduleRetryMergingJob();
            return;
        }

        int currentDbVersion = mTransactionManager.getDatabaseVersion();
        HealthConnectContext dbContext =
                HealthConnectContext.create(
                        mContext,
                        mCurrentForegroundUser,
                        STAGED_DATABASE_DIR,
                        mEnvironmentDataDirectory);
        File stagedDbFile = dbContext.getDatabasePath(STAGED_DATABASE_NAME);
        if (stagedDbFile.exists()) {
            try (SQLiteDatabase stagedDb =
                    SQLiteDatabase.openDatabase(
                            stagedDbFile, new SQLiteDatabase.OpenParams.Builder().build())) {
                int stagedDbVersion = stagedDb.getVersion();
                Slog.i(
                        TAG,
                        "merging staged data, current version = "
                                + currentDbVersion
                                + ", staged version = "
                                + stagedDbVersion);
                if (currentDbVersion < stagedDbVersion) {
                    Slog.i(TAG, "Module needs upgrade for merging to version " + stagedDbVersion);
                    setDataRestoreError(RESTORE_ERROR_VERSION_DIFF);
                    return;
                }
            }
        } else {
            Slog.i(TAG, "No database file found to merge.");
        }

        Slog.i(TAG, "Starting the data merge.");
        setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS, false);
        mergeGrantTimes(dbContext);
        mergeDatabase(dbContext);
        setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);

        // Reset the error in case it was due to version diff.
        // TODO(b/327170886): Should we always set it to NONE once merging is done?
        if (getDataRestoreError() == RESTORE_ERROR_VERSION_DIFF) {
            setDataRestoreError(RESTORE_ERROR_NONE);
        }
    }

    private Map<String, File> getBackupFilesByFileNames(
            UserHandle userHandle, File backupDataDir, File databasePath) {
        ArrayMap<String, File> backupFilesByFileNames = new ArrayMap<>();

        backupFilesByFileNames.put(STAGED_DATABASE_NAME, databasePath);

        File grantTimeFile = new File(backupDataDir, GRANT_TIME_FILE_NAME);
        try {
            grantTimeFile.createNewFile();
            GrantTimeXmlHelper.serializeGrantTimes(
                    grantTimeFile, mFirstGrantTimeManager.getGrantTimeStateForUser(userHandle));
            backupFilesByFileNames.put(grantTimeFile.getName(), grantTimeFile);
        } catch (IOException e) {
            Slog.e(TAG, "Could not create the grant time file for backup.", e);
        }

        return backupFilesByFileNames;
    }

    private void deleteBackupFiles(File backupDataDir) {
        if (Flags.personalHealthRecordDisableD2d()) {
            File databaseBackupFile = new File(backupDataDir, DATABASE_BACKUP_FILE_NAME);
            databaseBackupFile.delete();
        }
        // We only create a backup copy for grant times. The DB is copied from the source.
        File grantTimeFile = new File(backupDataDir, GRANT_TIME_FILE_NAME);
        grantTimeFile.delete();
    }

    @DataDownloadState
    private int getDataDownloadState() {
        mStatesLock.readLock().lock();
        try {
            String downloadStateOnDisk = mPreferenceHelper.getPreference(DATA_DOWNLOAD_STATE_KEY);
            @DataDownloadState int currentDownloadState = DATA_DOWNLOAD_STATE_UNKNOWN;
            if (downloadStateOnDisk == null) {
                return currentDownloadState;
            }
            try {
                currentDownloadState = Integer.parseInt(downloadStateOnDisk);
            } catch (Exception e) {
                Slog.e(TAG, "Exception parsing downloadStateOnDisk " + downloadStateOnDisk, e);
            }
            return currentDownloadState;
        } finally {
            mStatesLock.readLock().unlock();
        }
    }

    private void setDataDownloadState(@DataDownloadState int downloadState, boolean force) {
        mStatesLock.writeLock().lock();
        try {
            @DataDownloadState int currentDownloadState = getDataDownloadState();
            if (!force
                    && (currentDownloadState == DATA_DOWNLOAD_FAILED
                            || currentDownloadState == DATA_DOWNLOAD_COMPLETE)) {
                Slog.w(TAG, "HC data download already in terminal state.");
                return;
            }
            mPreferenceHelper.insertOrReplacePreference(
                    DATA_DOWNLOAD_STATE_KEY, String.valueOf(downloadState));

            if (downloadState == DATA_DOWNLOAD_STARTED || downloadState == DATA_DOWNLOAD_RETRY) {
                mPreferenceHelper.insertOrReplacePreference(
                        DATA_DOWNLOAD_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
                scheduleDownloadStateTimeoutJob();
            }
        } finally {
            mStatesLock.writeLock().unlock();
        }
    }

    // Keeping this as a separate single-line method to keep this code close to the rest of the
    // code that uses PreferenceHelper to keep data on the disk.
    private void setDataRestoreError(
            @HealthConnectDataState.DataRestoreError int dataRestoreError) {
        mPreferenceHelper.insertOrReplacePreference(
                DATA_RESTORE_ERROR_KEY, String.valueOf(dataRestoreError));
    }
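
    // Common pattern for the three timeout jobs scheduled below: the start time is persisted
    // under the job's *_TIMEOUT_KEY so that a reboot or user switch resumes the remaining
    // countdown rather than restarting it, and the JobInfo deadline is the remaining latency
    // plus MINIMUM_LATENCY_WINDOW_MILLIS (24h). Note that all of these jobs share the job id
    // BACKUP_RESTORE_JOB_ID + userId, so scheduling one appears to replace any previously
    // pending backup/restore job for that user.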

    /** Schedule timeout for data download state so that we are not stuck in the current state. */
    private void scheduleDownloadStateTimeoutJob() {
        @DataDownloadState int currentDownloadState = getDataDownloadState();
        if (currentDownloadState != DATA_DOWNLOAD_STARTED
                && currentDownloadState != DATA_DOWNLOAD_RETRY) {
            Slog.i(
                    TAG,
                    "Attempt to schedule download timeout job with state: "
                            + currentDownloadState);
            // We are not in the correct state. There's no need to set the timer.
            return;
        }

        // We might be here because the device rebooted or the user switched. If a timer was
        // already going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_DOWNLOAD_TIMEOUT_KEY,
                        DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY,
                        DATA_DOWNLOAD_TIMEOUT_INTERVAL_MILLIS);

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_DOWNLOAD_TIMEOUT_KEY);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(
                TAG,
                "Scheduling download state timeout job with period: " + timeoutMillis + " millis");
        mJobScheduler.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time.
        mPreferenceHelper.insertOrReplacePreference(
                DATA_DOWNLOAD_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeDownloadStateTimeoutJob() {
        @DataDownloadState int currentDownloadState = getDataDownloadState();
        if (currentDownloadState == DATA_DOWNLOAD_STARTED
                || currentDownloadState == DATA_DOWNLOAD_RETRY) {
            Slog.i(TAG, "Executing download state timeout job");
            setDataDownloadState(DATA_DOWNLOAD_FAILED, false);
            setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
            // Remove the remaining timeouts from the disk.
            mPreferenceHelper.insertOrReplacePreference(DATA_DOWNLOAD_TIMEOUT_KEY, "");
            mPreferenceHelper.insertOrReplacePreference(DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY, "");
        } else {
            Slog.i(TAG, "Download state timeout job fired in state: " + currentDownloadState);
        }
    }

    /** Schedule timeout for data staging state so that we are not stuck in the current state. */
    private void scheduleStagingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState != INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
                && internalRestoreState != INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
            // We are not in the correct state. There's no need to set the timer.
            Slog.i(
                    TAG,
                    "Attempt to schedule staging timeout job with state: " + internalRestoreState);
            return;
        }

        // We might be here because the device rebooted or the user switched. If a timer was
        // already going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_STAGING_TIMEOUT_KEY,
                        DATA_STAGING_TIMEOUT_CANCELLED_KEY,
                        DATA_STAGING_TIMEOUT_INTERVAL_MILLIS);

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_STAGING_TIMEOUT_KEY);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(TAG, "Scheduling staging timeout job with period: " + timeoutMillis + " millis");
        mJobScheduler.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time.
        mPreferenceHelper.insertOrReplacePreference(
                DATA_STAGING_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeStagingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
                || internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
            Slog.i(TAG, "Executing staging timeout job");
            setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);
            setDataRestoreError(RESTORE_ERROR_UNKNOWN);
            // Remove the remaining timeouts from the disk.
            mPreferenceHelper.insertOrReplacePreference(DATA_STAGING_TIMEOUT_KEY, "");
            mPreferenceHelper.insertOrReplacePreference(DATA_STAGING_TIMEOUT_CANCELLED_KEY, "");
        } else {
            Slog.i(TAG, "Staging timeout job fired in state: " + internalRestoreState);
        }
    }

    /** Schedule timeout for data merging state so that we are not stuck in the current state. */
    private void scheduleMergingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState != INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            // We are not in the correct state. There's no need to set the timer.
            Slog.i(
                    TAG,
                    "Attempt to schedule merging timeout job with state: " + internalRestoreState);
            return;
        }

        // We might be here because the device rebooted or the user switched. If a timer was
        // already going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_MERGING_TIMEOUT_KEY,
                        DATA_MERGING_TIMEOUT_CANCELLED_KEY,
                        DATA_MERGING_TIMEOUT_INTERVAL_MILLIS);

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_MERGING_TIMEOUT_KEY);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(TAG, "Scheduling merging timeout job with period: " + timeoutMillis + " millis");
        mJobScheduler.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time.
        mPreferenceHelper.insertOrReplacePreference(
                DATA_MERGING_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeMergingTimeoutJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
            Slog.i(TAG, "Executing merging timeout job");
            setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);
            setDataRestoreError(RESTORE_ERROR_UNKNOWN);
            // Remove the remaining timeouts from the disk.
            mPreferenceHelper.insertOrReplacePreference(DATA_MERGING_TIMEOUT_KEY, "");
            mPreferenceHelper.insertOrReplacePreference(DATA_MERGING_TIMEOUT_CANCELLED_KEY, "");
        } else {
            Slog.i(TAG, "Merging timeout job fired in state: " + internalRestoreState);
        }
    }
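
    // Unlike the timeout jobs above, the retry job below does not abort the restore; it simply
    // re-attempts the merge after DATA_MERGING_RETRY_DELAY_MILLIS (12 hours), covering the case
    // where a merge was deferred, e.g. because a migration was in progress.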

    private void scheduleRetryMergingJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState != INTERNAL_RESTORE_STATE_STAGING_DONE) {
            // We can do merging only if we are in the STAGING_DONE state.
            Slog.i(
                    TAG,
                    "Attempt to schedule merging retry job with state: " + internalRestoreState);
            return;
        }

        int userId = mCurrentForegroundUser.getIdentifier();
        final PersistableBundle extras = new PersistableBundle();
        extras.putInt(EXTRA_USER_ID, userId);
        extras.putString(EXTRA_JOB_NAME_KEY, DATA_MERGING_RETRY_KEY);

        // We might be here because the device rebooted or the user switched. If a timer was
        // already going on then we want to continue that timer.
        long timeoutMillis =
                getRemainingTimeoutMillis(
                        DATA_MERGING_RETRY_KEY,
                        DATA_MERGING_RETRY_CANCELLED_KEY,
                        DATA_MERGING_RETRY_DELAY_MILLIS);
        JobInfo.Builder jobInfoBuilder =
                new JobInfo.Builder(
                                BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
                                new ComponentName(mContext, BackupRestoreJobService.class))
                        .setExtras(extras)
                        .setMinimumLatency(timeoutMillis)
                        .setOverrideDeadline(timeoutMillis + MINIMUM_LATENCY_WINDOW_MILLIS);
        Slog.i(TAG, "Scheduling retry merging job with period: " + timeoutMillis + " millis");
        mJobScheduler.schedule(mContext, jobInfoBuilder.build(), this);

        // Set the start time.
        mPreferenceHelper.insertOrReplacePreference(
                DATA_MERGING_RETRY_KEY, Long.toString(Instant.now().toEpochMilli()));
    }

    private void executeRetryMergingJob() {
        @InternalRestoreState int internalRestoreState = getInternalRestoreState();
        if (internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_DONE) {
            Slog.i(TAG, "Retrying merging");
            merge();

            if (getInternalRestoreState() == INTERNAL_RESTORE_STATE_MERGING_DONE) {
                // Remove the remaining timeouts from the disk.
                mPreferenceHelper.insertOrReplacePreference(DATA_MERGING_RETRY_KEY, "");
                mPreferenceHelper.insertOrReplacePreference(DATA_MERGING_RETRY_CANCELLED_KEY, "");
            }
        } else {
            Slog.i(TAG, "Merging retry job fired in state: " + internalRestoreState);
        }
    }

    private void triggerMergingIfApplicable() {
        mThreadScheduler.scheduleInternalTask(
                () -> {
                    if (shouldAttemptMerging()) {
                        Slog.i(TAG, "Attempting merging.");
                        setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_DONE, true);
                        merge();
                    }
                });
    }

    private long getRemainingTimeoutMillis(
            String startTimeKey, String cancelledTimeKey, long stdTimeout) {
        String startTimeStr = mPreferenceHelper.getPreference(startTimeKey);
        if (startTimeStr == null || startTimeStr.trim().isEmpty()) {
            return stdTimeout;
        }
        long currTime = Instant.now().toEpochMilli();
        String cancelledTimeStr = mPreferenceHelper.getPreference(cancelledTimeKey);
        if (cancelledTimeStr == null || cancelledTimeStr.trim().isEmpty()) {
            return Math.max(0, stdTimeout - (currTime - Long.parseLong(startTimeStr)));
        }
        long spentTime = Long.parseLong(cancelledTimeStr) - Long.parseLong(startTimeStr);
        return Math.max(0, stdTimeout - spentTime);
    }
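
    // Worked example for getRemainingTimeoutMillis above (numbers illustrative): with
    // stdTimeout = 24h and a start time persisted 10h ago, the remaining timeout is
    // 24h - 10h = 14h. If the jobs were instead cancelled 4h after the start (e.g. on a user
    // switch), only the 4h that elapsed before cancellation counts as spent, so rescheduling
    // later resumes with 24h - 4h = 20h, regardless of how much wall-clock time has passed.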
merging."); 1102 restoredGrantTimeFile.delete(); 1103 } 1104 mergeDatabase(HealthConnectContext dbContext)1105 private void mergeDatabase(HealthConnectContext dbContext) { 1106 synchronized (mMergingLock) { 1107 if (!dbContext.getDatabasePath(STAGED_DATABASE_NAME).exists()) { 1108 Slog.i(TAG, "No staged db found."); 1109 // no db was staged 1110 return; 1111 } 1112 Slog.i(TAG, "Merging health connect db."); 1113 1114 mDatabaseMerger.merge(new HealthConnectDatabase(dbContext, STAGED_DATABASE_NAME)); 1115 1116 // Delete the staged db as we are done merging. 1117 Slog.i(TAG, "Deleting staged db after merging."); 1118 dbContext.deleteDatabase(STAGED_DATABASE_NAME); 1119 } 1120 } 1121 1122 /** Execute the task as critical section by holding read lock. */ runWithStatesReadLock(RunnableWithThrowable<E> task)1123 public <E extends Throwable> void runWithStatesReadLock(RunnableWithThrowable<E> task) 1124 throws E { 1125 mStatesLock.readLock().lock(); 1126 try { 1127 task.run(); 1128 } finally { 1129 mStatesLock.readLock().unlock(); 1130 } 1131 } 1132 1133 /** Inner class to separate out the scheduling layer to make it more testable. */ 1134 static class BackupRestoreJobScheduler { schedule(Context context, JobInfo jobInfo, BackupRestore backupRestore)1135 void schedule(Context context, JobInfo jobInfo, BackupRestore backupRestore) { 1136 BackupRestoreJobService.setCurrentBackupRestore(backupRestore); 1137 final long token = Binder.clearCallingIdentity(); 1138 try { 1139 int result = 1140 requireNonNull(context.getSystemService(JobScheduler.class)) 1141 .forNamespace(BackupRestoreJobService.BACKUP_RESTORE_JOBS_NAMESPACE) 1142 .schedule(jobInfo); 1143 1144 if (result != JobScheduler.RESULT_SUCCESS) { 1145 Slog.e( 1146 TAG, 1147 "Failed to schedule: " 1148 + jobInfo.getExtras().getString(EXTRA_JOB_NAME_KEY)); 1149 } 1150 } finally { 1151 Binder.restoreCallingIdentity(token); 1152 } 1153 } 1154 1155 /** Cancels all jobs for our namespace. */ cancelAllJobs(Context context)1156 void cancelAllJobs(Context context) { 1157 requireNonNull(context.getSystemService(JobScheduler.class)) 1158 .forNamespace(BackupRestoreJobService.BACKUP_RESTORE_JOBS_NAMESPACE) 1159 .cancelAll(); 1160 } 1161 } 1162 1163 /** Schedules the jobs for {@link BackupRestore} */ 1164 public static final class BackupRestoreJobService extends JobService { 1165 public static final String BACKUP_RESTORE_JOBS_NAMESPACE = "BACKUP_RESTORE_JOBS_NAMESPACE"; 1166 public static final String EXTRA_USER_ID = "user_id"; 1167 public static final String EXTRA_JOB_NAME_KEY = "job_name"; 1168 private static final int BACKUP_RESTORE_JOB_ID = 1000; 1169 1170 @SuppressWarnings("NullAway.Init") // TODO(b/317029272): fix this suppression 1171 private static volatile BackupRestore sBackupRestore; 1172 1173 /** 1174 * Set the current backup restore. 1175 * 1176 * <p>Used for detecting user changes. 
1177 * 1178 * @param backupRestore the current {@link BackupRestore} 1179 */ setCurrentBackupRestore(BackupRestore backupRestore)1180 public static void setCurrentBackupRestore(BackupRestore backupRestore) { 1181 sBackupRestore = backupRestore; 1182 } 1183 1184 @Override onStartJob(JobParameters params)1185 public boolean onStartJob(JobParameters params) { 1186 int userId = params.getExtras().getInt(EXTRA_USER_ID, DEFAULT_INT); 1187 if (userId != sBackupRestore.getCurrentUserHandle().getIdentifier()) { 1188 Slog.w( 1189 TAG, 1190 "Got onStartJob for non active user: " 1191 + userId 1192 + ", but the current active user is: " 1193 + sBackupRestore.getCurrentUserHandle().getIdentifier()); 1194 return false; 1195 } 1196 1197 String jobName = params.getExtras().getString(EXTRA_JOB_NAME_KEY); 1198 if (Objects.isNull(jobName)) { 1199 Slog.w(TAG, "Got onStartJob for a nameless job"); 1200 return false; 1201 } 1202 1203 sBackupRestore.mThreadScheduler.scheduleInternalTask( 1204 () -> jobFinished(params, sBackupRestore.handleJob(params.getExtras()))); 1205 1206 return true; 1207 } 1208 1209 @Override onStopJob(JobParameters params)1210 public boolean onStopJob(JobParameters params) { 1211 return false; 1212 } 1213 } 1214 } 1215