1 /*
2  * Copyright (C) 2023 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 package com.android.server.healthconnect.backuprestore;
18 
19 import static android.health.connect.Constants.DEFAULT_INT;
20 import static android.health.connect.Constants.DEFAULT_LONG;
21 import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_FETCHING_DATA;
22 import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_NONE;
23 import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_UNKNOWN;
24 import static android.health.connect.HealthConnectDataState.RESTORE_ERROR_VERSION_DIFF;
25 import static android.health.connect.HealthConnectDataState.RESTORE_STATE_IDLE;
26 import static android.health.connect.HealthConnectDataState.RESTORE_STATE_IN_PROGRESS;
27 import static android.health.connect.HealthConnectDataState.RESTORE_STATE_PENDING;
28 import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_COMPLETE;
29 import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_FAILED;
30 import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_RETRY;
31 import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_STARTED;
32 import static android.health.connect.HealthConnectManager.DATA_DOWNLOAD_STATE_UNKNOWN;
33 
34 import static com.android.server.healthconnect.backuprestore.BackupRestore.BackupRestoreJobService.EXTRA_JOB_NAME_KEY;
35 import static com.android.server.healthconnect.backuprestore.BackupRestore.BackupRestoreJobService.EXTRA_USER_ID;
36 import static com.android.server.healthconnect.storage.utils.StorageUtils.getCursorBlob;
37 import static com.android.server.healthconnect.storage.utils.StorageUtils.getCursorLong;
38 import static com.android.server.healthconnect.storage.utils.StorageUtils.getCursorString;
39 
40 import android.annotation.IntDef;
41 import android.annotation.NonNull;
42 import android.app.job.JobInfo;
43 import android.app.job.JobParameters;
44 import android.app.job.JobScheduler;
45 import android.app.job.JobService;
46 import android.content.ComponentName;
47 import android.content.Context;
48 import android.content.ContextWrapper;
49 import android.database.Cursor;
50 import android.health.connect.HealthConnectDataState;
51 import android.health.connect.HealthConnectException;
52 import android.health.connect.HealthConnectManager.DataDownloadState;
53 import android.health.connect.ReadRecordsRequestUsingFilters;
54 import android.health.connect.aidl.IDataStagingFinishedCallback;
55 import android.health.connect.datatypes.Record;
56 import android.health.connect.internal.datatypes.RecordInternal;
57 import android.health.connect.internal.datatypes.utils.RecordMapper;
58 import android.health.connect.restore.BackupFileNamesSet;
59 import android.health.connect.restore.StageRemoteDataException;
60 import android.health.connect.restore.StageRemoteDataRequest;
61 import android.os.Binder;
62 import android.os.ParcelFileDescriptor;
63 import android.os.PersistableBundle;
64 import android.os.RemoteException;
65 import android.os.UserHandle;
66 import android.text.format.DateUtils;
67 import android.util.ArrayMap;
68 import android.util.ArraySet;
69 import android.util.Log;
70 import android.util.Pair;
71 import android.util.Slog;
72 
73 import com.android.internal.annotations.GuardedBy;
74 import com.android.internal.annotations.VisibleForTesting;
75 import com.android.server.healthconnect.HealthConnectThreadScheduler;
76 import com.android.server.healthconnect.migration.MigrationStateManager;
77 import com.android.server.healthconnect.permission.FirstGrantTimeManager;
78 import com.android.server.healthconnect.permission.GrantTimeXmlHelper;
79 import com.android.server.healthconnect.permission.UserGrantTimeState;
80 import com.android.server.healthconnect.storage.HealthConnectDatabase;
81 import com.android.server.healthconnect.storage.TransactionManager;
82 import com.android.server.healthconnect.storage.datatypehelpers.AppInfoHelper;
83 import com.android.server.healthconnect.storage.datatypehelpers.PreferenceHelper;
84 import com.android.server.healthconnect.storage.datatypehelpers.RecordHelper;
85 import com.android.server.healthconnect.storage.request.DeleteTableRequest;
86 import com.android.server.healthconnect.storage.request.ReadTableRequest;
87 import com.android.server.healthconnect.storage.request.ReadTransactionRequest;
88 import com.android.server.healthconnect.storage.request.UpsertTransactionRequest;
89 import com.android.server.healthconnect.storage.utils.RecordHelperProvider;
90 import com.android.server.healthconnect.utils.FilesUtil;
91 import com.android.server.healthconnect.utils.RunnableWithThrowable;
92 
93 import java.io.File;
94 import java.io.FileInputStream;
95 import java.io.FileOutputStream;
96 import java.io.IOException;
97 import java.lang.annotation.Retention;
98 import java.lang.annotation.RetentionPolicy;
99 import java.nio.file.FileSystems;
100 import java.nio.file.Files;
101 import java.nio.file.Path;
102 import java.nio.file.StandardCopyOption;
103 import java.time.Instant;
104 import java.util.Collections;
105 import java.util.List;
106 import java.util.Map;
107 import java.util.Objects;
108 import java.util.Set;
109 import java.util.concurrent.locks.ReentrantReadWriteLock;
110 import java.util.stream.Collectors;
111 import java.util.stream.Stream;
112 
113 /**
114  * Class responsible for performing backup / restore related tasks.
115  *
116  * @hide
117  */
118 public final class BackupRestore {
119     // Key for storing the current data download state
120     @VisibleForTesting
121     public static final String DATA_DOWNLOAD_STATE_KEY = "data_download_state_key";
122     // The below values for the IntDef are defined in chronological order of the restore process.
123     public static final int INTERNAL_RESTORE_STATE_UNKNOWN = 0;
124     public static final int INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING = 1;
125     public static final int INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS = 2;
126     public static final int INTERNAL_RESTORE_STATE_STAGING_DONE = 3;
127     public static final int INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS = 4;
128     public static final int INTERNAL_RESTORE_STATE_MERGING_DONE = 5;
129 
130     @VisibleForTesting
131     static final long DATA_DOWNLOAD_TIMEOUT_INTERVAL_MILLIS = 14 * DateUtils.DAY_IN_MILLIS;
132 
133     @VisibleForTesting
134     static final long DATA_STAGING_TIMEOUT_INTERVAL_MILLIS = DateUtils.DAY_IN_MILLIS;
135 
136     @VisibleForTesting
137     static final long DATA_MERGING_TIMEOUT_INTERVAL_MILLIS = 5 * DateUtils.DAY_IN_MILLIS;
138 
139     private static final long DATA_MERGING_RETRY_DELAY_MILLIS = 12 * DateUtils.HOUR_IN_MILLIS;
140 
141     @VisibleForTesting static final String DATA_DOWNLOAD_TIMEOUT_KEY = "data_download_timeout_key";
142 
143     @VisibleForTesting static final String DATA_STAGING_TIMEOUT_KEY = "data_staging_timeout_key";
144     @VisibleForTesting static final String DATA_MERGING_TIMEOUT_KEY = "data_merging_timeout_key";
145 
146     @VisibleForTesting
147     static final String DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY = "data_download_timeout_cancelled_key";
148 
149     @VisibleForTesting
150     static final String DATA_STAGING_TIMEOUT_CANCELLED_KEY = "data_staging_timeout_cancelled_key";
151 
152     @VisibleForTesting
153     static final String DATA_MERGING_TIMEOUT_CANCELLED_KEY = "data_merging_timeout_cancelled_key";
154 
155     @VisibleForTesting static final String DATA_MERGING_RETRY_KEY = "data_merging_retry_key";
156     private static final String DATA_MERGING_RETRY_CANCELLED_KEY =
157             "data_merging_retry_cancelled_key";
158 
159     @Retention(RetentionPolicy.SOURCE)
160     @IntDef({
161             INTERNAL_RESTORE_STATE_UNKNOWN,
162             INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING,
163             INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS,
164             INTERNAL_RESTORE_STATE_STAGING_DONE,
165             INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS,
166             INTERNAL_RESTORE_STATE_MERGING_DONE
167     })
168     public @interface InternalRestoreState {}
169 
170     // Key for storing the current data restore state on disk.
171     public static final String DATA_RESTORE_STATE_KEY = "data_restore_state_key";
172     // Key for storing the error restoring HC data.
173     public static final String DATA_RESTORE_ERROR_KEY = "data_restore_error_key";
174 
175     @VisibleForTesting
176     static final String GRANT_TIME_FILE_NAME = "health-permissions-first-grant-times.xml";
177 
178     private static final String TAG = "HealthConnectBackupRestore";
179     private final ReentrantReadWriteLock mStatesLock = new ReentrantReadWriteLock(true);
180     private final FirstGrantTimeManager mFirstGrantTimeManager;
181     private final MigrationStateManager mMigrationStateManager;
182 
183     private final Context mStagedDbContext;
184     private final Context mContext;
185     private final Map<Long, String> mStagedPackageNamesByAppIds = new ArrayMap<>();
186     private final Object mMergingLock = new Object();
187 
188     @GuardedBy("mMergingLock")
189     private HealthConnectDatabase mStagedDatabase;
190 
191     private boolean mActivelyStagingRemoteData = false;
192 
193     private volatile UserHandle mCurrentForegroundUser;
194 
195     public BackupRestore(
196             FirstGrantTimeManager firstGrantTimeManager,
197             MigrationStateManager migrationStateManager,
198             @NonNull Context context) {
199         mFirstGrantTimeManager = firstGrantTimeManager;
200         mMigrationStateManager = migrationStateManager;
201         mStagedDbContext = new StagedDatabaseContext(context);
202         mContext = context;
203         mCurrentForegroundUser = mContext.getUser();
204     }
205 
206     public void setupForUser(UserHandle currentForegroundUser) {
207         Slog.d(TAG, "Performing user switch operations.");
208         mCurrentForegroundUser = currentForegroundUser;
209         HealthConnectThreadScheduler.scheduleInternalTask(this::scheduleAllJobs);
210     }
211 
212     /**
213      * Prepares for staging all health connect remote data.
214      *
215      * @return true if the preparation was successful; false if staging is already in progress
216      *     or already done.
217      */
218     public boolean prepForStagingIfNotAlreadyDone() {
219         mStatesLock.writeLock().lock();
220         try {
221             Slog.d(TAG, "Prepping for staging.");
222             setDataDownloadState(DATA_DOWNLOAD_COMPLETE, false /* force */);
223             @InternalRestoreState int curDataRestoreState = getInternalRestoreState();
224             if (curDataRestoreState >= INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
225                 if (curDataRestoreState >= INTERNAL_RESTORE_STATE_STAGING_DONE) {
226                     Slog.w(TAG, "Staging is already done. Cur state " + curDataRestoreState);
227                 } else {
228                     // Maybe the caller died and is trying to stage the data again.
229                     Slog.w(TAG, "Already in the process of staging.");
230                 }
231                 return false;
232             }
233             mActivelyStagingRemoteData = true;
234             setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS, false /* force */);
235             return true;
236         } finally {
237             mStatesLock.writeLock().unlock();
238         }
239     }
240 
241     /**
242      * Stages all health connect remote data for merging later.
243      *
244      * <p>This should be called on the proper thread.
245      */
246     public void stageAllHealthConnectRemoteData(
247             Map<String, ParcelFileDescriptor> pfdsByFileName,
248             Map<String, HealthConnectException> exceptionsByFileName,
249             int userId,
250             @NonNull IDataStagingFinishedCallback callback) {
251         File stagedRemoteDataDir = getStagedRemoteDataDirectoryForUser(userId);
252         try {
253             stagedRemoteDataDir.mkdirs();
254 
255             // Now that we have the dir we can try to copy all the data.
256             // Any exceptions we face will be collected and shared with the caller.
257             pfdsByFileName.forEach(
258                     (fileName, pfd) -> {
259                         File destination = new File(stagedRemoteDataDir, fileName);
260                         try (FileInputStream inputStream =
261                                 new FileInputStream(pfd.getFileDescriptor())) {
262                             Path destinationPath =
263                                     FileSystems.getDefault().getPath(destination.getAbsolutePath());
264                             Files.copy(
265                                     inputStream,
266                                     destinationPath,
267                                     StandardCopyOption.REPLACE_EXISTING);
268                         } catch (IOException e) {
269                             Slog.e(
270                                     TAG,
271                                     "Failed to copy to destination: "
272                                             + destination.getName(), e);
273                             destination.delete();
274                             exceptionsByFileName.put(
275                                     fileName,
276                                     new HealthConnectException(
277                                             HealthConnectException.ERROR_IO, e.getMessage()));
278                         } catch (SecurityException e) {
279                             Slog.e(
280                                     TAG,
281                                     "Failed to copy to destination: "
282                                             + destination.getName(), e);
283                             destination.delete();
284                             exceptionsByFileName.put(
285                                     fileName,
286                                     new HealthConnectException(
287                                             HealthConnectException.ERROR_SECURITY, e.getMessage()));
288                         } finally {
289                             try {
290                                 pfd.close();
291                             } catch (IOException e) {
292                                 exceptionsByFileName.put(
293                                         fileName,
294                                         new HealthConnectException(
295                                                 HealthConnectException.ERROR_IO, e.getMessage()));
296                             }
297                         }
298                     });
299         } finally {
300             // We are done staging all the remote data, update the data restore state.
301             // Even if we encountered any exception we still say that we are "done" as
302             // we don't expect the caller to retry and see different results.
303             setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_DONE, false);
304             mActivelyStagingRemoteData = false;
305 
306             // Share the result / exception with the caller.
307             try {
308                 if (exceptionsByFileName.isEmpty()) {
309                     callback.onResult();
310                 } else {
311                     Slog.i(TAG, "Exceptions encountered during staging.");
312                     setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
313                     callback.onError(new StageRemoteDataException(exceptionsByFileName));
314                 }
315             } catch (RemoteException e) {
316                 Log.e(TAG, "Restore response could not be sent to the caller.", e);
317             } catch (SecurityException e) {
318                 Log.e(
319                         TAG,
320                         "Restore response could not be sent due to conflicting AIDL definitions",
321                         e);
322             } finally {
323                 // Now that the callback for the stageAllHealthConnectRemoteData API has been called
324                 // we can start the merging process.
325                 merge();
326             }
327         }
328     }
329 
330     /** Writes the backup data into files represented by the passed file descriptors. */
331     public void getAllDataForBackup(
332             @NonNull StageRemoteDataRequest stageRemoteDataRequest,
333             @NonNull UserHandle userHandle) {
334         Slog.d(TAG, "Incoming request to get all data for backup");
335         Map<String, ParcelFileDescriptor> pfdsByFileName =
336                 stageRemoteDataRequest.getPfdsByFileName();
337 
338         var backupFilesByFileNames = getBackupFilesByFileNames(userHandle);
339         pfdsByFileName.forEach(
340                 (fileName, pfd) -> {
341                     Path sourceFilePath = backupFilesByFileNames.get(fileName).toPath();
342                     try (FileOutputStream outputStream =
343                             new FileOutputStream(pfd.getFileDescriptor())) {
344                         Files.copy(sourceFilePath, outputStream);
345                     } catch (IOException | SecurityException e) {
346                         Slog.e(TAG, "Failed to send " + fileName + " for backup", e);
347                     } finally {
348                         try {
349                             pfd.close();
350                         } catch (IOException e) {
351                             Slog.e(TAG, "Failed to close " + fileName + " for backup", e);
352                         }
353                     }
354                 });
355     }
356 
357     /** Get the file names of all the files that are transported during backup / restore. */
358     public BackupFileNamesSet getAllBackupFileNames(boolean forDeviceToDevice) {
359         ArraySet<String> backupFileNames = new ArraySet<>();
360         if (forDeviceToDevice) {
361             backupFileNames.add(
362                     TransactionManager.getInitialisedInstance().getDatabasePath().getName());
363         }
364         backupFileNames.add(GRANT_TIME_FILE_NAME);
365         return new BackupFileNamesSet(backupFileNames);
366     }
367 
368     /** Updates the download state of the remote data. */
369     public void updateDataDownloadState(@DataDownloadState int downloadState) {
370         setDataDownloadState(downloadState, false /* force */);
371 
372         if (downloadState == DATA_DOWNLOAD_COMPLETE) {
373             setInternalRestoreState(INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING, false /* force */);
374         } else if (downloadState == DATA_DOWNLOAD_FAILED) {
375             setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false /* force */);
376             setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
377         }
378     }
379 
380     /** Deletes all the staged data and resets all the states. */
381     public void deleteAndResetEverything(@NonNull UserHandle userHandle) {
382         // Don't delete anything while we are in the process of merging staged data.
383         synchronized (mMergingLock) {
384             mStagedDbContext.deleteDatabase(HealthConnectDatabase.getName());
385             mStagedDatabase = null;
386             FilesUtil.deleteDir(getStagedRemoteDataDirectoryForUser(userHandle.getIdentifier()));
387         }
388         setDataDownloadState(DATA_DOWNLOAD_STATE_UNKNOWN, true /* force */);
389         setInternalRestoreState(INTERNAL_RESTORE_STATE_UNKNOWN, true /* force */);
390         setDataRestoreError(RESTORE_ERROR_NONE);
391     }
392 
393     /** Returns the current data restore state, as exposed via {@link HealthConnectDataState}. */
394     public @HealthConnectDataState.DataRestoreState int getDataRestoreState() {
395         @HealthConnectDataState.DataRestoreState int dataRestoreState = RESTORE_STATE_IDLE;
396 
397         @InternalRestoreState int currentRestoreState = getInternalRestoreState();
398 
399         if (currentRestoreState == INTERNAL_RESTORE_STATE_MERGING_DONE) {
400             // already with correct values.
401         } else if (currentRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
402             dataRestoreState = RESTORE_STATE_IN_PROGRESS;
403         } else if (currentRestoreState != INTERNAL_RESTORE_STATE_UNKNOWN) {
404             dataRestoreState = RESTORE_STATE_PENDING;
405         }
406 
407         @DataDownloadState int currentDownloadState = getDataDownloadState();
408         if (currentDownloadState == DATA_DOWNLOAD_FAILED) {
409             // already with correct values.
410         } else if (currentDownloadState != DATA_DOWNLOAD_STATE_UNKNOWN) {
411             dataRestoreState = RESTORE_STATE_PENDING;
412         }
413 
414         return dataRestoreState;
415     }
416 
417     /** Get the current data restore error. */
418     public @HealthConnectDataState.DataRestoreError int getDataRestoreError() {
419         @HealthConnectDataState.DataRestoreError int dataRestoreError = RESTORE_ERROR_NONE;
420         String restoreErrorOnDisk =
421                 PreferenceHelper.getInstance().getPreference(DATA_RESTORE_ERROR_KEY);
422 
423         if (restoreErrorOnDisk == null) {
424             return dataRestoreError;
425         }
426         try {
427             dataRestoreError = Integer.parseInt(restoreErrorOnDisk);
428         } catch (Exception e) {
429             Slog.e(TAG, "Exception parsing restoreErrorOnDisk " + restoreErrorOnDisk, e);
430         }
431         return dataRestoreError;
432     }
433 
434     /** Returns the file names of all the staged files. */
435     @VisibleForTesting
436     public Set<String> getStagedRemoteFileNames(int userId) {
437         File[] allFiles = getStagedRemoteDataDirectoryForUser(userId).listFiles();
438         if (allFiles == null) {
439             return Collections.emptySet();
440         }
441         return Stream.of(allFiles)
442                 .filter(file -> !file.isDirectory())
443                 .map(File::getName)
444                 .collect(Collectors.toSet());
445     }
446 
447     /** Returns true if restore merging is in progress. API calls are blocked when this is true. */
448     public boolean isRestoreMergingInProgress() {
449         return getInternalRestoreState() == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS;
450     }
451 
452     /** Schedules any pending jobs. */
453     public void scheduleAllJobs() {
454         scheduleDownloadStateTimeoutJob();
455         scheduleStagingTimeoutJob();
456         scheduleMergingTimeoutJob();
457 
458         // We can schedule "retry merging" only if we are in the STAGING_DONE state.  However, if we
459         // are in STAGING_DONE state, then we should definitely attempt merging now - and that's
460         // what we will do below.
461         // So, there's no point in scheduling a "retry merging" job.  If Migration is going on then
462         // the merge attempt will take care of that automatically (and schedule the retry job as
463         // needed).
464         triggerMergingIfApplicable();
465     }
466 
467     /** Cancels all the jobs and sets the cancelled time. */
468     public void cancelAllJobs() {
469         BackupRestoreJobService.cancelAllJobs(mContext);
470         setJobCancelledTimeIfExists(DATA_DOWNLOAD_TIMEOUT_KEY, DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY);
471         setJobCancelledTimeIfExists(DATA_STAGING_TIMEOUT_KEY, DATA_STAGING_TIMEOUT_CANCELLED_KEY);
472         setJobCancelledTimeIfExists(DATA_MERGING_TIMEOUT_KEY, DATA_MERGING_TIMEOUT_CANCELLED_KEY);
473         setJobCancelledTimeIfExists(DATA_MERGING_RETRY_KEY, DATA_MERGING_RETRY_CANCELLED_KEY);
474     }
475 
476     public UserHandle getCurrentUserHandle() {
477         return mCurrentForegroundUser;
478     }
479 
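    /**
     * Persists the internal restore state. Unless {@code force} is set the state can only move
     * forward, and the staging / merging timeout jobs are scheduled as those states are entered.
     */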
480     void setInternalRestoreState(@InternalRestoreState int dataRestoreState, boolean force) {
481         @InternalRestoreState int currentRestoreState = getInternalRestoreState();
482         mStatesLock.writeLock().lock();
483         try {
484             if (!force && currentRestoreState >= dataRestoreState) {
485                 Slog.w(
486                         TAG,
487                         "Attempt to update data restore state in wrong order from "
488                                 + currentRestoreState
489                                 + " to "
490                                 + dataRestoreState);
491                 return;
492             }
493             PreferenceHelper.getInstance()
494                     .insertOrReplacePreference(
495                             DATA_RESTORE_STATE_KEY, String.valueOf(dataRestoreState));
496 
497             if (dataRestoreState == INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
498                     || dataRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
499                 scheduleStagingTimeoutJob();
500             } else if (dataRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
501                 scheduleMergingTimeoutJob();
502             }
503         } finally {
504             mStatesLock.writeLock().unlock();
505         }
506     }
507 
508     @InternalRestoreState int getInternalRestoreState() {
509         mStatesLock.readLock().lock();
510         try {
511             String restoreStateOnDisk =
512                     PreferenceHelper.getInstance().getPreference(DATA_RESTORE_STATE_KEY);
513             @InternalRestoreState int currentRestoreState = INTERNAL_RESTORE_STATE_UNKNOWN;
514             if (restoreStateOnDisk == null) {
515                 return currentRestoreState;
516             }
517             try {
518                 currentRestoreState = Integer.parseInt(restoreStateOnDisk);
519             } catch (Exception e) {
520                 Slog.e(TAG, "Exception parsing restoreStateOnDisk: " + restoreStateOnDisk, e);
521             }
522             // If we are not actively staging the data right now but the disk still reflects that we
523             // are then that means we died in the middle of staging.  We should be waiting for the
524             // remote data to be staged now.
525             if (!mActivelyStagingRemoteData
526                     && currentRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
527                 currentRestoreState = INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING;
528             }
529             return currentRestoreState;
530         } finally {
531             mStatesLock.readLock().unlock();
532         }
533     }
534 
535     /** Returns true if this job needs rescheduling; false otherwise. */
536     @VisibleForTesting
537     boolean handleJob(PersistableBundle extras) {
538         String jobName = extras.getString(EXTRA_JOB_NAME_KEY);
539         switch (jobName) {
540             case DATA_DOWNLOAD_TIMEOUT_KEY -> executeDownloadStateTimeoutJob();
541             case DATA_STAGING_TIMEOUT_KEY -> executeStagingTimeoutJob();
542             case DATA_MERGING_TIMEOUT_KEY -> executeMergingTimeoutJob();
543             case DATA_MERGING_RETRY_KEY -> executeRetryMergingJob();
544             default -> Slog.w(TAG, "Unknown job " + jobName + " delivered.");
545         }
546         // None of the jobs want to reschedule.
547         return false;
548     }
549 
550     @VisibleForTesting
551     boolean shouldAttemptMerging() {
552         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
553         if (internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_DONE
554                 || internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
555             Slog.i(TAG, "Will attempt merging as it was already happening or bound to happen");
556             return true;
557         }
558         return false;
559     }
560 
561     @VisibleForTesting
562     void merge() {
563         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
564         if (internalRestoreState >= INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
565             Slog.i(TAG, "Not merging as internalRestoreState is " + internalRestoreState);
566             return;
567         }
568 
569         if (mMigrationStateManager.isMigrationInProgress()) {
570             Slog.i(TAG, "Not merging as Migration in progress.");
571             scheduleRetryMergingJob();
572             return;
573         }
574 
575         int currentDbVersion = TransactionManager.getInitialisedInstance().getDatabaseVersion();
576         int stagedDbVersion = getStagedDatabase().getReadableDatabase().getVersion();
577         if (currentDbVersion < stagedDbVersion) {
578             Slog.i(TAG, "Module needs upgrade for merging to version " + stagedDbVersion);
579             setDataRestoreError(RESTORE_ERROR_VERSION_DIFF);
580             return;
581         }
582 
583         setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS, false);
584         mergeGrantTimes();
585         mergeDatabase();
586         setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);
587     }
588 
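    /**
     * Collects the files handed over for backup: the Health Connect database file and a freshly
     * serialized grant-time file written into the per-user backup directory.
     */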
589     private Map<String, File> getBackupFilesByFileNames(UserHandle userHandle) {
590         ArrayMap<String, File> backupFilesByFileNames = new ArrayMap<>();
591 
592         File databasePath = TransactionManager.getInitialisedInstance().getDatabasePath();
593         backupFilesByFileNames.put(databasePath.getName(), databasePath);
594 
595         File backupDataDir = getBackupDataDirectoryForUser(userHandle.getIdentifier());
596         backupDataDir.mkdirs();
597         File grantTimeFile = new File(backupDataDir, GRANT_TIME_FILE_NAME);
598         try {
599             grantTimeFile.createNewFile();
600             GrantTimeXmlHelper.serializeGrantTimes(
601                     grantTimeFile, mFirstGrantTimeManager.createBackupState(userHandle));
602             backupFilesByFileNames.put(grantTimeFile.getName(), grantTimeFile);
603         } catch (IOException e) {
604             Slog.e(TAG, "Could not create the grant time file for backup.", e);
605         }
606 
607         return backupFilesByFileNames;
608     }
609 
610     @DataDownloadState private int getDataDownloadState() {
611         mStatesLock.readLock().lock();
612         try {
613             String downloadStateOnDisk =
614                     PreferenceHelper.getInstance().getPreference(DATA_DOWNLOAD_STATE_KEY);
615             @DataDownloadState int currentDownloadState = DATA_DOWNLOAD_STATE_UNKNOWN;
616             if (downloadStateOnDisk == null) {
617                 return currentDownloadState;
618             }
619             try {
620                 currentDownloadState = Integer.parseInt(downloadStateOnDisk);
621             } catch (Exception e) {
622                 Slog.e(TAG, "Exception parsing downloadStateOnDisk " + downloadStateOnDisk, e);
623             }
624             return currentDownloadState;
625         } finally {
626             mStatesLock.readLock().unlock();
627         }
628     }
629 
630     private void setDataDownloadState(@DataDownloadState int downloadState, boolean force) {
631         mStatesLock.writeLock().lock();
632         try {
633             @DataDownloadState int currentDownloadState = getDataDownloadState();
634             if (!force
635                     && (currentDownloadState == DATA_DOWNLOAD_FAILED
636                             || currentDownloadState == DATA_DOWNLOAD_COMPLETE)) {
637                 Slog.w(TAG, "HC data download already in terminal state.");
638                 return;
639             }
640             PreferenceHelper.getInstance()
641                     .insertOrReplacePreference(
642                             DATA_DOWNLOAD_STATE_KEY, String.valueOf(downloadState));
643 
644             if (downloadState == DATA_DOWNLOAD_STARTED || downloadState == DATA_DOWNLOAD_RETRY) {
645                 PreferenceHelper.getInstance()
646                         .insertOrReplacePreference(
647                                 DATA_DOWNLOAD_TIMEOUT_KEY,
648                                 Long.toString(Instant.now().toEpochMilli()));
649                 scheduleDownloadStateTimeoutJob();
650             }
651         } finally {
652             mStatesLock.writeLock().unlock();
653         }
654     }
655 
656     // A separate single-line method so that this code stays close to the rest of the code that
657     // uses PreferenceHelper to persist data on disk.
658     private void setDataRestoreError(
659             @HealthConnectDataState.DataRestoreError int dataRestoreError) {
660         PreferenceHelper.getInstance()
661                 .insertOrReplacePreference(
662                         DATA_RESTORE_ERROR_KEY, String.valueOf(dataRestoreError));
663     }
664 
665     /** Schedule timeout for data download state so that we are not stuck in the current state. */
666     private void scheduleDownloadStateTimeoutJob() {
667         @DataDownloadState int currentDownloadState = getDataDownloadState();
668         if (currentDownloadState != DATA_DOWNLOAD_STARTED
669                 && currentDownloadState != DATA_DOWNLOAD_RETRY) {
670             Slog.i(
671                     TAG,
672                     "Attempt to schedule download timeout job with state: "
673                             + currentDownloadState);
674             // We are not in the correct state. There's no need to set the timer.
675             return;
676         }
677 
678         // We might be here because the device rebooted or the user switched. If a timer was already
679         // going on then we want to continue that timer.
680         long timeout =
681                 getRemainingTimeout(
682                         DATA_DOWNLOAD_TIMEOUT_KEY,
683                         DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY,
684                         DATA_DOWNLOAD_TIMEOUT_INTERVAL_MILLIS);
685 
686         int userId = mCurrentForegroundUser.getIdentifier();
687         final PersistableBundle extras = new PersistableBundle();
688         extras.putInt(EXTRA_USER_ID, userId);
689         extras.putString(EXTRA_JOB_NAME_KEY, DATA_DOWNLOAD_TIMEOUT_KEY);
690         JobInfo.Builder jobInfoBuilder =
691                 new JobInfo.Builder(
692                                 BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
693                                 new ComponentName(mContext, BackupRestoreJobService.class))
694                         .setExtras(extras)
695                         .setMinimumLatency(timeout)
696                         .setOverrideDeadline(timeout << 1);
697         Slog.i(TAG, "Scheduling download state timeout job with period: " + timeout);
698         BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);
699 
700         // Set the start time
701         PreferenceHelper.getInstance()
702                 .insertOrReplacePreference(
703                         DATA_DOWNLOAD_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
704     }
705 
706     private void executeDownloadStateTimeoutJob() {
707         @DataDownloadState int currentDownloadState = getDataDownloadState();
708         if (currentDownloadState == DATA_DOWNLOAD_STARTED
709                 || currentDownloadState == DATA_DOWNLOAD_RETRY) {
710             Slog.i(TAG, "Executing download state timeout job");
711             setDataDownloadState(DATA_DOWNLOAD_FAILED, false);
712             setDataRestoreError(RESTORE_ERROR_FETCHING_DATA);
713             // Remove the remaining timeouts from the disk
714             PreferenceHelper.getInstance()
715                     .insertOrReplacePreference(DATA_DOWNLOAD_TIMEOUT_KEY, "");
716             PreferenceHelper.getInstance()
717                     .insertOrReplacePreference(DATA_DOWNLOAD_TIMEOUT_CANCELLED_KEY, "");
718         } else {
719             Slog.i(TAG, "Download state timeout job fired in state: " + currentDownloadState);
720         }
721     }
722 
723     /** Schedule timeout for data staging state so that we are not stuck in the current state. */
724     private void scheduleStagingTimeoutJob() {
725         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
726         if (internalRestoreState != INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
727                 && internalRestoreState != INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
728             // We are not in the correct state. There's no need to set the timer.
729             Slog.i(
730                     TAG,
731                     "Attempt to schedule staging timeout job with state: " + internalRestoreState);
732             return;
733         }
734 
735         // We might be here because the device rebooted or the user switched. If a timer was already
736         // going on then we want to continue that timer.
737         long timeout =
738                 getRemainingTimeout(
739                         DATA_STAGING_TIMEOUT_KEY,
740                         DATA_STAGING_TIMEOUT_CANCELLED_KEY,
741                         DATA_STAGING_TIMEOUT_INTERVAL_MILLIS);
742 
743         int userId = mCurrentForegroundUser.getIdentifier();
744         final PersistableBundle extras = new PersistableBundle();
745         extras.putInt(EXTRA_USER_ID, userId);
746         extras.putString(EXTRA_JOB_NAME_KEY, DATA_STAGING_TIMEOUT_KEY);
747         JobInfo.Builder jobInfoBuilder =
748                 new JobInfo.Builder(
749                                 BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
750                                 new ComponentName(mContext, BackupRestoreJobService.class))
751                         .setExtras(extras)
752                         .setMinimumLatency(timeout)
753                         .setOverrideDeadline(timeout << 1);
754         Slog.i(TAG, "Scheduling staging timeout job with period: " + timeout);
755         BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);
756 
757         // Set the start time
758         PreferenceHelper.getInstance()
759                 .insertOrReplacePreference(
760                         DATA_STAGING_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
761     }
762 
763     private void executeStagingTimeoutJob() {
764         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
765         if (internalRestoreState == INTERNAL_RESTORE_STATE_WAITING_FOR_STAGING
766                 || internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_IN_PROGRESS) {
767             Slog.i(TAG, "Executing staging timeout job");
768             setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);
769             setDataRestoreError(RESTORE_ERROR_UNKNOWN);
770             // Remove the remaining timeouts from the disk
771             PreferenceHelper.getInstance()
772                     .insertOrReplacePreference(DATA_STAGING_TIMEOUT_KEY, "");
773             PreferenceHelper.getInstance()
774                     .insertOrReplacePreference(DATA_STAGING_TIMEOUT_CANCELLED_KEY, "");
775         } else {
776             Slog.i(TAG, "Staging timeout job fired in state: " + internalRestoreState);
777         }
778     }
779 
780     /** Schedule timeout for data merging state so that we are not stuck in the current state. */
781     private void scheduleMergingTimeoutJob() {
782         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
783         if (internalRestoreState != INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
784             // We are not in the correct state. There's no need to set the timer.
785             Slog.i(
786                     TAG,
787                     "Attempt to schedule merging timeout job with state: " + internalRestoreState);
788             return;
789         }
790 
791         // We might be here because the device rebooted or the user switched. If a timer was already
792         // going on then we want to continue that timer.
793         long timeout =
794                 getRemainingTimeout(
795                         DATA_MERGING_TIMEOUT_KEY,
796                         DATA_MERGING_TIMEOUT_CANCELLED_KEY,
797                         DATA_MERGING_TIMEOUT_INTERVAL_MILLIS);
798 
799         int userId = mCurrentForegroundUser.getIdentifier();
800         final PersistableBundle extras = new PersistableBundle();
801         extras.putInt(EXTRA_USER_ID, userId);
802         extras.putString(EXTRA_JOB_NAME_KEY, DATA_MERGING_TIMEOUT_KEY);
803         JobInfo.Builder jobInfoBuilder =
804                 new JobInfo.Builder(
805                                 BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
806                                 new ComponentName(mContext, BackupRestoreJobService.class))
807                         .setExtras(extras)
808                         .setMinimumLatency(timeout)
809                         .setOverrideDeadline(timeout << 1);
810         Slog.i(TAG, "Scheduling merging timeout job with period: " + timeout);
811         BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);
812 
813         // Set the start time
814         PreferenceHelper.getInstance()
815                 .insertOrReplacePreference(
816                         DATA_MERGING_TIMEOUT_KEY, Long.toString(Instant.now().toEpochMilli()));
817     }
818 
819     private void executeMergingTimeoutJob() {
820         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
821         if (internalRestoreState == INTERNAL_RESTORE_STATE_MERGING_IN_PROGRESS) {
822             Slog.i(TAG, "Executing merging timeout job");
823             setInternalRestoreState(INTERNAL_RESTORE_STATE_MERGING_DONE, false);
824             setDataRestoreError(RESTORE_ERROR_UNKNOWN);
825             // Remove the remaining timeouts from the disk
826             PreferenceHelper.getInstance()
827                     .insertOrReplacePreference(DATA_MERGING_TIMEOUT_KEY, "");
828             PreferenceHelper.getInstance()
829                     .insertOrReplacePreference(DATA_MERGING_TIMEOUT_CANCELLED_KEY, "");
830         } else {
831             Slog.i(TAG, "Merging timeout job fired in state: " + internalRestoreState);
832         }
833     }
834 
835     private void scheduleRetryMergingJob() {
836         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
837         if (internalRestoreState != INTERNAL_RESTORE_STATE_STAGING_DONE) {
838             // We can do merging only if we are in the STAGING_DONE state.
839             Slog.i(
840                     TAG,
841                     "Attempt to schedule merging retry job with state: " + internalRestoreState);
842             return;
843         }
844 
845         int userId = mCurrentForegroundUser.getIdentifier();
846         final PersistableBundle extras = new PersistableBundle();
847         extras.putInt(EXTRA_USER_ID, userId);
848         extras.putString(EXTRA_JOB_NAME_KEY, DATA_MERGING_RETRY_KEY);
849 
850         // We might be here because the device rebooted or the user switched. If a timer was already
851         // going on then we want to continue that timer.
852         long timeout =
853                 getRemainingTimeout(
854                         DATA_MERGING_RETRY_KEY,
855                         DATA_MERGING_RETRY_CANCELLED_KEY,
856                         DATA_MERGING_RETRY_DELAY_MILLIS);
857         JobInfo.Builder jobInfoBuilder =
858                 new JobInfo.Builder(
859                                 BackupRestoreJobService.BACKUP_RESTORE_JOB_ID + userId,
860                                 new ComponentName(mContext, BackupRestoreJobService.class))
861                         .setExtras(extras)
862                         .setMinimumLatency(timeout)
863                         .setOverrideDeadline(timeout << 1);
864         Slog.i(TAG, "Scheduling retry merging job with period: " + timeout);
865         BackupRestoreJobService.schedule(mContext, jobInfoBuilder.build(), this);
866 
867         // Set the start time
868         PreferenceHelper.getInstance()
869                 .insertOrReplacePreference(
870                         DATA_MERGING_RETRY_KEY,
871                         Long.toString(Instant.now().toEpochMilli()));
872     }
873 
874     private void executeRetryMergingJob() {
875         @InternalRestoreState int internalRestoreState = getInternalRestoreState();
876         if (internalRestoreState == INTERNAL_RESTORE_STATE_STAGING_DONE) {
877             Slog.i(TAG, "Retrying merging");
878             merge();
879 
880             if (getInternalRestoreState() == INTERNAL_RESTORE_STATE_MERGING_DONE) {
881                 // Remove the remaining timeouts from the disk
882                 PreferenceHelper.getInstance()
883                         .insertOrReplacePreference(DATA_MERGING_RETRY_KEY, "");
884                 PreferenceHelper.getInstance()
885                         .insertOrReplacePreference(DATA_MERGING_RETRY_CANCELLED_KEY, "");
886             }
887         } else {
888             Slog.i(TAG, "Merging retry job fired in state: " + internalRestoreState);
889         }
890     }
891 
892     private void triggerMergingIfApplicable() {
893         HealthConnectThreadScheduler.scheduleInternalTask(() -> {
894             if (shouldAttemptMerging()) {
895                 Slog.i(TAG, "Attempting merging.");
896                 setInternalRestoreState(INTERNAL_RESTORE_STATE_STAGING_DONE, true);
897                 merge();
898             }
899         });
900     }
901 
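    /**
     * Returns how much of {@code stdTimeout} is still remaining for a job, based on the persisted
     * start time and, if the job was cancelled, the time already spent before the cancellation.
     */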
902     private long getRemainingTimeout(
903             String startTimeKey, String cancelledTimeKey, long stdTimeout) {
904         String startTimeStr = PreferenceHelper.getInstance().getPreference(startTimeKey);
905         if (startTimeStr == null || startTimeStr.trim().isEmpty()) {
906             return stdTimeout;
907         }
908         long currTime = Instant.now().toEpochMilli();
909         String cancelledTimeStr = PreferenceHelper.getInstance().getPreference(cancelledTimeKey);
910         if (cancelledTimeStr == null || cancelledTimeStr.trim().isEmpty()) {
911             return Math.max(0, stdTimeout - (currTime - Long.parseLong(startTimeStr)));
912         }
913         long spentTime = Long.parseLong(cancelledTimeStr) - Long.parseLong(startTimeStr);
914         return Math.max(0, stdTimeout - spentTime);
915     }
916 
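    // Records the cancellation time for a job whose start time was persisted, so the remaining
    // timeout can be computed correctly when the job is scheduled again.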
917     private void setJobCancelledTimeIfExists(String startTimeKey, String cancelTimeKey) {
918         if (PreferenceHelper.getInstance().getPreference(startTimeKey) != null) {
919             PreferenceHelper.getInstance()
920                     .insertOrReplacePreference(
921                             cancelTimeKey, Long.toString(Instant.now().toEpochMilli()));
922         }
923     }
924 
925     /**
926      * Get the dir for the user with all the staged data - either from the cloud restore or from the
927      * d2d process.
928      */
929     private static File getStagedRemoteDataDirectoryForUser(int userId) {
930         return getNamedHcDirectoryForUser("remote_staged", userId);
931     }
932 
933     private static File getBackupDataDirectoryForUser(int userId) {
934         return getNamedHcDirectoryForUser("backup", userId);
935     }
936 
937     private static File getNamedHcDirectoryForUser(String dirName, int userId) {
938         File hcDirectoryForUser = FilesUtil.getDataSystemCeHCDirectoryForUser(userId);
939         return new File(hcDirectoryForUser, dirName);
940     }
941 
942     private void mergeGrantTimes() {
943         Slog.i(TAG, "Merging grant times.");
944         File restoredGrantTimeFile =
945                 new File(
946                         getStagedRemoteDataDirectoryForUser(mCurrentForegroundUser.getIdentifier()),
947                         GRANT_TIME_FILE_NAME);
948         UserGrantTimeState userGrantTimeState =
949                 GrantTimeXmlHelper.parseGrantTime(restoredGrantTimeFile);
950         mFirstGrantTimeManager.applyAndStageBackupDataForUser(
951                 mCurrentForegroundUser, userGrantTimeState);
952     }
953 
954     private void mergeDatabase() {
955         synchronized (mMergingLock) {
956             if (!mStagedDbContext.getDatabasePath(HealthConnectDatabase.getName()).exists()) {
957                 Slog.i(TAG, "No staged db found.");
958                 // no db was staged
959                 return;
960             }
961 
962             // We never read from the staged db if the module version is behind the staged db
963             // version. So, we are guaranteed that the merging code will be able to read all the
964             // records from the db - as the upcoming code is guaranteed to understand the records
965             // present in the staged db.
966 
967             // We are sure to migrate the db now, so prepare
968             prepInternalDataPerStagedDb();
969 
970             // Go through each record type and migrate all records of that type.
971             var recordTypeMap = RecordMapper.getInstance().getRecordIdToExternalRecordClassMap();
972             for (var recordTypeMapEntry : recordTypeMap.entrySet()) {
973                 mergeRecordsOfType(recordTypeMapEntry.getKey(), recordTypeMapEntry.getValue());
974             }
975 
976             // Delete the staged db as we are done merging.
977             Slog.i(TAG, "Deleting staged db after merging.");
978             mStagedDbContext.deleteDatabase(HealthConnectDatabase.getName());
979             mStagedDatabase = null;
980         }
981     }
982 
983     private <T extends Record> void mergeRecordsOfType(int recordType, Class<T> recordTypeClass) {
984         RecordHelper<?> recordHelper =
985                 RecordHelperProvider.getInstance().getRecordHelper(recordType);
986         // Read all the records of the given type from the staged db and insert them into the
987         // existing healthconnect db.
988         long token = DEFAULT_LONG;
989         do {
990             var recordsToMergeAndToken = getRecordsToMerge(recordTypeClass, token, recordHelper);
991             if (recordsToMergeAndToken.first.isEmpty()) {
992                 break;
993             }
994             Slog.d(TAG, "Found record to merge: " + recordsToMergeAndToken.first.getClass());
995             // Using null package name for making insertion for two reasons:
996             // 1. we don't want to update the logs for this package.
997             // 2. we don't want to update the package name in the records as they already have the
998             //    correct package name.
999             UpsertTransactionRequest upsertTransactionRequest =
1000                     new UpsertTransactionRequest(
1001                             null /* packageName */,
1002                             recordsToMergeAndToken.first,
1003                             mContext,
1004                             true /* isInsertRequest */,
1005                             true /* skipPackageNameAndLogs */);
1006             TransactionManager.getInitialisedInstance()
1007                     .insertAll(upsertTransactionRequest.getUpsertRequests());
1008 
1009             token = DEFAULT_LONG;
1010             if (recordsToMergeAndToken.second != DEFAULT_LONG) {
1011                 token = recordsToMergeAndToken.second * 2;
1012             }
1013         } while (token != DEFAULT_LONG);
1014 
1015         // Once all the records of this type have been merged we can delete the table.
1016 
1017         // Passing -1 for startTime and endTime as we don't want to have time based filtering in the
1018         // final query.
1019         Slog.d(TAG, "Deleting table for: " + recordTypeClass);
1020         DeleteTableRequest deleteTableRequest =
1021                 recordHelper.getDeleteTableRequest(
1022                         null /* packageFilters */,
1023                         DEFAULT_LONG /* startTime */,
1024                         DEFAULT_LONG /* endTime */,
1025                         false /* useLocalTimeFilter */);
1026         getStagedDatabase().getWritableDatabase().execSQL(deleteTableRequest.getDeleteCommand());
1027     }
1028 
1029     private <T extends Record> Pair<List<RecordInternal<?>>, Long> getRecordsToMerge(
1030             Class<T> recordTypeClass, long requestToken, RecordHelper<?> recordHelper) {
1031         ReadRecordsRequestUsingFilters<T> readRecordsRequest =
1032                 new ReadRecordsRequestUsingFilters.Builder<>(recordTypeClass)
1033                         .setAscending(true)
1034                         .setPageSize(2000)
1035                         .setPageToken(requestToken)
1036                         .build();
1037 
1038         Map<String, Boolean> extraReadPermsMapping = new ArrayMap<>();
1039         List<String> extraReadPerms = recordHelper.getExtraReadPermissions();
1040         for (var extraReadPerm : extraReadPerms) {
1041             extraReadPermsMapping.put(extraReadPerm, true);
1042         }
1043 
1044         // Working with startDateAccess of -1 as we don't want to have time based filtering in the
1045         // query.
1046         ReadTransactionRequest readTransactionRequest =
1047                 new ReadTransactionRequest(
1048                         null,
1049                         readRecordsRequest.toReadRecordsRequestParcel(),
1050                         DEFAULT_LONG /* startDateAccess */,
1051                         false,
1052                         extraReadPermsMapping);
1053 
1054         List<RecordInternal<?>> recordInternalList;
1055         long token = DEFAULT_LONG;
1056         ReadTableRequest readTableRequest = readTransactionRequest.getReadRequests().get(0);
1057         try (Cursor cursor = read(readTableRequest)) {
1058             recordInternalList =
1059                     recordHelper.getInternalRecords(
1060                             cursor, readTableRequest.getPageSize(), mStagedPackageNamesByAppIds);
1061             String startTimeColumnName = recordHelper.getStartTimeColumnName();
1062 
1063             populateInternalRecordsWithExtraData(recordInternalList, readTableRequest);
1064 
1065             // Get the token for the next read request.
1066             if (cursor.moveToNext()) {
1067                 token = getCursorLong(cursor, startTimeColumnName);
1068             }
1069         }
1070         return Pair.create(recordInternalList, token);
1071     }
1072 
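    // Runs the given read request against the staged (restored) database; access to the staged
    // db handle is guarded by mMergingLock.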
    private Cursor read(ReadTableRequest request) {
        synchronized (mMergingLock) {
            return mStagedDatabase.getReadableDatabase().rawQuery(request.getReadCommand(), null);
        }
    }

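    /**
     * Executes the extra read requests attached to {@code request} (if any) and lets the record
     * helper attach the additional fields from those tables to the already-parsed records.
     */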
    private void populateInternalRecordsWithExtraData(
            List<RecordInternal<?>> records, ReadTableRequest request) {
        if (request.getExtraReadRequests() == null) {
            return;
        }
        for (ReadTableRequest extraDataRequest : request.getExtraReadRequests()) {
            // Close the cursor once the extra data has been copied into the records.
            try (Cursor cursorExtraData = read(extraDataRequest)) {
                request.getRecordHelper()
                        .updateInternalRecordsWithExtraFields(
                                records, cursorExtraData, extraDataRequest.getTableName());
            }
        }
    }

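    /**
     * Reads the app info table of the staged database to build the row-id to package-name mapping
     * used while merging records, and seeds the target device's app info for any package that is
     * not installed there.
     */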
    private void prepInternalDataPerStagedDb() {
        try (Cursor cursor = read(new ReadTableRequest(AppInfoHelper.TABLE_NAME))) {
            while (cursor.moveToNext()) {
                long rowId = getCursorLong(cursor, RecordHelper.PRIMARY_COLUMN_NAME);
                String packageName = getCursorString(cursor, AppInfoHelper.PACKAGE_COLUMN_NAME);
                String appName = getCursorString(cursor, AppInfoHelper.APPLICATION_COLUMN_NAME);
                byte[] icon = getCursorBlob(cursor, AppInfoHelper.APP_ICON_COLUMN_NAME);
                mStagedPackageNamesByAppIds.put(rowId, packageName);

                // If this package is not installed on the target device and is not present in the
                // health db, then fill the health db with the info from the source db.
                AppInfoHelper.getInstance()
                        .addOrUpdateAppInfoIfNotInstalled(
                                mContext, packageName, appName, icon, false /* onlyReplace */);
            }
        }
    }

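    // Lazily creates the HealthConnectDatabase for the staged data on first use; guarded by
    // mMergingLock so all merging threads see the same instance.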
    @VisibleForTesting
    HealthConnectDatabase getStagedDatabase() {
        synchronized (mMergingLock) {
            if (mStagedDatabase == null) {
                mStagedDatabase = new HealthConnectDatabase(mStagedDbContext);
            }
            return mStagedDatabase;
        }
    }

    /**
     * {@link Context} for the staged health connect db.
     *
     * @hide
     */
    private static final class StagedDatabaseContext extends ContextWrapper {
        StagedDatabaseContext(@NonNull Context context) {
            super(context);
            Objects.requireNonNull(context);
        }

        @Override
        public File getDatabasePath(String name) {
            File stagedDataDir = getStagedRemoteDataDirectoryForUser(0);
            stagedDataDir.mkdirs();
            return new File(stagedDataDir, name);
        }
    }

    /** Executes the given task as a critical section while holding the states read lock. */
    public <E extends Throwable> void runWithStatesReadLock(RunnableWithThrowable<E> task)
            throws E {
        mStatesLock.readLock().lock();
        try {
            task.run();
        } finally {
            mStatesLock.readLock().unlock();
        }
    }

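    // A minimal sketch of how a caller inside this class might schedule work through
    // BackupRestoreJobService (the job name, job id, and delay below are illustrative
    // assumptions, not values taken from this file):
    //
    //   PersistableBundle extras = new PersistableBundle();
    //   extras.putInt(BackupRestoreJobService.EXTRA_USER_ID, userHandle.getIdentifier());
    //   extras.putString(BackupRestoreJobService.EXTRA_JOB_NAME_KEY, "example_job");
    //   JobInfo jobInfo =
    //           new JobInfo.Builder(
    //                           jobId, new ComponentName(context, BackupRestoreJobService.class))
    //                   .setExtras(extras)
    //                   .setMinimumLatency(delayMillis)
    //                   .build();
    //   BackupRestoreJobService.schedule(context, jobInfo, backupRestore);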
    /** Schedules the jobs for {@link BackupRestore}. */
    public static final class BackupRestoreJobService extends JobService {
        public static final String BACKUP_RESTORE_JOBS_NAMESPACE = "BACKUP_RESTORE_JOBS_NAMESPACE";
        public static final String EXTRA_USER_ID = "user_id";
        public static final String EXTRA_JOB_NAME_KEY = "job_name";
        private static final int BACKUP_RESTORE_JOB_ID = 1000;

        static volatile BackupRestore sBackupRestore;

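        // Rejects jobs targeting a user other than the current active user and jobs without a
        // name; otherwise hands the work to an internal task and reports completion via
        // jobFinished.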
        @Override
        public boolean onStartJob(JobParameters params) {
            int userId = params.getExtras().getInt(EXTRA_USER_ID, DEFAULT_INT);
            if (userId != sBackupRestore.getCurrentUserHandle().getIdentifier()) {
                Slog.w(
                        TAG,
                        "Got onStartJob for non-active user: "
                                + userId
                                + ", but the current active user is: "
                                + sBackupRestore.getCurrentUserHandle().getIdentifier());
                return false;
            }

            String jobName = params.getExtras().getString(EXTRA_JOB_NAME_KEY);
            if (Objects.isNull(jobName)) {
                Slog.w(TAG, "Got onStartJob for a nameless job");
                return false;
            }

            HealthConnectThreadScheduler.scheduleInternalTask(
                    () -> jobFinished(params, sBackupRestore.handleJob(params.getExtras())));

            return true;
        }

        @Override
        public boolean onStopJob(JobParameters params) {
            return false;
        }

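        // Schedules the given job in the backup/restore namespace with the caller's identity
        // cleared, logging an error if the JobScheduler rejects it.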
        static void schedule(
                Context context, @NonNull JobInfo jobInfo, BackupRestore backupRestore) {
            sBackupRestore = backupRestore;
            final long token = Binder.clearCallingIdentity();
            try {
                int result =
                        Objects.requireNonNull(context.getSystemService(JobScheduler.class))
                                .forNamespace(BACKUP_RESTORE_JOBS_NAMESPACE)
                                .schedule(jobInfo);

                if (result != JobScheduler.RESULT_SUCCESS) {
                    Slog.e(
                            TAG,
                            "Failed to schedule: "
                                    + jobInfo.getExtras().getString(EXTRA_JOB_NAME_KEY));
                }
            } finally {
                Binder.restoreCallingIdentity(token);
            }
        }

        /** Cancels all jobs for our namespace. */
        public static void cancelAllJobs(Context context) {
            Objects.requireNonNull(context.getSystemService(JobScheduler.class))
                    .forNamespace(BACKUP_RESTORE_JOBS_NAMESPACE)
                    .cancelAll();
        }
    }
}