/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.android.car.kitchensink.backup;

import android.annotation.Nullable;
import android.app.backup.BackupDataInput;
import android.app.backup.BackupDataOutput;
import android.app.backup.BackupTransport;
import android.app.backup.RestoreDescription;
import android.app.backup.RestoreSet;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageInfo;
import android.os.IBinder;
import android.os.ParcelFileDescriptor;
import android.system.ErrnoException;
import android.system.Os;
import android.system.StructStat;
import android.util.ArrayMap;
import android.util.Base64;
import android.util.Log;

import libcore.io.IoUtils;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

public final class KitchenSinkBackupTransport extends BackupTransport {
    private static final String TRANSPORT_DIR_NAME =
            "com.google.android.car.kitchensink.backup.KitchenSinkBackupTransport";

    private static final String TRANSPORT_DESTINATION_STRING =
            "Backing up to debug-only private cache";

    private static final String TRANSPORT_DATA_MANAGEMENT_LABEL = "";
    private static final String FULL_DATA_DIR = "_full";
    private static final String INCREMENTAL_DIR = "_delta";
    private static final String DEFAULT_DEVICE_NAME_FOR_RESTORE_SET = "flash";
    // The currently-active restore set always has the same (nonzero) token,
    // which is 1 in this case.
    private static final long CURRENT_SET_TOKEN = 1;
    private static final String TAG = KitchenSinkBackupTransport.class.getSimpleName();
    private static final boolean DEBUG = true;
    private static final long FULL_BACKUP_SIZE_QUOTA = 25 * 1024 * 1024;
    private static final long KEY_VALUE_BACKUP_SIZE_QUOTA = 5 * 1024 * 1024;
    // Set of other possible backups currently available over this transport.
    static final long[] POSSIBLE_SETS = { 2, 3, 4, 5, 6, 7, 8, 9 };
    private static final int FULL_RESTORE_BUFFER_BYTE_SIZE = 2 * 1024;
    private static final int FULL_BACKUP_BUFFER_BYTE_SIZE = 4096;

    private final Context mContext;
    private File mDataDir;
    private File mCurrentSetDir;
    private File mCurrentSetFullDir;
    private File mCurrentSetIncrementalDir;

    // Key/value restore state
    private PackageInfo[] mRestorePackages;
    private int mRestorePackageIndex; // Index into mRestorePackages
    private int mRestoreType;
    private File mRestoreSetDir;
    private File mRestoreSetIncrementalDir;
    private File mRestoreSetFullDir;

    private byte[] mFullBackupBuffer;
    private long mFullBackupSize;
    private ParcelFileDescriptor mSocket;
    private String mFullTargetPackage;
    private FileInputStream mSocketInputStream;
    private BufferedOutputStream mFullBackupOutputStream;

    private byte[] mFullRestoreBuffer;
    private FileInputStream mCurFullRestoreStream;

    private void makeDataDirs() {
        if (DEBUG) Log.v(TAG, "Making new data directories.");
        mDataDir = mContext.getFilesDir();
        mCurrentSetDir = new File(mDataDir, Long.toString(CURRENT_SET_TOKEN));
        mCurrentSetFullDir = new File(mCurrentSetDir, FULL_DATA_DIR);
        mCurrentSetIncrementalDir = new File(mCurrentSetDir, INCREMENTAL_DIR);

        mCurrentSetDir.mkdirs();
        mCurrentSetFullDir.mkdir();
        mCurrentSetIncrementalDir.mkdir();
    }

    public KitchenSinkBackupTransport(Context context) {
        mContext = context;
        makeDataDirs();
    }

    @Override
    public String name() {
        return new ComponentName(mContext, this.getClass()).flattenToShortString();
    }

    @Override
    public String transportDirName() {
        return TRANSPORT_DIR_NAME;
    }

    @Override
    public String currentDestinationString() {
        return TRANSPORT_DESTINATION_STRING;
    }

    @Override
    public Intent configurationIntent() {
        // The KitchenSink transport is not user-configurable
        return null;
    }

    @Override
    public Intent dataManagementIntent() {
        // The KitchenSink transport does not present a data-management UI
        return null;
    }

    @Override
    public CharSequence dataManagementIntentLabel() {
        return TRANSPORT_DATA_MANAGEMENT_LABEL;
    }

    @Override
    public long requestBackupTime() {
        if (DEBUG) Log.d(TAG, "request backup time");
        // any time is a good time for local backup
        return 0;
    }

    @Override
    public int initializeDevice() {
        if (DEBUG) {
            Log.d(TAG, "initializing server side storage for this device; wiping all data");
        }
        // Deletes all data from the current storage set
        deleteContents(mCurrentSetDir);
        makeDataDirs();
        return TRANSPORT_OK;
    }

    // Deletes the contents of the given directory recursively
    private void deleteContents(File dirname) {
        if (DEBUG) Log.d(TAG, "Deleting data from: " + dirname);
        File[] contents = dirname.listFiles();
        if (contents == null) return;
        for (File f : contents) {
            if (f.isDirectory()) {
                // delete the directory's contents then fall through
                // and delete the directory itself.
                deleteContents(f);
            }
            // deletes the file, or the directory itself after deleting everything in it
            f.delete();
        }
    }

    // Encapsulation of a single k/v element change
    private static final class KVOperation {
        // Element filename after base 64 encoding, used as the key, for efficiency
        final String mKey;
        // An allocated byte array where data is placed when read from the stream;
        // null when this is a deletion operation
        final @Nullable byte[] mValue;

        KVOperation(String k, @Nullable byte[] v) {
            mKey = k;
            mValue = v;
        }
    }

    @Override
    public int performBackup(PackageInfo packageInfo, ParcelFileDescriptor data) {
        return performBackup(packageInfo, data, /* flags= */ 0);
    }

    @Override
    public int performBackup(PackageInfo packageInfo, ParcelFileDescriptor data, int flags) {
        Log.i(TAG, "perform backup is called for: " + packageInfo.packageName);
        try {
            return performBackupInternal(packageInfo, data, flags);
        } finally {
            // close the output stream regardless of whether an exception is thrown or caught.
            IoUtils.closeQuietly(data);
        }
    }

    private int performBackupInternal(PackageInfo packageInfo, ParcelFileDescriptor data,
            int flags) {
        Log.i(TAG, "perform backup internal is called for: " + packageInfo.packageName);
        if ((flags & FLAG_DATA_NOT_CHANGED) != 0) {
            // For unchanged data we do nothing and tell the caller everything was OK
            Log.i(TAG, "Data is not changed, no backup needed for " + packageInfo.packageName);
            return TRANSPORT_OK;
        }
        boolean isIncremental = (flags & FLAG_INCREMENTAL) != 0;
        boolean isNonIncremental = (flags & FLAG_NON_INCREMENTAL) != 0;

        if (isIncremental) {
            Log.i(TAG, "Performing incremental backup for " + packageInfo.packageName);
        } else if (isNonIncremental) {
            Log.i(TAG, "Performing non-incremental backup for " + packageInfo.packageName);
        } else {
            Log.i(TAG, "Performing backup for " + packageInfo.packageName);
        }

        if (DEBUG) {
            try {
                // get detailed information about the file via the system stat API
                StructStat ss = Os.fstat(data.getFileDescriptor());
                Log.v(TAG, "performBackup() pkg=" + packageInfo.packageName
                        + " size=" + ss.st_size + " flags=" + flags);
            } catch (ErrnoException e) {
                Log.w(TAG, "Unable to stat input file in performBackup() " + e);
            }
        }

        File packageDir = new File(mCurrentSetIncrementalDir, packageInfo.packageName);
        boolean hasDataForPackage = !packageDir.mkdirs();

        if (isNonIncremental && hasDataForPackage) {
            Log.w(TAG, "Requested non-incremental, deleting existing data.");
            clearBackupData(packageInfo);
            packageDir.mkdirs();
        }

        // go through the entire input data stream to make a list of all the updates
        // to apply later
        ArrayList<KVOperation> changeOps;
        try {
            changeOps = parseBackupStream(data);
        } catch (IOException e) {
            // if something goes wrong, abort the operation and return an error.
            Log.v(TAG, "Exception reading backup input", e);
            return TRANSPORT_ERROR;
        }

        // calculate the sum of the current in-datastore size per key to detect quota overrun
        ArrayMap<String, Integer> datastore = new ArrayMap<>();
        int totalSize = parseKeySizes(packageDir, datastore);
        Log.i(TAG, "Total size of the current data: " + totalSize);
        // find out the datastore size that will result from applying the
        // sequence of delta operations
        if (DEBUG) {
            int numOps = changeOps.size();
            if (numOps > 0) {
                Log.v(TAG, "Calculating delta size impact for " + numOps + " updates.");
            } else {
                Log.v(TAG, "No operations in backup stream, so no size change");
            }
        }

        int updatedSize = totalSize;
        for (KVOperation op : changeOps) {
            // Deduct the size of the key we're about to replace, if any
            final Integer curSize = datastore.get(op.mKey);
            if (curSize != null) {
                updatedSize -= curSize.intValue();
                if (DEBUG && op.mValue == null) {
                    Log.d(TAG, "delete " + op.mKey + ", updated total " + updatedSize);
                }
            }

            // And add back the size of the value we're about to store, if any
            if (op.mValue != null) {
                updatedSize += op.mValue.length;
                if (DEBUG) {
                    Log.d(TAG, ((curSize == null) ? " new " : " replace ")
                            + op.mKey + ", updated total " + updatedSize);
                }
            }
        }

        // If our final size is over quota, report the failure
        if (updatedSize > KEY_VALUE_BACKUP_SIZE_QUOTA) {
            Log.w(TAG, "New datastore size " + updatedSize
                    + " exceeds quota " + KEY_VALUE_BACKUP_SIZE_QUOTA);
            return TRANSPORT_QUOTA_EXCEEDED;
        }
        // No problem with storage size, so go ahead and apply the delta operations
        // (in the order that the app provided them)
        for (KVOperation op : changeOps) {
            File element = new File(packageDir, op.mKey);

            // this is either a deletion or a rewrite-from-zero, so we can just remove
            // the existing file and proceed in either case.
            Log.v(TAG, "Deleting the existing file: " + element.getPath());
            element.delete();

            // if this wasn't a deletion, put the new data in place
            if (op.mValue != null) {
                try (FileOutputStream out = new FileOutputStream(element)) {
                    out.write(op.mValue, 0, op.mValue.length);
                } catch (IOException e) {
                    Log.e(TAG, "Unable to update key file " + element, e);
                    return TRANSPORT_ERROR;
                }
            }
        }
        Log.i(TAG, "KVBackup is successful.");
        return TRANSPORT_OK;
    }

    // Parses the backup data stream; each key is Base64-encoded, and each value (if present)
    // is copied into a newly allocated byte array.
    private ArrayList<KVOperation> parseBackupStream(ParcelFileDescriptor data)
            throws IOException {
        ArrayList<KVOperation> changeOps = new ArrayList<>();
        BackupDataInput changeSet = new BackupDataInput(data.getFileDescriptor());
        while (changeSet.readNextHeader()) {
            String key = changeSet.getKey();
            String base64Key = new String(Base64.encode(key.getBytes(), Base64.NO_WRAP));
            int dataSize = changeSet.getDataSize();
            if (DEBUG) {
                Log.d(TAG, "Delta operation key: " + key + "; size: " + dataSize
                        + "; key64: " + base64Key);
            }

            byte[] buf = null;
            if (dataSize >= 0) {
                buf = new byte[dataSize];
                changeSet.readEntityData(buf, 0, dataSize);
            }
            changeOps.add(new KVOperation(base64Key, buf));
        }
        return changeOps;
    }

    // Reads the given datastore directory, building a table of the value size of each
    // keyed element, and returning the summed total.
    private int parseKeySizes(File packageDir, ArrayMap<String, Integer> datastore) {
        int totalSize = 0;
        final String[] elements = packageDir.list();
        if (elements != null) {
            if (DEBUG) {
                Log.d(TAG, "Existing datastore contents: " + packageDir);
            }
            for (String file : elements) {
                File element = new File(packageDir, file);
                String key = file; // filename
                int size = (int) element.length();
                totalSize += size;
                if (DEBUG) {
                    Log.d(TAG, " key " + key + " size " + size);
                }
                datastore.put(key, size);
            }
            if (DEBUG) {
                Log.d(TAG, "TOTAL: " + totalSize);
            }
        } else {
            if (DEBUG) {
                Log.d(TAG, "No existing data for package: " + packageDir);
            }
        }
        return totalSize;
    }

    @Override
    public IBinder getBinder() {
        if (DEBUG) Log.d(TAG, "get binder");
        return super.getBinder();
    }

    @Override
    public int getTransportFlags() {
        if (DEBUG) Log.d(TAG, "get transport flags");
        return super.getTransportFlags();
    }

    @Override
    public int clearBackupData(PackageInfo packageInfo) {
        File packageDir = new File(mCurrentSetIncrementalDir, packageInfo.packageName);
        if (DEBUG) {
            Log.d(TAG, "clear backup data for package: " + packageInfo.packageName
                    + " package directory: " + packageDir);
        }
        final File[] incrementalFiles = packageDir.listFiles();
        // deletes files in the incremental (key/value) file set
        if (incrementalFiles != null) {
            for (File f : incrementalFiles) {
                f.delete();
            }
            packageDir.delete();
        }
        // deletes files in the current full-data file set
        packageDir = new File(mCurrentSetFullDir, packageInfo.packageName);
        final File[] currentFiles = packageDir.listFiles();
        if (currentFiles != null) {
            for (File f : currentFiles) {
                f.delete();
            }
            packageDir.delete();
        }
        return TRANSPORT_OK;
    }

    // Called after performBackup(), performFullBackup(), or clearBackupData()
    @Override
    public int finishBackup() {
        if (DEBUG) Log.d(TAG, "finish backup for:" + mFullTargetPackage);
        return closeFullBackup();
    }

    private int closeFullBackup() {
        if (mSocket == null) {
            return TRANSPORT_OK;
        }
        try {
            if (mFullBackupOutputStream != null) {
                // forces any buffered output bytes
                // to be written out to the underlying output stream.
                mFullBackupOutputStream.flush();
                mFullBackupOutputStream.close();
            }
            mSocketInputStream = null;
            mFullTargetPackage = null;
            mSocket.close();
        } catch (IOException e) {
            if (DEBUG) {
                Log.w(TAG, "Exception caught in closeFullBackup()", e);
            }
            return TRANSPORT_ERROR;
        } finally {
            mSocket = null;
            mFullBackupOutputStream = null;
        }
        return TRANSPORT_OK;
    }

    // ------------------------------------------------------------------------------------
    // Full backup handling

    @Override
    public long requestFullBackupTime() {
        if (DEBUG) Log.d(TAG, "request full backup time");
        return 0;
    }

    @Override
    public int checkFullBackupSize(long size) {
        if (DEBUG) Log.d(TAG, "check full backup size");
        int result = TRANSPORT_OK;
        // Decline zero-size "backups"
        if (size <= 0) {
            result = TRANSPORT_PACKAGE_REJECTED;
        } else if (size > FULL_BACKUP_SIZE_QUOTA) {
            result = TRANSPORT_QUOTA_EXCEEDED;
        }
        if (result != TRANSPORT_OK) {
            if (DEBUG) {
                Log.d(TAG, "Declining backup of size " + size + "; full backup size quota: "
                        + FULL_BACKUP_SIZE_QUOTA);
            }
        }
        return result;
    }

    @Override
    public int performFullBackup(PackageInfo targetPackage, ParcelFileDescriptor socket) {
        if (DEBUG) Log.d(TAG, "perform full backup for: " + targetPackage);
        if (mSocket != null) {
            Log.e(TAG, "Attempt to initiate full backup while one is in progress");
            return TRANSPORT_ERROR;
        }
        // We know a priori that we run in the system process, so we need to make
        // sure to dup() our own copy of the socket fd. Transports which run in
        // their own processes must not do this.
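        // Note: the dup()'d descriptor and its FileInputStream stay open across the
        // subsequent sendBackupData() calls for this package; they are released in
        // closeFullBackup(), reached from finishBackup() or cancelFullBackup().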
        try {
            mFullBackupSize = 0;
            mSocket = ParcelFileDescriptor.dup(socket.getFileDescriptor());
            mSocketInputStream = new FileInputStream(mSocket.getFileDescriptor());
        } catch (IOException e) {
            Log.e(TAG, "Unable to process socket for full backup: " + e);
            return TRANSPORT_ERROR;
        }

        mFullTargetPackage = targetPackage.packageName;
        mFullBackupBuffer = new byte[FULL_BACKUP_BUFFER_BYTE_SIZE];

        return TRANSPORT_OK;
    }

    @Override
    public int performFullBackup(PackageInfo targetPackage, ParcelFileDescriptor socket,
            int flags) {
        Log.v(TAG, "perform full backup, flags:" + flags + ", package:" + targetPackage);
        return super.performFullBackup(targetPackage, socket, flags);
    }

    // Reads data from the socket file descriptor provided in the performFullBackup() call
    @Override
    public int sendBackupData(final int numBytes) {
        if (DEBUG) Log.d(TAG, "send backup data");
        if (mSocket == null) {
            Log.w(TAG, "Attempted sendBackupData before performFullBackup");
            return TRANSPORT_ERROR;
        }

        mFullBackupSize += numBytes;
        if (mFullBackupSize > FULL_BACKUP_SIZE_QUOTA) {
            return TRANSPORT_QUOTA_EXCEEDED;
        }

        if (numBytes > mFullBackupBuffer.length) {
            mFullBackupBuffer = new byte[numBytes];
        }
        // creates a new full backup output stream at the target location
        if (mFullBackupOutputStream == null) {
            FileOutputStream outputStream;
            try {
                File tarball = new File(mCurrentSetFullDir, mFullTargetPackage);
                outputStream = new FileOutputStream(tarball);
            } catch (FileNotFoundException e) {
                return TRANSPORT_ERROR;
            }
            // closed later when finishBackup() or cancelFullBackup() is called
            mFullBackupOutputStream = new BufferedOutputStream(outputStream);
        }

        int bytesLeft = numBytes;
        while (bytesLeft > 0) {
            try {
                int nRead = mSocketInputStream.read(mFullBackupBuffer, 0, bytesLeft);
                if (nRead < 0) {
                    // Something went wrong if we expect data but saw EOD
                    Log.w(TAG, "Unexpected EOD; failing backup");
                    return TRANSPORT_ERROR;
                }
                Log.i(TAG, "read " + nRead + " bytes of data");
                mFullBackupOutputStream.write(mFullBackupBuffer, 0, nRead);
                bytesLeft -= nRead;
            } catch (IOException e) {
                Log.e(TAG, "Error handling backup data for " + mFullTargetPackage);
                return TRANSPORT_ERROR;
            }
        }
        if (DEBUG) {
            Log.d(TAG, "Stored " + numBytes + " of data");
        }
        return TRANSPORT_OK;
    }

    // Happens before finishBackup(); tear down any ongoing backup state
    @Override
    public void cancelFullBackup() {
        if (DEBUG) {
            Log.d(TAG, "Canceling full backup of " + mFullTargetPackage);
        }
        File archive = new File(mCurrentSetFullDir, mFullTargetPackage);
        closeFullBackup();
        if (archive.exists()) {
            archive.delete();
        }
    }

    // ------------------------------------------------------------------------------------
    // Restore handling

    @Override
    public RestoreSet[] getAvailableRestoreSets() {
        Log.v(TAG, "get available restore sets");
        long[] existing = new long[POSSIBLE_SETS.length + 1];
        // number of existing non-current sets
        int num = 0;
        // see which possible non-current sets exist...
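        // A non-current set is available only if a directory named after its token already
        // exists under mDataDir; nothing in this transport creates those directories itself.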
        for (long token : POSSIBLE_SETS) {
            // if the file directory exists for the non-current set
            if ((new File(mDataDir, Long.toString(token))).exists()) {
                existing[num++] = token;
                Log.v(TAG, "number of available restore sets: " + num);
            }
        }
        // always add the currently-active set last
        existing[num++] = CURRENT_SET_TOKEN;

        RestoreSet[] available = new RestoreSet[num];
        String deviceName = DEFAULT_DEVICE_NAME_FOR_RESTORE_SET;
        for (int i = 0; i < available.length; i++) {
            available[i] = new RestoreSet("Local disk image", deviceName, existing[i]);
        }
        return available;
    }

    @Override
    public long getCurrentRestoreSet() {
        // The current restore set always has the same token, which is 1
        if (DEBUG) Log.d(TAG, "get current restore set");
        return CURRENT_SET_TOKEN;
    }

    @Override
    public int startRestore(long token, PackageInfo[] packages) {
        if (DEBUG) {
            Log.d(TAG, "start restore for token: " + token + ", num of packages: "
                    + packages.length);
        }
        mRestorePackages = packages;
        mRestorePackageIndex = -1;
        mRestoreSetDir = new File(mDataDir, Long.toString(token));
        mRestoreSetIncrementalDir = new File(mRestoreSetDir, INCREMENTAL_DIR);
        mRestoreSetFullDir = new File(mRestoreSetDir, FULL_DATA_DIR);
        return TRANSPORT_OK;
    }

    // Get the package name of the next application with data in the backup store, plus
    // a description of the structure of the restored type
    @Override
    public RestoreDescription nextRestorePackage() {
        if (mRestorePackages == null) throw new IllegalStateException("startRestore not called");
        if (DEBUG) {
            Log.d(TAG, "nextRestorePackage() : mRestorePackageIndex=" + mRestorePackageIndex
                    + " length=" + mRestorePackages.length);
        }

        boolean found;
        while (++mRestorePackageIndex < mRestorePackages.length) {
            // name of the current restore package
            String name = mRestorePackages[mRestorePackageIndex].packageName;

            // If we have key/value data for this package, deliver that;
            // skip packages where we have a data dir but no actual contents
            found = hasRestoreDataForPackage(name);
            if (found) {
                mRestoreType = RestoreDescription.TYPE_KEY_VALUE;
            } else {
                // No key/value data; check for [non-empty] full data
                File maybeFullData = new File(mRestoreSetFullDir, name);
                if (maybeFullData.length() > 0) {
                    if (DEBUG) {
                        Log.d(TAG, "nextRestorePackage(TYPE_FULL_STREAM) @ "
                                + mRestorePackageIndex + " = " + name);
                    }
                    mRestoreType = RestoreDescription.TYPE_FULL_STREAM;
                    mCurFullRestoreStream = null; // ensure starting from the ground state
                    found = true;
                }
            }

            if (found) {
                return new RestoreDescription(name, mRestoreType);
            }
            // if not found for either type
            if (DEBUG) {
                Log.d(TAG, "... package @ " + mRestorePackageIndex + " = " + name
                        + " has no data; skipping");
            }
        }

        if (DEBUG) Log.d(TAG, "no more packages to restore");
        return RestoreDescription.NO_MORE_PACKAGES;
    }

    // check if this package has key/value backup data
    private boolean hasRestoreDataForPackage(String packageName) {
        String[] contents = (new File(mRestoreSetIncrementalDir, packageName)).list();
        if (contents != null && contents.length > 0) {
            if (DEBUG) {
                Log.d(TAG, "nextRestorePackage(TYPE_KEY_VALUE) @ "
                        + mRestorePackageIndex + " = " + packageName);
            }
            return true;
        }
        return false;
    }

    // Get the data for the application returned by nextRestorePackage(), only if key/value is
    // the delivery type
    @Override
    public int getRestoreData(ParcelFileDescriptor outFd) {
        if (DEBUG) Log.d(TAG, "get restore data");
        if (mRestorePackages == null) throw new IllegalStateException("startRestore not called");
        if (mRestorePackageIndex < 0) {
            throw new IllegalStateException("nextRestorePackage not called");
        }
        if (mRestoreType != RestoreDescription.TYPE_KEY_VALUE) {
            throw new IllegalStateException("getRestoreData(fd) for non-key/value dataset, "
                    + "restore type:" + mRestoreType);
        }
        File packageDir = new File(mRestoreSetIncrementalDir,
                mRestorePackages[mRestorePackageIndex].packageName);
        // the restore set is the concatenation of the individual record blobs,
        // each of which is a file in the package's directory.
        ArrayList<DecodedFilename> blobs = contentsByKey(packageDir);
        if (blobs == null) { // nextRestorePackage() ensures the dir exists, so this is an error
            Log.e(TAG, "No keys for package: " + packageDir);
            return TRANSPORT_ERROR;
        }

        // We expect at least some data if the directory exists in the first place
        if (DEBUG) Log.d(TAG, "getRestoreData() found " + blobs.size() + " key files");
        BackupDataOutput out = new BackupDataOutput(outFd.getFileDescriptor());
        try {
            for (DecodedFilename keyEntry : blobs) {
                File f = keyEntry.mFile;
                try (FileInputStream in = new FileInputStream(f)) {
                    int size = (int) f.length();
                    byte[] buf = new byte[size];
                    in.read(buf);
                    if (DEBUG) Log.d(TAG, "... key=" + keyEntry.mKey + " size=" + size);
                    out.writeEntityHeader(keyEntry.mKey, size);
                    out.writeEntityData(buf, size);
                }
            }
            return TRANSPORT_OK;
        } catch (IOException e) {
            Log.e(TAG, "Unable to read backup records", e);
            return TRANSPORT_ERROR;
        }
    }

    private static final class DecodedFilename implements Comparable<DecodedFilename> {
        public File mFile;
        public String mKey;

        DecodedFilename(File f) {
            mFile = f;
            mKey = new String(Base64.decode(f.getName(), Base64.DEFAULT));
        }

        @Override
        public int compareTo(DecodedFilename other) {
            // sorts into ascending lexical order by decoded key
            return mKey.compareTo(other.mKey);
        }
    }

    // Return a list of the files in the given directory, sorted lexically by
    // the Base64-decoded file name, not by the on-disk filename
    private ArrayList<DecodedFilename> contentsByKey(File dir) {
        File[] allFiles = dir.listFiles();
        if (allFiles == null || allFiles.length == 0) {
            return null;
        }

        // Decode the filenames into keys then sort lexically by key
        ArrayList<DecodedFilename> contents = new ArrayList<>();
        for (File f : allFiles) {
            contents.add(new DecodedFilename(f));
        }
        Collections.sort(contents);
        return contents;
    }

    @Override
    public void finishRestore() {
        if (DEBUG) Log.d(TAG, "finishRestore()");
        if (mRestoreType == RestoreDescription.TYPE_FULL_STREAM) {
            resetFullRestoreState();
        }
        // set the restore type back to 0
        mRestoreType = 0;
    }

    // Clears the full restore stream and full restore buffer back to the ground state
    private void resetFullRestoreState() {
        IoUtils.closeQuietly(mCurFullRestoreStream);
        mCurFullRestoreStream = null;
        mFullRestoreBuffer = null;
    }

    // ------------------------------------------------------------------------------------
    // Full restore handling

    // Writes some data to the socket supplied to this call, and returns the number of bytes
    // written. The system will then read that many bytes and stream them to the
    // application's agent for restore.
    @Override
    public int getNextFullRestoreDataChunk(ParcelFileDescriptor socket) {
        if (DEBUG) Log.d(TAG, "get next full restore data chunk");
        if (mRestoreType != RestoreDescription.TYPE_FULL_STREAM) {
            throw new IllegalStateException("Asked for full restore data for non-stream package"
                    + ", restore type:" + mRestoreType);
        }

        // first chunk?
        if (mCurFullRestoreStream == null) {
            final String name = mRestorePackages[mRestorePackageIndex].packageName;
            if (DEBUG) Log.i(TAG, "Starting full restore of " + name);
            File dataset = new File(mRestoreSetFullDir, name);
            try {
                mCurFullRestoreStream = new FileInputStream(dataset);
            } catch (IOException e) {
                // If we can't open the target package's tarball, we return the single-package
                // error code and let the caller go on to the next package.
                Log.e(TAG, "Unable to read archive for " + name, e);
                return TRANSPORT_PACKAGE_REJECTED;
            }
            mFullRestoreBuffer = new byte[FULL_RESTORE_BUFFER_BYTE_SIZE];
        }

        FileOutputStream stream = new FileOutputStream(socket.getFileDescriptor());

        int nRead;
        try {
            nRead = mCurFullRestoreStream.read(mFullRestoreBuffer);
            if (nRead < 0) {
                // EOF: tell the caller we're done
                nRead = NO_MORE_DATA;
            } else if (nRead == 0) {
                // This shouldn't happen when reading a FileInputStream; we should always
                // get either a positive nonzero byte count or -1. Log the situation and
                // treat it as EOF.
                Log.w(TAG, "read() of archive file returned 0; treating as EOF");
                nRead = NO_MORE_DATA;
            } else {
                if (DEBUG) {
                    Log.i(TAG, "delivering restore chunk: " + nRead);
                }
                stream.write(mFullRestoreBuffer, 0, nRead);
            }
        } catch (IOException e) {
            Log.e(TAG, "exception: " + e);
            return TRANSPORT_ERROR; // Hard error accessing the file; shouldn't happen
        } finally {
            IoUtils.closeQuietly(socket);
        }

        return nRead;
    }

    // If the OS encounters an error while processing RestoreDescription.TYPE_FULL_STREAM
    // data for restore, it will invoke this method to tell the transport that it should
    // abandon the data download for the current package.
    @Override
    public int abortFullRestore() {
        Log.v(TAG, "abort full restore");
        if (mRestoreType != RestoreDescription.TYPE_FULL_STREAM) {
            throw new IllegalStateException("abortFullRestore() but not currently restoring"
                    + ", restore type: " + mRestoreType);
        }
        resetFullRestoreState();
        mRestoreType = 0;
        return TRANSPORT_OK;
    }

    @Override
    public long getBackupQuota(String packageName, boolean isFullBackup) {
        if (DEBUG) Log.d(TAG, "get backup quota");
        return isFullBackup ? FULL_BACKUP_SIZE_QUOTA : KEY_VALUE_BACKUP_SIZE_QUOTA;
    }
}
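// A minimal sketch of how this transport might be exercised manually during development,
// assuming the standard bmgr command-line tool and that the transport has been registered
// with the backup manager (the transport-name placeholder below is illustrative, not taken
// from this file):
//
//   adb shell bmgr list transports
//   adb shell bmgr transport <kitchensink-transport-name>
//   adb shell bmgr backupnow <package>
//   adb shell bmgr restore 1 <package>    (1 == CURRENT_SET_TOKEN)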