/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License
 */

package com.android.server.job;

import static android.net.NetworkCapabilities.NET_CAPABILITY_TEMPORARILY_NOT_METERED;
import static android.net.NetworkCapabilities.TRANSPORT_TEST;

import static com.android.server.job.JobSchedulerService.sElapsedRealtimeClock;
import static com.android.server.job.JobSchedulerService.sSystemClock;

import android.annotation.NonNull;
import android.annotation.Nullable;
import android.app.job.JobInfo;
import android.app.job.JobWorkItem;
import android.content.ComponentName;
import android.content.Context;
import android.net.NetworkRequest;
import android.os.Environment;
import android.os.Handler;
import android.os.PersistableBundle;
import android.os.Process;
import android.os.SystemClock;
import android.text.TextUtils;
import android.text.format.DateUtils;
import android.util.ArraySet;
import android.util.AtomicFile;
import android.util.Pair;
import android.util.Slog;
import android.util.SparseArray;
import android.util.SparseBooleanArray;
import android.util.SystemConfigFileCommitEventLogger;
import android.util.Xml;

import com.android.internal.annotations.GuardedBy;
import com.android.internal.annotations.VisibleForTesting;
import com.android.internal.util.ArrayUtils;
import com.android.internal.util.BitUtils;
import com.android.modules.expresslog.Histogram;
import com.android.modules.utils.TypedXmlPullParser;
import com.android.modules.utils.TypedXmlSerializer;
import com.android.server.AppSchedulingModuleThread;
import com.android.server.IoThread;
import com.android.server.job.JobSchedulerInternal.JobStorePersistStats;
import com.android.server.job.controllers.JobStatus;

import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import org.xmlpull.v1.XmlSerializer;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.CountDownLatch;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;
/**
 * Maintains the master list of jobs that the job scheduler is tracking. These jobs are compared by
 * reference, so none of the functions in this class should make a copy.
 * Also handles read/write of persisted jobs.
 *
 * Note on locking:
 *      All callers to this class must <strong>lock on the class object they are calling</strong>.
 *      This is important b/c {@link com.android.server.job.JobStore.WriteJobsMapToDiskRunnable}
 *      and {@link com.android.server.job.JobStore.ReadJobMapFromDiskRunnable} lock on that
 *      object.
 *
 * Test:
 * atest $ANDROID_BUILD_TOP/frameworks/base/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java
 */
public final class JobStore {
    private static final String TAG = "JobStore";
    private static final boolean DEBUG = JobSchedulerService.DEBUG;

    /** Threshold to adjust how often we want to write to the db. */
    private static final long JOB_PERSIST_DELAY = 2000L;
    private static final long SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS = 30 * 60_000L;
    @VisibleForTesting
    static final String JOB_FILE_SPLIT_PREFIX = "jobs_";
    private static final Pattern SPLIT_FILE_PATTERN =
            Pattern.compile("^" + JOB_FILE_SPLIT_PREFIX + "\\d+.xml$");
    private static final int ALL_UIDS = -1;
    @VisibleForTesting
    static final int INVALID_UID = -2;

    final Object mLock;
    final Object mWriteScheduleLock; // used solely for invariants around write scheduling
    final JobSet mJobSet; // per-caller-uid and per-source-uid tracking
    final Context mContext;

    // Bookkeeping around incorrect boot-time system clock
    private final long mXmlTimestamp;
    private boolean mRtcGood;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteScheduled;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteInProgress;

    @GuardedBy("mWriteScheduleLock")
    private boolean mSplitFileMigrationNeeded;

    private static final Object sSingletonLock = new Object();
    private final SystemConfigFileCommitEventLogger mEventLogger;
    private final AtomicFile mJobsFile;
    private final File mJobFileDirectory;
    private final SparseBooleanArray mPendingJobWriteUids = new SparseBooleanArray();
    /** Handler backed by IoThread for writing to disk. */
    private final Handler mIoHandler = IoThread.getHandler();
    private static JobStore sSingleton;

    private boolean mUseSplitFiles = JobSchedulerService.Constants.DEFAULT_PERSIST_IN_SPLIT_FILES;

    private JobStorePersistStats mPersistInfo = new JobStorePersistStats();

    /**
     * Separately updated value of the JobSet size to avoid recalculating it frequently for logging
     * purposes. Continue to use {@link JobSet#size()} for the up-to-date and accurate value.
     */
    private int mCurrentJobSetSize = 0;
    private int mScheduledJob30MinHighWaterMark = 0;
    private static final Histogram sScheduledJob30MinHighWaterMarkLogger = new Histogram(
            "job_scheduler.value_hist_scheduled_job_30_min_high_water_mark",
            new Histogram.ScaledRangeOptions(15, 1, 99, 1.5f));
    private final Runnable mScheduledJobHighWaterMarkLoggingRunnable = new Runnable() {
        @Override
        public void run() {
            AppSchedulingModuleThread.getHandler().removeCallbacks(this);
            synchronized (mLock) {
                sScheduledJob30MinHighWaterMarkLogger.logSample(mScheduledJob30MinHighWaterMark);
                mScheduledJob30MinHighWaterMark = mJobSet.size();
            }
            // The count doesn't need to be logged at exact times. Logging based on system uptime
            // should be fine.
            AppSchedulingModuleThread.getHandler()
                    .postDelayed(this, SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS);
        }
    };
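
    // Illustrative sketch of the on-disk layout this class manages (paths derived from the
    // constructor below; the exact root depends on the dataDir passed in, normally
    // Environment.getDataDirectory()):
    //
    //   /data/system/job/jobs.xml        - legacy single-file mode (all persisted jobs)
    //   /data/system/job/jobs_<uid>.xml  - split-file mode, one file per calling UID
    //                                      (see JOB_FILE_SPLIT_PREFIX and SPLIT_FILE_PATTERN)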

    /** Used by the {@link JobSchedulerService} to instantiate the JobStore. */
    static JobStore get(JobSchedulerService jobManagerService) {
        synchronized (sSingletonLock) {
            if (sSingleton == null) {
                sSingleton = new JobStore(jobManagerService.getContext(),
                        jobManagerService.getLock(), Environment.getDataDirectory());
            }
            return sSingleton;
        }
    }

    /**
     * @return A freshly initialized job store object, with no loaded jobs.
     */
    @VisibleForTesting
    public static JobStore initAndGetForTesting(Context context, File dataDir) {
        JobStore jobStoreUnderTest = new JobStore(context, new Object(), dataDir);
        jobStoreUnderTest.init();
        jobStoreUnderTest.clearForTesting();
        return jobStoreUnderTest;
    }

    /**
     * Construct the instance of the job store. This results in a blocking read from disk.
     */
    private JobStore(Context context, Object lock, File dataDir) {
        mLock = lock;
        mWriteScheduleLock = new Object();
        mContext = context;

        File systemDir = new File(dataDir, "system");
        mJobFileDirectory = new File(systemDir, "job");
        mJobFileDirectory.mkdirs();
        mEventLogger = new SystemConfigFileCommitEventLogger("jobs");
        mJobsFile = createJobFile(new File(mJobFileDirectory, "jobs.xml"));

        mJobSet = new JobSet();

        // If the current RTC is earlier than the timestamp on our persisted jobs file,
        // we suspect that the RTC is uninitialized and so we cannot draw conclusions
        // about persisted job scheduling.
        //
        // Note that if the persisted jobs file does not exist, we proceed with the
        // assumption that the RTC is good. This is less work and is safe: if the
        // clock updates to sanity then we'll be saving the persisted jobs file in that
        // correct state, which is normal; or we'll wind up writing the jobs file with
        // an incorrect historical timestamp. That's fine; at worst we'll reboot with
        // a *correct* timestamp, see a bunch of overdue jobs, and run them; then
        // settle into normal operation.
        mXmlTimestamp = mJobsFile.exists()
                ? mJobsFile.getLastModifiedTime() : mJobFileDirectory.lastModified();
        mRtcGood = (sSystemClock.millis() > mXmlTimestamp);

        AppSchedulingModuleThread.getHandler().postDelayed(
                mScheduledJobHighWaterMarkLoggingRunnable, SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS);
    }

    private void init() {
        readJobMapFromDisk(mJobSet, mRtcGood);
    }

    void initAsync(CountDownLatch completionLatch) {
        mIoHandler.post(new ReadJobMapFromDiskRunnable(mJobSet, mRtcGood, completionLatch));
    }

    private AtomicFile createJobFile(String baseName) {
        return createJobFile(new File(mJobFileDirectory, baseName + ".xml"));
    }

    private AtomicFile createJobFile(File file) {
        return new AtomicFile(file, mEventLogger);
    }

    public boolean jobTimesInflatedValid() {
        return mRtcGood;
    }

    public boolean clockNowValidToInflate(long now) {
        return now >= mXmlTimestamp;
    }
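
    // Worked example of the RTC sanity check above (illustrative values): if jobs.xml was last
    // written at UTC time 1_700_000_000_000 but the device boots with an uninitialized clock
    // reporting sSystemClock.millis() == 1000, then mRtcGood is false. Persisted jobs keep
    // their UTC bounds until the clock is corrected, at which point getRtcCorrectedJobsLocked()
    // below rebuilds them against the elapsed timebase.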

    /**
     * Runs any necessary work asynchronously. If this is called after
     * {@link #initAsync(CountDownLatch)}, this ensures the given work runs after
     * the JobStore is initialized.
     */
    void runWorkAsync(@NonNull Runnable r) {
        mIoHandler.post(r);
    }

    /**
     * Find all the jobs that were affected by RTC clock uncertainty at boot time. Returns
     * parallel lists of the existing JobStatus objects and of new, equivalent JobStatus instances
     * with now-corrected time bounds.
     */
    public void getRtcCorrectedJobsLocked(final ArrayList<JobStatus> toAdd,
            final ArrayList<JobStatus> toRemove) {
        final long elapsedNow = sElapsedRealtimeClock.millis();

        // Find the jobs that need to be fixed up, collecting them for post-iteration
        // replacement with their new versions
        forEachJob(job -> {
            final Pair<Long, Long> utcTimes = job.getPersistedUtcTimes();
            if (utcTimes != null) {
                Pair<Long, Long> elapsedRuntimes =
                        convertRtcBoundsToElapsed(utcTimes, elapsedNow);
                JobStatus newJob = new JobStatus(job,
                        elapsedRuntimes.first, elapsedRuntimes.second,
                        0 /* numFailures */, 0 /* numAbandonedFailures */,
                        0 /* numSystemStops */, job.getLastSuccessfulRunTime(),
                        job.getLastFailedRunTime(),
                        job.getCumulativeExecutionTimeMs());
                newJob.prepareLocked();
                toAdd.add(newJob);
                toRemove.add(job);
            }
        });
    }

    /**
     * Add a job to the master list, persisting it if necessary.
     * Similar jobs to the new job will not be removed.
     *
     * @param jobStatus Job to add.
     */
    public void add(JobStatus jobStatus) {
        if (mJobSet.add(jobStatus)) {
            mCurrentJobSetSize++;
            maybeUpdateHighWaterMark();
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        if (DEBUG) {
            Slog.d(TAG, "Added job status to store: " + jobStatus);
        }
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void addForTesting(JobStatus jobStatus) {
        if (mJobSet.add(jobStatus)) {
            mCurrentJobSetSize++;
            maybeUpdateHighWaterMark();
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    boolean containsJob(JobStatus jobStatus) {
        return mJobSet.contains(jobStatus);
    }

    public int size() {
        return mJobSet.size();
    }

    public JobStorePersistStats getPersistStats() {
        return mPersistInfo;
    }

    public int countJobsForUid(int uid) {
        return mJobSet.countJobsForUid(uid);
    }

    /**
     * Remove the provided job. Will also delete the job if it was persisted.
     * @param removeFromPersisted If true, the job will be removed from the persisted job list
     *                            immediately (if it was persisted).
     * @return Whether or not the job existed to be removed.
     */
    public boolean remove(JobStatus jobStatus, boolean removeFromPersisted) {
        boolean removed = mJobSet.remove(jobStatus);
        if (!removed) {
            if (DEBUG) {
                Slog.d(TAG, "Couldn't remove job: didn't exist: " + jobStatus);
            }
            return false;
        }
        mCurrentJobSetSize--;
        if (removeFromPersisted && jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        return removed;
    }

    /**
     * Like {@link #remove(JobStatus, boolean)}, but doesn't schedule a disk write.
     */
    @VisibleForTesting
    public void removeForTesting(JobStatus jobStatus) {
        if (mJobSet.remove(jobStatus)) {
            mCurrentJobSetSize--;
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    /**
     * Remove the jobs of users not specified in the keepUserIds.
     * @param keepUserIds Array of User IDs whose jobs should be kept and not removed.
     */
    public void removeJobsOfUnlistedUsers(int[] keepUserIds) {
        mJobSet.removeJobsOfUnlistedUsers(keepUserIds);
        mCurrentJobSetSize = mJobSet.size();
    }

    /** Note a change in the specified JobStatus that necessitates writing job state to disk. */
    void touchJob(@NonNull JobStatus jobStatus) {
        if (!jobStatus.isPersisted()) {
            return;
        }
        mPendingJobWriteUids.put(jobStatus.getUid(), true);
        maybeWriteStatusToDiskAsync();
    }

    @VisibleForTesting
    public void clear() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
        mCurrentJobSetSize = 0;
        maybeWriteStatusToDiskAsync();
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void clearForTesting() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
        mCurrentJobSetSize = 0;
    }

    void setUseSplitFiles(boolean useSplitFiles) {
        synchronized (mLock) {
            if (mUseSplitFiles != useSplitFiles) {
                mUseSplitFiles = useSplitFiles;
                migrateJobFilesAsync();
            }
        }
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void setUseSplitFilesForTesting(boolean useSplitFiles) {
        final boolean changed;
        synchronized (mLock) {
            changed = mUseSplitFiles != useSplitFiles;
            if (changed) {
                mUseSplitFiles = useSplitFiles;
                mPendingJobWriteUids.put(ALL_UIDS, true);
            }
        }
        if (changed) {
            synchronized (mWriteScheduleLock) {
                mSplitFileMigrationNeeded = true;
            }
        }
    }

    /**
     * @param sourceUid Uid of the source app.
     * @return A list of all the jobs scheduled for the source app. Never null.
     */
    @NonNull
    public ArraySet<JobStatus> getJobsBySourceUid(int sourceUid) {
        return mJobSet.getJobsBySourceUid(sourceUid);
    }

    public void getJobsBySourceUid(int sourceUid, @NonNull Set<JobStatus> insertInto) {
        mJobSet.getJobsBySourceUid(sourceUid, insertInto);
    }

    /**
     * @param uid Uid of the requesting app.
     * @return All JobStatus objects for a given uid from the master list. Never null.
     */
    @NonNull
    public ArraySet<JobStatus> getJobsByUid(int uid) {
        return mJobSet.getJobsByUid(uid);
    }

    public void getJobsByUid(int uid, @NonNull Set<JobStatus> insertInto) {
        mJobSet.getJobsByUid(uid, insertInto);
    }

    /**
     * @param uid Uid of the requesting app.
     * @param jobId Job id, specified at schedule-time.
     * @return the JobStatus that matches the provided uid and jobId, or null if none found.
     */
    @Nullable
    public JobStatus getJobByUidAndJobId(int uid, @Nullable String namespace, int jobId) {
        return mJobSet.get(uid, namespace, jobId);
    }

    /**
     * Iterate over the set of all jobs, invoking the supplied functor on each. This is for
     * customers who need to examine each job; we'd much rather not have to generate
     * transient unified collections for them to iterate over and then discard, or creating
     * iterators every time a client needs to perform a sweep.
     */
    public void forEachJob(Consumer<JobStatus> functor) {
        mJobSet.forEachJob(null, functor);
    }

    public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
            Consumer<JobStatus> functor) {
        mJobSet.forEachJob(filterPredicate, functor);
    }

    public void forEachJob(int uid, Consumer<JobStatus> functor) {
        mJobSet.forEachJob(uid, functor);
    }

    public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
        mJobSet.forEachJobForSourceUid(sourceUid, functor);
    }

    private void maybeUpdateHighWaterMark() {
        if (mScheduledJob30MinHighWaterMark < mCurrentJobSetSize) {
            mScheduledJob30MinHighWaterMark = mCurrentJobSetSize;
        }
    }

    /** Version of the db schema. */
    private static final int JOBS_FILE_VERSION = 1;
    /**
     * For legacy reasons, this tag is used to encapsulate the entire job list.
     */
    private static final String XML_TAG_JOB_INFO = "job-info";
    /**
     * For legacy reasons, this tag represents a single {@link JobStatus} object.
     */
    private static final String XML_TAG_JOB = "job";
    /** Tag corresponds to constraints this job needs. */
    private static final String XML_TAG_PARAMS_CONSTRAINTS = "constraints";
    /** Tag corresponds to execution parameters. */
    private static final String XML_TAG_PERIODIC = "periodic";
    private static final String XML_TAG_ONEOFF = "one-off";
    private static final String XML_TAG_EXTRAS = "extras";
    private static final String XML_TAG_JOB_WORK_ITEM = "job-work-item";
    private static final String XML_TAG_DEBUG_INFO = "debug-info";
    private static final String XML_TAG_DEBUG_TAG = "debug-tag";

    private void migrateJobFilesAsync() {
        synchronized (mLock) {
            mPendingJobWriteUids.put(ALL_UIDS, true);
        }
        synchronized (mWriteScheduleLock) {
            mSplitFileMigrationNeeded = true;
            maybeWriteStatusToDiskAsync();
        }
    }

    /**
     * Every time the state changes we write all the jobs in one swath, instead of trying to
     * track incremental changes.
     */
    private void maybeWriteStatusToDiskAsync() {
        synchronized (mWriteScheduleLock) {
            if (!mWriteScheduled) {
                if (DEBUG) {
                    Slog.v(TAG, "Scheduling persist of jobs to disk.");
                }
                mIoHandler.postDelayed(mWriteRunnable, JOB_PERSIST_DELAY);
                mWriteScheduled = true;
            }
        }
    }

    @VisibleForTesting
    public void readJobMapFromDisk(JobSet jobSet, boolean rtcGood) {
        new ReadJobMapFromDiskRunnable(jobSet, rtcGood).run();
    }

    /** Write persisted JobStore state to disk synchronously. Should only be used for testing. */
    @VisibleForTesting
    public void writeStatusToDiskForTesting() {
        synchronized (mWriteScheduleLock) {
            if (mWriteScheduled) {
                throw new IllegalStateException("An asynchronous write is already scheduled.");
            }

            mWriteScheduled = true;
            mWriteRunnable.run();
        }
    }

    /**
     * Wait for any pending write to the persistent store to clear
     * @param maxWaitMillis Maximum time from present to wait
     * @return {@code true} if I/O cleared as expected, {@code false} if the wait
     *     timed out before the pending write completed.
     */
    @VisibleForTesting
    public boolean waitForWriteToCompleteForTesting(long maxWaitMillis) {
        final long start = SystemClock.uptimeMillis();
        final long end = start + maxWaitMillis;
        synchronized (mWriteScheduleLock) {
            while (mWriteScheduled || mWriteInProgress) {
                final long now = SystemClock.uptimeMillis();
                if (now >= end) {
                    // still not done and we've hit the end; failure
                    return false;
                }
                try {
                    mWriteScheduleLock.wait(now - start + maxWaitMillis);
                } catch (InterruptedException e) {
                    // Spurious; keep waiting
                    break;
                }
            }
        }
        return true;
    }

    /**
     * Returns a single string representation of the contents of the specified intArray.
     * For example, the input [1, 2, 4] yields the string "1,2,4".
     */
    @VisibleForTesting
    static String intArrayToString(int[] values) {
        final StringJoiner sj = new StringJoiner(",");
        for (final int value : values) {
            sj.add(String.valueOf(value));
        }
        return sj.toString();
    }

    /**
     * Converts a string containing a comma-separated list of decimal representations
     * of ints into an array of int. If the string is not correctly formatted,
     * or if any value doesn't fit into an int, NumberFormatException is thrown.
     */
    @VisibleForTesting
    static int[] stringToIntArray(String str) {
        if (TextUtils.isEmpty(str)) return new int[0];
        final String[] arr = str.split(",");
        final int[] values = new int[arr.length];
        for (int i = 0; i < arr.length; i++) {
            values[i] = Integer.parseInt(arr[i]);
        }
        return values;
    }

    @VisibleForTesting
    static int extractUidFromJobFileName(@NonNull File file) {
        final String fileName = file.getName();
        if (fileName.startsWith(JOB_FILE_SPLIT_PREFIX)) {
            try {
                final int subEnd = fileName.length() - 4; // -4 for ".xml"
                final int uid = Integer.parseInt(
                        fileName.substring(JOB_FILE_SPLIT_PREFIX.length(), subEnd));
                if (uid < 0) {
                    return INVALID_UID;
                }
                return uid;
            } catch (Exception e) {
                Slog.e(TAG, "Unexpected file name format", e);
            }
        }
        return INVALID_UID;
    }
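
    // For example (hypothetical file names): "jobs_10103.xml" yields 10103, while "jobs_.xml",
    // "jobs_-10.xml", and "random.txt" all yield INVALID_UID.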

    /**
     * Runnable that writes {@link #mJobSet} out to xml.
     * NOTE: This Runnable locks on mLock
     */
    private final Runnable mWriteRunnable = new Runnable() {
        private final SparseArray<AtomicFile> mJobFiles = new SparseArray<>();
        private final CopyConsumer mPersistedJobCopier = new CopyConsumer();

        class CopyConsumer implements Consumer<JobStatus> {
            private final SparseArray<List<JobStatus>> mJobStoreCopy = new SparseArray<>();
            private boolean mCopyAllJobs;

            private void prepare() {
                mCopyAllJobs = !mUseSplitFiles || mPendingJobWriteUids.get(ALL_UIDS);
                if (mUseSplitFiles) {
                    // Put the set of changed UIDs in the copy list so that we update each file,
                    // especially if we've dropped all jobs for that UID.
                    if (mPendingJobWriteUids.get(ALL_UIDS)) {
                        // ALL_UIDS is only used when we switch file splitting policy or for tests,
                        // so going through the file list here shouldn't be
                        // a large performance hit on user devices.

                        final File[] files;
                        try {
                            files = mJobFileDirectory.listFiles();
                        } catch (SecurityException e) {
                            Slog.wtf(TAG, "Not allowed to read job file directory", e);
                            return;
                        }
                        if (files == null) {
                            Slog.wtfStack(TAG, "Couldn't get job file list");
                        } else {
                            for (File file : files) {
                                final int uid = extractUidFromJobFileName(file);
                                if (uid != INVALID_UID) {
                                    mJobStoreCopy.put(uid, new ArrayList<>());
                                }
                            }
                        }
                    } else {
                        for (int i = 0; i < mPendingJobWriteUids.size(); ++i) {
                            mJobStoreCopy.put(mPendingJobWriteUids.keyAt(i), new ArrayList<>());
                        }
                    }
                } else {
                    // Single file mode.
                    // Put the catchall UID in the copy list so that we update the single file,
                    // especially if we've dropped all persisted jobs.
                    mJobStoreCopy.put(ALL_UIDS, new ArrayList<>());
                }
            }

            @Override
            public void accept(JobStatus jobStatus) {
                final int uid = mUseSplitFiles ? jobStatus.getUid() : ALL_UIDS;
                if (jobStatus.isPersisted() && (mCopyAllJobs || mPendingJobWriteUids.get(uid))) {
                    List<JobStatus> uidJobList = mJobStoreCopy.get(uid);
                    if (uidJobList == null) {
                        uidJobList = new ArrayList<>();
                        mJobStoreCopy.put(uid, uidJobList);
                    }
                    uidJobList.add(new JobStatus(jobStatus));
                }
            }

            private void reset() {
                mJobStoreCopy.clear();
            }
        }

        @Override
        public void run() {
            final long startElapsed = sElapsedRealtimeClock.millis();
            // Intentionally allow new scheduling of a write operation *before* we clone
            // the job set. If we reset it to false after cloning, there's a window in
            // which no new write will be scheduled but mLock is not held, i.e. a new
            // job might appear and fail to be recognized as needing a persist. The
            // potential cost is one redundant write of an identical set of jobs in the
            // rare case of that specific race, but by doing it this way we avoid quite
            // a bit of lock contention.
            synchronized (mWriteScheduleLock) {
                mWriteScheduled = false;
                if (mWriteInProgress) {
                    // Another runnable is currently writing. Postpone this new write task.
                    maybeWriteStatusToDiskAsync();
                    return;
                }
                mWriteInProgress = true;
            }
            final boolean useSplitFiles;
            synchronized (mLock) {
                // Clone the jobs so we can release the lock before writing.
                useSplitFiles = mUseSplitFiles;
                mPersistedJobCopier.prepare();
                mJobSet.forEachJob(null, mPersistedJobCopier);
                mPendingJobWriteUids.clear();
            }
            mPersistInfo.countAllJobsSaved = 0;
            mPersistInfo.countSystemServerJobsSaved = 0;
            mPersistInfo.countSystemSyncManagerJobsSaved = 0;
            for (int i = mPersistedJobCopier.mJobStoreCopy.size() - 1; i >= 0; --i) {
                AtomicFile file;
                if (useSplitFiles) {
                    final int uid = mPersistedJobCopier.mJobStoreCopy.keyAt(i);
                    file = mJobFiles.get(uid);
                    if (file == null) {
                        file = createJobFile(JOB_FILE_SPLIT_PREFIX + uid);
                        mJobFiles.put(uid, file);
                    }
                } else {
                    file = mJobsFile;
                }
                if (DEBUG) {
                    Slog.d(TAG, "Writing for " + mPersistedJobCopier.mJobStoreCopy.keyAt(i)
                            + " to " + file.getBaseFile().getName() + ": "
                            + mPersistedJobCopier.mJobStoreCopy.valueAt(i).size() + " jobs");
                }
                writeJobsMapImpl(file, mPersistedJobCopier.mJobStoreCopy.valueAt(i));
            }
            if (DEBUG) {
                Slog.v(TAG, "Finished writing, took " + (sElapsedRealtimeClock.millis()
                        - startElapsed) + "ms");
            }
            mPersistedJobCopier.reset();
            if (!useSplitFiles) {
                mJobFiles.clear();
            }
            // Update the last modified time of the directory to aid in RTC time verification
            // (see the JobStore constructor).
            mJobFileDirectory.setLastModified(sSystemClock.millis());
            synchronized (mWriteScheduleLock) {
                if (mSplitFileMigrationNeeded) {
                    final File[] files = mJobFileDirectory.listFiles();
                    for (File file : files) {
                        if (useSplitFiles) {
                            if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                                // Delete the now unused file so there's no confusion in the future.
                                file.delete();
                            }
                        } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                            // Delete the now unused file so there's no confusion in the future.
                            file.delete();
                        }
                    }
                }
                mWriteInProgress = false;
                mWriteScheduleLock.notifyAll();
            }
        }

        private void writeJobsMapImpl(@NonNull AtomicFile file, @NonNull List<JobStatus> jobList) {
            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            mEventLogger.setStartTime(SystemClock.uptimeMillis());
            try (FileOutputStream fos = file.startWrite()) {
                TypedXmlSerializer out = Xml.resolveSerializer(fos);
                out.startDocument(null, true);
                out.setFeature("http://xmlpull.org/v1/doc/features.html#indent-output", true);

                out.startTag(null, XML_TAG_JOB_INFO);
                out.attribute(null, "version", Integer.toString(JOBS_FILE_VERSION));
                for (int i = 0; i < jobList.size(); i++) {
                    JobStatus jobStatus = jobList.get(i);
                    if (DEBUG) {
                        Slog.d(TAG, "Saving job " + jobStatus.getJobId());
                    }
                    out.startTag(null, XML_TAG_JOB);
                    addAttributesToJobTag(out, jobStatus);
                    writeConstraintsToXml(out, jobStatus);
                    writeExecutionCriteriaToXml(out, jobStatus);
                    writeBundleToXml(jobStatus.getJob().getExtras(), out);
                    writeJobWorkItemsToXml(out, jobStatus);
                    writeDebugInfoToXml(out, jobStatus);
                    out.endTag(null, XML_TAG_JOB);

                    numJobs++;
                    if (jobStatus.getUid() == Process.SYSTEM_UID) {
                        numSystemJobs++;
                        if (isSyncJob(jobStatus)) {
                            numSyncJobs++;
                        }
                    }
                }
                out.endTag(null, XML_TAG_JOB_INFO);
                out.endDocument();

                file.finishWrite(fos);
            } catch (IOException e) {
                if (DEBUG) {
                    Slog.v(TAG, "Error writing out job data.", e);
                }
            } catch (XmlPullParserException e) {
                if (DEBUG) {
                    Slog.d(TAG, "Error persisting bundle.", e);
                }
            } finally {
                mPersistInfo.countAllJobsSaved += numJobs;
                mPersistInfo.countSystemServerJobsSaved += numSystemJobs;
                mPersistInfo.countSystemSyncManagerJobsSaved += numSyncJobs;
            }
        }
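
        // Rough sketch of the document produced above (attribute values are illustrative; the
        // exact attribute set per tag is defined by the helper methods that follow):
        //
        //   <job-info version="1">
        //     <job jobid="42" package="com.example.app" class="com.example.app.ExampleService"
        //          sourceUserId="0" uid="10103" bias="0" priority="300" flags="0" ...>
        //       <constraints charging="true" ... />
        //       <one-off delay="..." deadline="..." />  (or <periodic period="..." flex="..." />)
        //       <extras>...</extras>
        //       <job-work-item ...>...</job-work-item>  (optional)
        //       <debug-info ...>...</debug-info>        (optional)
        //     </job>
        //   </job-info>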

        /**
         * Write out a tag with data comprising the required fields and bias of this job and
         * its client.
         */
        private void addAttributesToJobTag(TypedXmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.attribute(null, "jobid", Integer.toString(jobStatus.getJobId()));
            out.attribute(null, "package", jobStatus.getServiceComponent().getPackageName());
            out.attribute(null, "class", jobStatus.getServiceComponent().getClassName());
            if (jobStatus.getSourcePackageName() != null) {
                out.attribute(null, "sourcePackageName", jobStatus.getSourcePackageName());
            }
            if (jobStatus.getNamespace() != null) {
                out.attribute(null, "namespace", jobStatus.getNamespace());
            }
            if (jobStatus.getSourceTag() != null) {
                out.attribute(null, "sourceTag", jobStatus.getSourceTag());
            }
            out.attribute(null, "sourceUserId", String.valueOf(jobStatus.getSourceUserId()));
            out.attribute(null, "uid", Integer.toString(jobStatus.getUid()));
            out.attribute(null, "bias", String.valueOf(jobStatus.getBias()));
            out.attribute(null, "priority", String.valueOf(jobStatus.getJob().getPriority()));
            out.attribute(null, "flags", String.valueOf(jobStatus.getFlags()));
            if (jobStatus.getInternalFlags() != 0) {
                out.attribute(null, "internalFlags", String.valueOf(jobStatus.getInternalFlags()));
            }

            out.attribute(null, "lastSuccessfulRunTime",
                    String.valueOf(jobStatus.getLastSuccessfulRunTime()));
            out.attribute(null, "lastFailedRunTime",
                    String.valueOf(jobStatus.getLastFailedRunTime()));

            out.attributeLong(null, "cumulativeExecutionTime",
                    jobStatus.getCumulativeExecutionTimeMs());
        }

        private void writeBundleToXml(PersistableBundle extras, XmlSerializer out)
                throws IOException, XmlPullParserException {
            out.startTag(null, XML_TAG_EXTRAS);
            PersistableBundle extrasCopy = deepCopyBundle(extras, 10);
            extrasCopy.saveToXml(out);
            out.endTag(null, XML_TAG_EXTRAS);
        }

        private PersistableBundle deepCopyBundle(PersistableBundle bundle, int maxDepth) {
            if (maxDepth <= 0) {
                return null;
            }
            PersistableBundle copy = (PersistableBundle) bundle.clone();
            Set<String> keySet = bundle.keySet();
            for (String key : keySet) {
                Object o = copy.get(key);
                if (o instanceof PersistableBundle) {
                    PersistableBundle bCopy = deepCopyBundle((PersistableBundle) o, maxDepth - 1);
                    copy.putPersistableBundle(key, bCopy);
                }
            }
            return copy;
        }

        /**
         * Write out a tag with data identifying this job's constraints. If the constraint isn't
         * here it doesn't apply.
         * TODO: b/183455312 Update this code to use proper serialization for NetworkRequest,
         *       because currently store is not including everything (like, UIDs, bandwidth,
         *       signal strength etc. are lost).
         */
        private void writeConstraintsToXml(TypedXmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.startTag(null, XML_TAG_PARAMS_CONSTRAINTS);
            final JobInfo job = jobStatus.getJob();
            if (jobStatus.hasConnectivityConstraint()) {
                final NetworkRequest network = jobStatus.getJob().getRequiredNetwork();
                out.attribute(null, "net-capabilities-csv", intArrayToString(
                        network.getCapabilities()));
                out.attribute(null, "net-forbidden-capabilities-csv", intArrayToString(
                        network.getForbiddenCapabilities()));
                out.attribute(null, "net-transport-types-csv", intArrayToString(
                        network.getTransportTypes()));
                if (job.getEstimatedNetworkDownloadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-download-bytes",
                            job.getEstimatedNetworkDownloadBytes());
                }
                if (job.getEstimatedNetworkUploadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-upload-bytes",
                            job.getEstimatedNetworkUploadBytes());
                }
                if (job.getMinimumNetworkChunkBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "minimum-network-chunk-bytes",
                            job.getMinimumNetworkChunkBytes());
                }
            }
            if (job.isRequireDeviceIdle()) {
                out.attribute(null, "idle", Boolean.toString(true));
            }
            if (job.isRequireCharging()) {
                out.attribute(null, "charging", Boolean.toString(true));
            }
            if (job.isRequireBatteryNotLow()) {
                out.attribute(null, "battery-not-low", Boolean.toString(true));
            }
            if (job.isRequireStorageNotLow()) {
                out.attribute(null, "storage-not-low", Boolean.toString(true));
            }
            out.endTag(null, XML_TAG_PARAMS_CONSTRAINTS);
        }

        private void writeExecutionCriteriaToXml(XmlSerializer out, JobStatus jobStatus)
                throws IOException {
            final JobInfo job = jobStatus.getJob();
            if (jobStatus.getJob().isPeriodic()) {
                out.startTag(null, XML_TAG_PERIODIC);
                out.attribute(null, "period", Long.toString(job.getIntervalMillis()));
                out.attribute(null, "flex", Long.toString(job.getFlexMillis()));
            } else {
                out.startTag(null, XML_TAG_ONEOFF);
            }

            // If we still have the persisted times, we need to record those directly because
            // we haven't yet been able to calculate the usual elapsed-timebase bounds
            // correctly due to wall-clock uncertainty.
            Pair<Long, Long> utcJobTimes = jobStatus.getPersistedUtcTimes();
            if (DEBUG && utcJobTimes != null) {
                Slog.i(TAG, "storing original UTC timestamps for " + jobStatus);
            }

            final long nowRTC = sSystemClock.millis();
            final long nowElapsed = sElapsedRealtimeClock.millis();
            if (jobStatus.hasDeadlineConstraint()) {
                // Wall clock deadline.
                final long deadlineWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getLatestRunTimeElapsed() - nowElapsed)
                        : utcJobTimes.second;
                out.attribute(null, "deadline", Long.toString(deadlineWallclock));
            }
            if (jobStatus.hasTimingDelayConstraint()) {
                final long delayWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getEarliestRunTime() - nowElapsed)
                        : utcJobTimes.first;
                out.attribute(null, "delay", Long.toString(delayWallclock));
            }

            // Only write out back-off policy if it differs from the default.
            // This also helps the case where the job is idle -> these aren't allowed to specify
            // back-off.
            if (jobStatus.getJob().getInitialBackoffMillis()
                            != JobInfo.DEFAULT_INITIAL_BACKOFF_MILLIS
                    || jobStatus.getJob().getBackoffPolicy() != JobInfo.DEFAULT_BACKOFF_POLICY) {
                out.attribute(null, "backoff-policy", Integer.toString(job.getBackoffPolicy()));
                out.attribute(null, "initial-backoff",
                        Long.toString(job.getInitialBackoffMillis()));
            }
            if (job.isPeriodic()) {
                out.endTag(null, XML_TAG_PERIODIC);
            } else {
                out.endTag(null, XML_TAG_ONEOFF);
            }
        }

        private void writeDebugInfoToXml(@NonNull TypedXmlSerializer out,
                @NonNull JobStatus jobStatus) throws IOException, XmlPullParserException {
            final ArraySet<String> debugTags = jobStatus.getJob().getDebugTagsArraySet();
            final int numTags = debugTags.size();
            final String traceTag = jobStatus.getJob().getTraceTag();
            if (traceTag == null && numTags == 0) {
                return;
            }
            out.startTag(null, XML_TAG_DEBUG_INFO);
            if (traceTag != null) {
                out.attribute(null, "trace-tag", traceTag);
            }
            for (int i = 0; i < numTags; ++i) {
                out.startTag(null, XML_TAG_DEBUG_TAG);
                out.attribute(null, "tag", debugTags.valueAt(i));
                out.endTag(null, XML_TAG_DEBUG_TAG);
            }
            out.endTag(null, XML_TAG_DEBUG_INFO);
        }

        private void writeJobWorkItemsToXml(@NonNull TypedXmlSerializer out,
                @NonNull JobStatus jobStatus) throws IOException, XmlPullParserException {
            // Write executing first since they're technically at the front of the queue.
            writeJobWorkItemListToXml(out, jobStatus.executingWork);
            writeJobWorkItemListToXml(out, jobStatus.pendingWork);
        }

        private void writeJobWorkItemListToXml(@NonNull TypedXmlSerializer out,
                @Nullable List<JobWorkItem> jobWorkItems)
                throws IOException, XmlPullParserException {
            if (jobWorkItems == null) {
                return;
            }
            // Write the items in list order to maintain the enqueue order.
            final int size = jobWorkItems.size();
            for (int i = 0; i < size; ++i) {
                final JobWorkItem item = jobWorkItems.get(i);
                if (item.getGrants() != null) {
                    // We currently don't allow persisting jobs when grants are involved.
                    // TODO(256618122): allow persisting JobWorkItems with grant flags
                    continue;
                }
                if (item.getIntent() != null) {
                    // Intent.saveToXml() doesn't persist everything, so we shouldn't attempt to
                    // persist these JobWorkItems at all.
                    Slog.wtf(TAG, "Encountered JobWorkItem with Intent in persisting list");
                    continue;
                }
                out.startTag(null, XML_TAG_JOB_WORK_ITEM);
                out.attributeInt(null, "delivery-count", item.getDeliveryCount());
                if (item.getEstimatedNetworkDownloadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-download-bytes",
                            item.getEstimatedNetworkDownloadBytes());
                }
                if (item.getEstimatedNetworkUploadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-upload-bytes",
                            item.getEstimatedNetworkUploadBytes());
                }
                if (item.getMinimumNetworkChunkBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "minimum-network-chunk-bytes",
                            item.getMinimumNetworkChunkBytes());
                }
                writeBundleToXml(item.getExtras(), out);
                out.endTag(null, XML_TAG_JOB_WORK_ITEM);
            }
        }
    };
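
    // Illustrative walkthrough of the conversion performed by convertRtcBoundsToElapsed() below:
    // with nowElapsed = 100_000 and sSystemClock.millis() = 1_000_000, an rtcTimes.first of
    // 1_060_000 maps to elapsed time 160_000 (60 seconds from now); an rtcTimes.first already in
    // the past clamps to nowElapsed via Math.max(..., 0). The sentinel values NO_EARLIEST_RUNTIME
    // and NO_LATEST_RUNTIME pass through unchanged.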

    /**
     * Translate the supplied RTC times to the elapsed timebase, with clamping appropriate
     * to interpreting them as a job's delay + deadline times for alarm-setting purposes.
     * @param rtcTimes a Pair<Long, Long> in which {@code first} is the "delay" earliest
     *     allowable runtime for the job, and {@code second} is the "deadline" time at which
     *     the job becomes overdue.
     */
    private static Pair<Long, Long> convertRtcBoundsToElapsed(Pair<Long, Long> rtcTimes,
            long nowElapsed) {
        final long nowWallclock = sSystemClock.millis();
        final long earliest = (rtcTimes.first > JobStatus.NO_EARLIEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.first - nowWallclock, 0)
                : JobStatus.NO_EARLIEST_RUNTIME;
        final long latest = (rtcTimes.second < JobStatus.NO_LATEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.second - nowWallclock, 0)
                : JobStatus.NO_LATEST_RUNTIME;
        return Pair.create(earliest, latest);
    }

    private static boolean isSyncJob(JobStatus status) {
        return com.android.server.content.SyncJobService.class.getName()
                .equals(status.getServiceComponent().getClassName());
    }

    /**
     * Runnable that reads the list of persisted jobs from xml. This is run once at start up, so
     * doesn't need to go through {@link JobStore#add(com.android.server.job.controllers.JobStatus)}.
     */
    private final class ReadJobMapFromDiskRunnable implements Runnable {
        private final JobSet jobSet;
        private final boolean rtcGood;
        private final CountDownLatch mCompletionLatch;

        /**
         * @param jobSet Reference to the (empty) set of JobStatus objects that back the JobStore,
         *               so that after disk read we can populate it directly.
         */
        ReadJobMapFromDiskRunnable(JobSet jobSet, boolean rtcIsGood) {
            this(jobSet, rtcIsGood, null);
        }

        ReadJobMapFromDiskRunnable(JobSet jobSet, boolean rtcIsGood,
                @Nullable CountDownLatch completionLatch) {
            this.jobSet = jobSet;
            this.rtcGood = rtcIsGood;
            this.mCompletionLatch = completionLatch;
        }

        @Override
        public void run() {
            if (!mJobFileDirectory.isDirectory()) {
                Slog.wtf(TAG, "jobs directory isn't a directory O.O");
                mJobFileDirectory.mkdirs();
                return;
            }

            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            List<JobStatus> jobs;
            final File[] files;
            try {
                files = mJobFileDirectory.listFiles();
            } catch (SecurityException e) {
                Slog.wtf(TAG, "Not allowed to read job file directory", e);
                return;
            }
            if (files == null) {
                Slog.wtfStack(TAG, "Couldn't get job file list");
                return;
            }
            boolean needFileMigration = false;
            long nowElapsed = sElapsedRealtimeClock.millis();
            int numDuplicates = 0;
            synchronized (mLock) {
                for (File file : files) {
                    if (!file.getName().equals("jobs.xml")
                            && !SPLIT_FILE_PATTERN.matcher(file.getName()).matches()) {
                        // Skip temporary or other files.
                        continue;
                    }
                    final AtomicFile aFile = createJobFile(file);
                    try (FileInputStream fis = aFile.openRead()) {
                        jobs = readJobMapImpl(fis, rtcGood, nowElapsed);
                        if (jobs != null) {
                            for (int i = 0; i < jobs.size(); i++) {
                                JobStatus js = jobs.get(i);
                                final JobStatus existingJob = this.jobSet.get(
                                        js.getUid(), js.getNamespace(), js.getJobId());
                                if (existingJob != null) {
                                    numDuplicates++;
                                    // Jobs are meant to have unique uid-namespace-jobId
                                    // combinations, but we've somehow read multiple jobs with the
                                    // same combination. Drop the latter one since keeping both
                                    // will result in other issues.
                                    continue;
                                }
                                js.prepareLocked();
                                js.enqueueTime = nowElapsed;
                                this.jobSet.add(js);

                                numJobs++;
                                if (js.getUid() == Process.SYSTEM_UID) {
                                    numSystemJobs++;
                                    if (isSyncJob(js)) {
                                        numSyncJobs++;
                                    }
                                }
                            }
                        }
                    } catch (FileNotFoundException e) {
                        // mJobFileDirectory.listFiles() gave us this file...why can't we find it???
                        Slog.e(TAG, "Could not find jobs file: " + file.getName());
                    } catch (XmlPullParserException | IOException e) {
                        Slog.wtf(TAG, "Error in " + file.getName(), e);
                    } catch (Exception e) {
                        // Crashing at this point would result in a boot loop, so live with a
                        // generic Exception for system stability's sake.
                        Slog.wtf(TAG, "Unexpected exception", e);
                    }
                    if (mUseSplitFiles) {
                        if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                            // We're supposed to be using the split file architecture,
                            // but we still have the old job file around.
                            // Fully migrate and remove the old file.
                            needFileMigration = true;
                        }
                    } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                        // We're supposed to be using the legacy single file architecture,
                        // but we still have some job split files around. Fully migrate
                        // and remove the split files.
                        needFileMigration = true;
                    }
                }
                if (mPersistInfo.countAllJobsLoaded < 0) { // Only set them once.
                    mPersistInfo.countAllJobsLoaded = numJobs;
                    mPersistInfo.countSystemServerJobsLoaded = numSystemJobs;
                    mPersistInfo.countSystemSyncManagerJobsLoaded = numSyncJobs;
                }
            }
            Slog.i(TAG, "Read " + numJobs + " jobs");
            if (needFileMigration) {
                migrateJobFilesAsync();
            }

            if (numDuplicates > 0) {
                Slog.wtf(TAG, "Encountered " + numDuplicates + " duplicate persisted jobs");
            }

            // Log the count immediately after loading from boot.
            mCurrentJobSetSize = numJobs;
            mScheduledJob30MinHighWaterMark = mCurrentJobSetSize;
            mScheduledJobHighWaterMarkLoggingRunnable.run();

            if (mCompletionLatch != null) {
                mCompletionLatch.countDown();
            }
        }

        /** Returns the {@link String#intern() interned} String if it's not null. */
        @Nullable
        private static String intern(@Nullable String val) {
            return val == null ? null : val.intern();
        }

        private List<JobStatus> readJobMapImpl(InputStream fis, boolean rtcIsGood, long nowElapsed)
                throws XmlPullParserException, IOException {
            TypedXmlPullParser parser = Xml.resolvePullParser(fis);

            int eventType = parser.getEventType();
            while (eventType != XmlPullParser.START_TAG
                    && eventType != XmlPullParser.END_DOCUMENT) {
                eventType = parser.next();
                Slog.d(TAG, "Start tag: " + parser.getName());
            }
            if (eventType == XmlPullParser.END_DOCUMENT) {
                if (DEBUG) {
                    Slog.d(TAG, "No persisted jobs.");
                }
                return null;
            }

            String tagName = parser.getName();
            if (XML_TAG_JOB_INFO.equals(tagName)) {
                final List<JobStatus> jobs = new ArrayList<JobStatus>();
                final int version = parser.getAttributeInt(null, "version");
                // Read in version info.
                if (version > JOBS_FILE_VERSION || version < 0) {
                    Slog.d(TAG, "Invalid version number, aborting jobs file read.");
                    return null;
                }

                eventType = parser.next();
                do {
                    // Read each <job/>
                    if (eventType == XmlPullParser.START_TAG) {
                        tagName = parser.getName();
                        // Start reading job.
                        if (XML_TAG_JOB.equals(tagName)) {
                            JobStatus persistedJob =
                                    restoreJobFromXml(rtcIsGood, parser, version, nowElapsed);
                            if (persistedJob != null) {
                                if (DEBUG) {
                                    Slog.d(TAG, "Read out " + persistedJob);
                                }
                                jobs.add(persistedJob);
                            } else {
                                Slog.d(TAG, "Error reading job from file.");
                            }
                        }
                    }
                    eventType = parser.next();
                } while (eventType != XmlPullParser.END_DOCUMENT);
                return jobs;
            }
            return null;
        }

        /**
         * @param parser Xml parser at the beginning of a "<job/>" tag. The next "parser.next()"
         *     call will take the parser into the body of the job tag.
         * @return Newly instantiated job holding all the information we just read out of the
         *     xml tag.
         */
        private JobStatus restoreJobFromXml(boolean rtcIsGood, TypedXmlPullParser parser,
                int schemaVersion, long nowElapsed) throws XmlPullParserException, IOException {
            JobInfo.Builder jobBuilder;
            int uid, sourceUserId;
            long lastSuccessfulRunTime;
            long lastFailedRunTime;
            long cumulativeExecutionTime;
            int internalFlags = 0;

            // Read out job identifier attributes and bias.
            try {
                jobBuilder = buildBuilderFromXml(parser);
                jobBuilder.setPersisted(true);
                uid = Integer.parseInt(parser.getAttributeValue(null, "uid"));

                String val;
                if (schemaVersion == 0) {
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                } else if (schemaVersion >= 1) {
                    val = parser.getAttributeValue(null, "bias");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setPriority(Integer.parseInt(val));
                    }
                }
                val = parser.getAttributeValue(null, "flags");
                if (val != null) {
                    jobBuilder.setFlags(Integer.parseInt(val));
                }
                val = parser.getAttributeValue(null, "internalFlags");
                if (val != null) {
                    internalFlags = Integer.parseInt(val);
                }
                val = parser.getAttributeValue(null, "sourceUserId");
                sourceUserId = val == null ? -1 : Integer.parseInt(val);

                val = parser.getAttributeValue(null, "lastSuccessfulRunTime");
                lastSuccessfulRunTime = val == null ? 0 : Long.parseLong(val);

                val = parser.getAttributeValue(null, "lastFailedRunTime");
                lastFailedRunTime = val == null ? 0 : Long.parseLong(val);

                cumulativeExecutionTime =
                        parser.getAttributeLong(null, "cumulativeExecutionTime", 0);
            } catch (NumberFormatException e) {
                Slog.e(TAG, "Error parsing job's required fields, skipping");
                return null;
            }

            String sourcePackageName = parser.getAttributeValue(null, "sourcePackageName");
            final String namespace = intern(parser.getAttributeValue(null, "namespace"));
            final String sourceTag = intern(parser.getAttributeValue(null, "sourceTag"));

            int eventType;
            // Read out constraints tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT); // Push through to next START_TAG.

            if (!(eventType == XmlPullParser.START_TAG
                    && XML_TAG_PARAMS_CONSTRAINTS.equals(parser.getName()))) {
                // Expecting a <constraints> start tag.
                return null;
            }
            try {
                buildConstraintsFromXml(jobBuilder, parser);
            } catch (NumberFormatException e) {
                Slog.d(TAG, "Error reading constraints, skipping.");
                return null;
            } catch (XmlPullParserException e) {
                Slog.d(TAG, "Error Parser Exception.", e);
                return null;
            } catch (IOException e) {
                Slog.d(TAG, "Error I/O Exception.", e);
                return null;
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Constraints contained invalid data", e);
                return null;
            }

            parser.next(); // Consume </constraints>

            // Read out execution parameters tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (eventType != XmlPullParser.START_TAG) {
                return null;
            }

            // Tuple of (earliest runtime, latest runtime) in UTC.
            final Pair<Long, Long> rtcRuntimes = buildRtcExecutionTimesFromXml(parser);

            Pair<Long, Long> elapsedRuntimes = convertRtcBoundsToElapsed(rtcRuntimes, nowElapsed);

            if (XML_TAG_PERIODIC.equals(parser.getName())) {
                try {
                    String val = parser.getAttributeValue(null, "period");
                    final long periodMillis = Long.parseLong(val);
                    val = parser.getAttributeValue(null, "flex");
                    final long flexMillis = (val != null) ? Long.valueOf(val) : periodMillis;
                    jobBuilder.setPeriodic(periodMillis, flexMillis);
                    // As a sanity check, cap the recreated run time to be no later than flex+period
                    // from now. This is the latest the periodic could be pushed out. This could
                    // happen if the periodic ran early (at flex time before period), and then the
                    // device rebooted.
                    if (elapsedRuntimes.second > nowElapsed + periodMillis + flexMillis) {
                        final long clampedLateRuntimeElapsed = nowElapsed + flexMillis
                                + periodMillis;
                        final long clampedEarlyRuntimeElapsed = clampedLateRuntimeElapsed
                                - flexMillis;
                        Slog.w(TAG,
                                String.format("Periodic job for uid='%d' persisted run-time is"
                                                + " too big [%s, %s]. Clamping to [%s,%s]",
                                        uid,
                                        DateUtils.formatElapsedTime(elapsedRuntimes.first / 1000),
                                        DateUtils.formatElapsedTime(elapsedRuntimes.second / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedEarlyRuntimeElapsed / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedLateRuntimeElapsed / 1000))
                        );
                        elapsedRuntimes =
                                Pair.create(clampedEarlyRuntimeElapsed, clampedLateRuntimeElapsed);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading periodic execution criteria, skipping.");
                    return null;
                }
            } else if (XML_TAG_ONEOFF.equals(parser.getName())) {
                try {
                    if (elapsedRuntimes.first != JobStatus.NO_EARLIEST_RUNTIME) {
                        jobBuilder.setMinimumLatency(elapsedRuntimes.first - nowElapsed);
                    }
                    if (elapsedRuntimes.second != JobStatus.NO_LATEST_RUNTIME) {
                        jobBuilder.setOverrideDeadline(
                                elapsedRuntimes.second - nowElapsed);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading job execution criteria, skipping.");
                    return null;
                }
            } else {
                if (DEBUG) {
                    Slog.d(TAG, "Invalid parameter tag, skipping - " + parser.getName());
                }
                // Expecting a parameters start tag.
                return null;
            }
            maybeBuildBackoffPolicyFromXml(jobBuilder, parser);

            parser.nextTag(); // Consume parameters end tag.

            // Read out extras Bundle.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (!(eventType == XmlPullParser.START_TAG
                    && XML_TAG_EXTRAS.equals(parser.getName()))) {
                if (DEBUG) {
                    Slog.d(TAG, "Error reading extras, skipping.");
                }
                return null;
            }

            final PersistableBundle extras;
            try {
                extras = PersistableBundle.restoreFromXml(parser);
                jobBuilder.setExtras(extras);
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Persisted extras contained invalid data", e);
                return null;
            }
            eventType = parser.nextTag(); // Consume </extras>

            List<JobWorkItem> jobWorkItems = null;
            if (eventType == XmlPullParser.START_TAG
                    && XML_TAG_JOB_WORK_ITEM.equals(parser.getName())) {
                jobWorkItems = readJobWorkItemsFromXml(parser);
            }

            if (eventType == XmlPullParser.START_TAG
                    && XML_TAG_DEBUG_INFO.equals(parser.getName())) {
                try {
                    jobBuilder.setTraceTag(parser.getAttributeValue(null, "trace-tag"));
                } catch (Exception e) {
                    Slog.wtf(TAG, "Invalid trace tag persisted to disk", e);
                }
                parser.next();
                jobBuilder.addDebugTags(readDebugTagsFromXml(parser));
                eventType = parser.nextTag(); // Consume </debug-info>
            }

            final JobInfo builtJob;
            try {
                // Don't perform prefetch-deadline check here. Apps targeting S- shouldn't have
                // any prefetch-with-deadline jobs accidentally dropped. It's not worth doing
                // target SDK version checks here for apps targeting T+. There's no way for an
                // app to keep a perpetually scheduled prefetch job with a deadline. Prefetch jobs
                // with a deadline would run and then any newly scheduled prefetch jobs wouldn't
                // have a deadline. If a job is rescheduled (via jobFinished(true) or onStopJob()'s
                // return value), the deadline is dropped. Periodic jobs require all constraints
                // to be met, so there's no issue with their deadlines.
                // The same logic applies for other target SDK-based validation checks.
                builtJob = jobBuilder.build(false, false, false, false);
            } catch (Exception e) {
                Slog.w(TAG, "Unable to build job from XML, ignoring: " + jobBuilder.summarize(), e);
                return null;
            }

            // Migrate sync jobs forward from earlier, incomplete representation
            if ("android".equals(sourcePackageName)
                    && extras != null
                    && extras.getBoolean("SyncManagerJob", false)) {
                sourcePackageName = extras.getString("owningPackage", sourcePackageName);
                if (DEBUG) {
                    Slog.i(TAG, "Fixing up sync job source package name from 'android' to '"
                            + sourcePackageName + "'");
                }
            }

            // And now we're done
            final int appBucket = JobSchedulerService.standbyBucketForPackage(sourcePackageName,
                    sourceUserId, nowElapsed);
            JobStatus js = new JobStatus(
                    builtJob, uid, intern(sourcePackageName), sourceUserId,
                    appBucket, namespace, sourceTag,
                    elapsedRuntimes.first, elapsedRuntimes.second,
                    lastSuccessfulRunTime, lastFailedRunTime, cumulativeExecutionTime,
                    (rtcIsGood) ? null : rtcRuntimes, internalFlags, /* dynamicConstraints */ 0);
            if (jobWorkItems != null) {
                for (int i = 0; i < jobWorkItems.size(); ++i) {
                    js.enqueueWorkLocked(jobWorkItems.get(i));
                }
            }
            return js;
        }

        private JobInfo.Builder buildBuilderFromXml(TypedXmlPullParser parser)
                throws XmlPullParserException {
            // Pull out required fields from <job> attributes.
            int jobId = parser.getAttributeInt(null, "jobid");
            String packageName = intern(parser.getAttributeValue(null, "package"));
            String className = intern(parser.getAttributeValue(null, "class"));
            ComponentName cname = new ComponentName(packageName, className);

            return new JobInfo.Builder(jobId, cname);
        }
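
        // Worked example of the format change described below: a job requiring capabilities
        // 4, 14, and 15 was stored on R as the packed bitmask 2^4 + 2^14 + 2^15 = 49168
        // ("net-capabilities"); on S+ the same set is stored as the CSV string "4,14,15"
        // ("net-capabilities-csv").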
         */
        private void buildConstraintsFromXml(JobInfo.Builder jobBuilder, TypedXmlPullParser parser)
                throws XmlPullParserException, IOException {
            String val;
            String netCapabilitiesLong = null;
            String netForbiddenCapabilitiesLong = null;
            String netTransportTypesLong = null;

            final String netCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-capabilities-csv");
            final String netForbiddenCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-forbidden-capabilities-csv");
            final String netTransportTypesIntArray = parser.getAttributeValue(
                    null, "net-transport-types-csv");
            if (netCapabilitiesIntArray == null || netTransportTypesIntArray == null) {
                netCapabilitiesLong = parser.getAttributeValue(null, "net-capabilities");
                netForbiddenCapabilitiesLong = parser.getAttributeValue(
                        null, "net-unwanted-capabilities");
                netTransportTypesLong = parser.getAttributeValue(null, "net-transport-types");
            }

            if ((netCapabilitiesIntArray != null) && (netTransportTypesIntArray != null)) {
                // S+ format. No capability or transport validation since the values should be
                // in line with what's defined in the Connectivity mainline module.
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();

                for (int capability : stringToIntArray(netCapabilitiesIntArray)) {
                    builder.addCapability(capability);
                }

                for (int forbiddenCapability : stringToIntArray(netForbiddenCapabilitiesIntArray)) {
                    builder.addForbiddenCapability(forbiddenCapability);
                }

                for (int transport : stringToIntArray(netTransportTypesIntArray)) {
                    builder.addTransportType(transport);
                }
                jobBuilder
                        .setRequiredNetwork(builder.build())
                        .setEstimatedNetworkBytes(
                                parser.getAttributeLong(null,
                                        "estimated-download-bytes", JobInfo.NETWORK_BYTES_UNKNOWN),
                                parser.getAttributeLong(null,
                                        "estimated-upload-bytes", JobInfo.NETWORK_BYTES_UNKNOWN))
                        .setMinimumNetworkChunkBytes(
                                parser.getAttributeLong(null,
                                        "minimum-network-chunk-bytes",
                                        JobInfo.NETWORK_BYTES_UNKNOWN));
            } else if (netCapabilitiesLong != null && netTransportTypesLong != null) {
                // Format used on R- builds. Drop any unexpected capabilities and transports.
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();
                final int maxNetCapabilityInR = NET_CAPABILITY_TEMPORARILY_NOT_METERED;
                // We're okay throwing NFE here; caught by caller
                for (int capability : BitUtils.unpackBits(Long.parseLong(
                        netCapabilitiesLong))) {
                    if (capability <= maxNetCapabilityInR) {
                        builder.addCapability(capability);
                    }
                }
                for (int forbiddenCapability : BitUtils.unpackBits(Long.parseLong(
                        netForbiddenCapabilitiesLong))) {
                    if (forbiddenCapability <= maxNetCapabilityInR) {
                        builder.addForbiddenCapability(forbiddenCapability);
                    }
                }

                final int maxTransportInR = TRANSPORT_TEST;
                for (int transport : BitUtils.unpackBits(Long.parseLong(
                        netTransportTypesLong))) {
                    if (transport <= maxTransportInR) {
                        builder.addTransportType(transport);
                    }
                }
                jobBuilder.setRequiredNetwork(builder.build());
                // Estimated bytes weren't persisted on R- builds, so no point querying for the
                // attributes here.
            } else {
                // Read legacy values
                val = parser.getAttributeValue(null, "connectivity");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_ANY);
                }
                val = parser.getAttributeValue(null, "metered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_METERED);
                }
                val = parser.getAttributeValue(null, "unmetered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_UNMETERED);
                }
                val = parser.getAttributeValue(null, "not-roaming");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_NOT_ROAMING);
                }
            }

            val = parser.getAttributeValue(null, "idle");
            if (val != null) {
                jobBuilder.setRequiresDeviceIdle(true);
            }
            val = parser.getAttributeValue(null, "charging");
            if (val != null) {
                jobBuilder.setRequiresCharging(true);
            }
            val = parser.getAttributeValue(null, "battery-not-low");
            if (val != null) {
                jobBuilder.setRequiresBatteryNotLow(true);
            }
            val = parser.getAttributeValue(null, "storage-not-low");
            if (val != null) {
                jobBuilder.setRequiresStorageNotLow(true);
            }
        }

        /**
         * Builds the back-off policy out of the params tag. These attributes may not exist,
         * depending on whether the back-off was set when the job was first scheduled.
         */
        private void maybeBuildBackoffPolicyFromXml(JobInfo.Builder jobBuilder,
                XmlPullParser parser) {
            String val = parser.getAttributeValue(null, "initial-backoff");
            if (val != null) {
                long initialBackoff = Long.parseLong(val);
                val = parser.getAttributeValue(null, "backoff-policy");
                // Will throw NFE, which we catch higher up.
                int backoffPolicy = Integer.parseInt(val);
                jobBuilder.setBackoffCriteria(initialBackoff, backoffPolicy);
            }
        }

        /**
         * Extract a job's earliest/latest run time data from XML. These are returned in
         * unadjusted UTC wall clock time, because we do not yet know whether the system
         * clock is reliable for purposes of calculating deltas from 'now'.
         *
         * @param parser the XML parser, positioned at the tag carrying the "delay" and
         *               "deadline" attributes
         * @return A Pair of timestamps in UTC wall-clock time. The first is the earliest
         * time at which the job is to become runnable, and the second is the deadline at
         * which it becomes overdue to execute.
         */
        private Pair<Long, Long> buildRtcExecutionTimesFromXml(TypedXmlPullParser parser) {
            // Pull out execution time data.
            final long earliestRunTimeRtc =
                    parser.getAttributeLong(null, "delay", JobStatus.NO_EARLIEST_RUNTIME);
            final long latestRunTimeRtc =
                    parser.getAttributeLong(null, "deadline", JobStatus.NO_LATEST_RUNTIME);
            return Pair.create(earliestRunTimeRtc, latestRunTimeRtc);
        }

        @NonNull
        private List<JobWorkItem> readJobWorkItemsFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            List<JobWorkItem> jobWorkItems = new ArrayList<>();

            for (int eventType = parser.getEventType(); eventType != XmlPullParser.END_DOCUMENT;
                    eventType = parser.next()) {
                final String tagName = parser.getName();
                if (!XML_TAG_JOB_WORK_ITEM.equals(tagName)) {
                    // We're no longer operating with work items.
                    break;
                }
                try {
                    JobWorkItem jwi = readJobWorkItemFromXml(parser);
                    if (jwi != null) {
                        jobWorkItems.add(jwi);
                    }
                } catch (Exception e) {
                    // If there's an issue with one JobWorkItem, drop only the one item and not
                    // the whole job.
                    Slog.e(TAG, "Problem with persisted JobWorkItem", e);
                }
            }

            return jobWorkItems;
        }

        @Nullable
        private JobWorkItem readJobWorkItemFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            JobWorkItem.Builder jwiBuilder = new JobWorkItem.Builder();

            jwiBuilder
                    .setDeliveryCount(parser.getAttributeInt(null, "delivery-count"))
                    .setEstimatedNetworkBytes(
                            parser.getAttributeLong(null,
                                    "estimated-download-bytes", JobInfo.NETWORK_BYTES_UNKNOWN),
                            parser.getAttributeLong(null,
                                    "estimated-upload-bytes", JobInfo.NETWORK_BYTES_UNKNOWN))
                    .setMinimumNetworkChunkBytes(parser.getAttributeLong(null,
                            "minimum-network-chunk-bytes", JobInfo.NETWORK_BYTES_UNKNOWN));
            parser.next();
            try {
                final PersistableBundle extras = PersistableBundle.restoreFromXml(parser);
                jwiBuilder.setExtras(extras);
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Persisted extras contained invalid data", e);
                return null;
            }

            try {
                return jwiBuilder.build();
            } catch (Exception e) {
                Slog.e(TAG, "Invalid JobWorkItem", e);
                return null;
            }
        }

        @NonNull
        private Set<String> readDebugTagsFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            Set<String> debugTags = new ArraySet<>();

            for (int eventType = parser.getEventType(); eventType != XmlPullParser.END_DOCUMENT;
                    eventType = parser.next()) {
                final String tagName = parser.getName();
                if (!XML_TAG_DEBUG_TAG.equals(tagName)) {
                    // We're no longer operating with debug tags.
                    break;
                }
                if (debugTags.size() < JobInfo.MAX_NUM_DEBUG_TAGS) {
                    final String debugTag;
                    try {
                        debugTag = JobInfo.validateDebugTag(parser.getAttributeValue(null, "tag"));
                    } catch (Exception e) {
                        Slog.wtf(TAG, "Invalid debug tag persisted to disk", e);
                        continue;
                    }
                    debugTags.add(debugTag);
                }
            }

            return debugTags;
        }
    }

    /** Set of all tracked jobs.
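     * Jobs are indexed twice: once by the calling uid that scheduled them and once by the
     * source uid they run on behalf of, so lookups in either direction are direct map hits
     * rather than scans of the full set. A minimal sketch of the distinction (the uids here
     * are hypothetical):
     * <pre>{@code
     * // A job the system (uid 1000) schedules on behalf of an app (uid 10123):
     * // jobSet.getJobsByUid(1000) and jobSet.getJobsBySourceUid(10123) both contain it,
     * // while jobSet.getJobsByUid(10123) does not.
     * }</pre>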
     */
    @VisibleForTesting
    public static final class JobSet {
        @VisibleForTesting // Key is the getUid() originator of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobs;

        @VisibleForTesting // Same data, but keyed by the getSourceUid() of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobsPerSourceUid;

        public JobSet() {
            mJobs = new SparseArray<ArraySet<JobStatus>>();
            mJobsPerSourceUid = new SparseArray<>();
        }

        public ArraySet<JobStatus> getJobsByUid(int uid) {
            ArraySet<JobStatus> matchingJobs = new ArraySet<>();
            getJobsByUid(uid, matchingJobs);
            return matchingJobs;
        }

        public void getJobsByUid(int uid, Set<JobStatus> insertInto) {
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                insertInto.addAll(jobs);
            }
        }

        @NonNull
        public ArraySet<JobStatus> getJobsBySourceUid(int sourceUid) {
            final ArraySet<JobStatus> result = new ArraySet<>();
            getJobsBySourceUid(sourceUid, result);
            return result;
        }

        public void getJobsBySourceUid(int sourceUid, Set<JobStatus> insertInto) {
            final ArraySet<JobStatus> jobs = mJobsPerSourceUid.get(sourceUid);
            if (jobs != null) {
                insertInto.addAll(jobs);
            }
        }

        public boolean add(JobStatus job) {
            final int uid = job.getUid();
            final int sourceUid = job.getSourceUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs == null) {
                jobs = new ArraySet<JobStatus>();
                mJobs.put(uid, jobs);
            }
            ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            if (jobsForSourceUid == null) {
                jobsForSourceUid = new ArraySet<>();
                mJobsPerSourceUid.put(sourceUid, jobsForSourceUid);
            }
            final boolean added = jobs.add(job);
            final boolean addedInSource = jobsForSourceUid.add(job);
            if (added != addedInSource) {
                Slog.wtf(TAG, "mJobs and mJobsPerSourceUid mismatch; caller=" + added
                        + " source=" + addedInSource);
            }
            return added || addedInSource;
        }

        public boolean remove(JobStatus job) {
            final int uid = job.getUid();
            final ArraySet<JobStatus> jobs = mJobs.get(uid);
            final int sourceUid = job.getSourceUid();
            final ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            final boolean didRemove = jobs != null && jobs.remove(job);
            final boolean sourceRemove = jobsForSourceUid != null && jobsForSourceUid.remove(job);
            if (didRemove != sourceRemove) {
                Slog.wtf(TAG, "Job presence mismatch; caller=" + didRemove
                        + " source=" + sourceRemove);
            }
            if (didRemove || sourceRemove) {
                // No more jobs for this uid? Let the now-empty set objects be GC'd.
                if (jobs != null && jobs.size() == 0) {
                    mJobs.remove(uid);
                }
                if (jobsForSourceUid != null && jobsForSourceUid.size() == 0) {
                    mJobsPerSourceUid.remove(sourceUid);
                }
                return true;
            }
            return false;
        }

        /**
         * Removes the jobs of all users not listed in the keepUserIds array of user ids.
         * This will remove jobs scheduled *by* and *for* any unlisted users.
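         * <p>A sketch of the predicate logic below, with a hypothetical keepUserIds = {0}:
         * <pre>{@code
         * // scheduled by user 0  for user 10 -> removed (source user not kept)
         * // scheduled by user 10 for user 0  -> removed (calling user not kept)
         * // scheduled by user 0  for user 0  -> kept (both users listed)
         * }</pre>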
         */
        public void removeJobsOfUnlistedUsers(final int[] keepUserIds) {
            final Predicate<JobStatus> noSourceUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getSourceUserId());
            final Predicate<JobStatus> noCallingUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getUserId());
            removeAll(noSourceUser.or(noCallingUser));
        }

        private void removeAll(Predicate<JobStatus> predicate) {
            for (int jobSetIndex = mJobs.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobs.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobs.removeAt(jobSetIndex);
                }
            }
            for (int jobSetIndex = mJobsPerSourceUid.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobsPerSourceUid.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobsPerSourceUid.removeAt(jobSetIndex);
                }
            }
        }

        public boolean contains(JobStatus job) {
            final int uid = job.getUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            return jobs != null && jobs.contains(job);
        }

        public JobStatus get(int uid, @Nullable String namespace, int jobId) {
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getJobId() == jobId && Objects.equals(namespace, job.getNamespace())) {
                        return job;
                    }
                }
            }
            return null;
        }

        // Inefficient; use only for testing
        public List<JobStatus> getAllJobs() {
            ArrayList<JobStatus> allJobs = new ArrayList<JobStatus>(size());
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(i);
                if (jobs != null) {
                    // Use an indexed loop over the ArraySet so we don't instantiate its
                    // iterator or have to copy into a temporary array via toArray().
                    for (int j = jobs.size() - 1; j >= 0; j--) {
                        allJobs.add(jobs.valueAt(j));
                    }
                }
            }
            return allJobs;
        }

        public void clear() {
            mJobs.clear();
            mJobsPerSourceUid.clear();
        }

        public int size() {
            int total = 0;
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                total += mJobs.valueAt(i).size();
            }
            return total;
        }

        // We only want to count the jobs that this uid has scheduled on its own
        // behalf, not those that the app has scheduled on someone else's behalf.
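        // For example (with hypothetical uids): a job whose getUid() and getSourceUid() are
        // both 10123 counts toward uid 10123's total, while a proxy job with getUid() == 1000
        // and getSourceUid() == 10123 counts toward neither uid here.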
        public int countJobsForUid(int uid) {
            int total = 0;
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getUid() == job.getSourceUid()) {
                        total++;
                    }
                }
            }
            return total;
        }

        public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
                @NonNull Consumer<JobStatus> functor) {
            for (int uidIndex = mJobs.size() - 1; uidIndex >= 0; uidIndex--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(uidIndex);
                if (jobs != null) {
                    for (int i = jobs.size() - 1; i >= 0; i--) {
                        final JobStatus jobStatus = jobs.valueAt(i);
                        if ((filterPredicate == null) || filterPredicate.test(jobStatus)) {
                            functor.accept(jobStatus);
                        }
                    }
                }
            }
        }

        public void forEachJob(int callingUid, Consumer<JobStatus> functor) {
            ArraySet<JobStatus> jobs = mJobs.get(callingUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }

        public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
            final ArraySet<JobStatus> jobs = mJobsPerSourceUid.get(sourceUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }
    }
}