/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License
 */

package com.android.server.job;

import static android.net.NetworkCapabilities.NET_CAPABILITY_TEMPORARILY_NOT_METERED;
import static android.net.NetworkCapabilities.TRANSPORT_TEST;

import static com.android.server.job.JobSchedulerService.sElapsedRealtimeClock;
import static com.android.server.job.JobSchedulerService.sSystemClock;

import android.annotation.Nullable;
import android.app.job.JobInfo;
import android.content.ComponentName;
import android.content.Context;
import android.net.NetworkRequest;
import android.os.Environment;
import android.os.Handler;
import android.os.PersistableBundle;
import android.os.Process;
import android.os.SystemClock;
import android.os.UserHandle;
import android.text.TextUtils;
import android.text.format.DateUtils;
import android.util.ArraySet;
import android.util.AtomicFile;
import android.util.Pair;
import android.util.Slog;
import android.util.SparseArray;
import android.util.SystemConfigFileCommitEventLogger;
import android.util.TypedXmlSerializer;
import android.util.Xml;

import com.android.internal.annotations.GuardedBy;
import com.android.internal.annotations.VisibleForTesting;
import com.android.internal.util.ArrayUtils;
import com.android.internal.util.BitUtils;
import com.android.server.IoThread;
import com.android.server.job.JobSchedulerInternal.JobStorePersistStats;
import com.android.server.job.controllers.JobStatus;

import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import org.xmlpull.v1.XmlSerializer;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;
import java.util.StringJoiner;
import java.util.function.Consumer;
import java.util.function.Predicate;

/**
 * Maintains the master list of jobs that the job scheduler is tracking. These jobs are compared by
 * reference, so none of the functions in this class should make a copy.
 * Also handles read/write of persisted jobs.
 *
 * Note on locking:
 *      All callers to this class must <strong>lock on the class object they are calling</strong>.
 *      This is important b/c {@link com.android.server.job.JobStore.WriteJobsMapToDiskRunnable}
 *      and {@link com.android.server.job.JobStore.ReadJobMapFromDiskRunnable} lock on that
 *      object.
 *
 * Test:
 * atest $ANDROID_BUILD_TOP/frameworks/base/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java
 */
public final class JobStore {
    private static final String TAG = "JobStore";
    private static final boolean DEBUG = JobSchedulerService.DEBUG;

    /** Threshold to adjust how often we want to write to the db. */
    private static final long JOB_PERSIST_DELAY = 2000L;

    final Object mLock;
    final Object mWriteScheduleLock;    // used solely for invariants around write scheduling
    final JobSet mJobSet; // per-caller-uid and per-source-uid tracking
    final Context mContext;

    // Bookkeeping around incorrect boot-time system clock
    private final long mXmlTimestamp;
    private boolean mRtcGood;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteScheduled;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteInProgress;

    private static final Object sSingletonLock = new Object();
    private final SystemConfigFileCommitEventLogger mEventLogger;
    private final AtomicFile mJobsFile;
    /** Handler backed by IoThread for writing to disk. */
    private final Handler mIoHandler = IoThread.getHandler();
    private static JobStore sSingleton;

    private JobStorePersistStats mPersistInfo = new JobStorePersistStats();

    /** Used by the {@link JobSchedulerService} to instantiate the JobStore. */
    static JobStore initAndGet(JobSchedulerService jobManagerService) {
        synchronized (sSingletonLock) {
            if (sSingleton == null) {
                sSingleton = new JobStore(jobManagerService.getContext(),
                        jobManagerService.getLock(), Environment.getDataDirectory());
            }
            return sSingleton;
        }
    }

    /**
     * @return A freshly initialized job store object, with no loaded jobs.
     */
    @VisibleForTesting
    public static JobStore initAndGetForTesting(Context context, File dataDir) {
        JobStore jobStoreUnderTest = new JobStore(context, new Object(), dataDir);
        jobStoreUnderTest.clearForTesting();
        return jobStoreUnderTest;
    }

    /**
     * Construct the instance of the job store. This results in a blocking read from disk.
     */
    private JobStore(Context context, Object lock, File dataDir) {
        mLock = lock;
        mWriteScheduleLock = new Object();
        mContext = context;

        File systemDir = new File(dataDir, "system");
        File jobDir = new File(systemDir, "job");
        jobDir.mkdirs();
        mEventLogger = new SystemConfigFileCommitEventLogger("jobs");
        mJobsFile = new AtomicFile(new File(jobDir, "jobs.xml"), mEventLogger);

        mJobSet = new JobSet();

        // If the current RTC is earlier than the timestamp on our persisted jobs file,
        // we suspect that the RTC is uninitialized and so we cannot draw conclusions
        // about persisted job scheduling.
        //
        // Note that if the persisted jobs file does not exist, we proceed with the
        // assumption that the RTC is good. This is less work and is safe: if the
        // clock updates to sanity then we'll be saving the persisted jobs file in that
        // correct state, which is normal; or we'll wind up writing the jobs file with
        // an incorrect historical timestamp. That's fine; at worst we'll reboot with
        // a *correct* timestamp, see a bunch of overdue jobs, and run them; then
        // settle into normal operation.
        mXmlTimestamp = mJobsFile.getLastModifiedTime();
        mRtcGood = (sSystemClock.millis() > mXmlTimestamp);

        readJobMapFromDisk(mJobSet, mRtcGood);
    }
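
    // Illustrative sketch, not part of the upstream file: how the RTC-sanity heuristic above is
    // meant to be consumed. If the hardware clock comes up at a factory-default epoch that
    // predates the last jobs.xml write, the persisted UTC bounds cannot be trusted until the
    // clock is corrected, e.g. (hypothetical caller):
    //
    //   JobStore store = JobStore.initAndGetForTesting(context, dataDir);
    //   if (!store.jobTimesInflatedValid()
    //           && store.clockNowValidToInflate(System.currentTimeMillis())) {
    //       // The wall clock has caught up with the persisted file's timestamp, so it is now
    //       // safe to rebuild elapsed-timebase bounds via getRtcCorrectedJobsLocked().
    //   }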

    public boolean jobTimesInflatedValid() {
        return mRtcGood;
    }

    public boolean clockNowValidToInflate(long now) {
        return now >= mXmlTimestamp;
    }

    /**
     * Find all the jobs that were affected by RTC clock uncertainty at boot time. Returns
     * parallel lists of the existing JobStatus objects and of new, equivalent JobStatus instances
     * with now-corrected time bounds.
     */
    public void getRtcCorrectedJobsLocked(final ArrayList<JobStatus> toAdd,
            final ArrayList<JobStatus> toRemove) {
        final long elapsedNow = sElapsedRealtimeClock.millis();

        // Find the jobs that need to be fixed up, collecting them for post-iteration
        // replacement with their new versions
        forEachJob(job -> {
            final Pair<Long, Long> utcTimes = job.getPersistedUtcTimes();
            if (utcTimes != null) {
                Pair<Long, Long> elapsedRuntimes =
                        convertRtcBoundsToElapsed(utcTimes, elapsedNow);
                JobStatus newJob = new JobStatus(job,
                        elapsedRuntimes.first, elapsedRuntimes.second,
                        0, job.getLastSuccessfulRunTime(), job.getLastFailedRunTime());
                newJob.prepareLocked();
                toAdd.add(newJob);
                toRemove.add(job);
            }
        });
    }

    /**
     * Add a job to the master list, persisting it if necessary. If the JobStatus already exists,
     * it will be replaced.
     * @param jobStatus Job to add.
     * @return Whether or not an equivalent JobStatus was replaced by this operation.
     */
    public boolean add(JobStatus jobStatus) {
        boolean replaced = mJobSet.remove(jobStatus);
        mJobSet.add(jobStatus);
        if (jobStatus.isPersisted()) {
            maybeWriteStatusToDiskAsync();
        }
        if (DEBUG) {
            Slog.d(TAG, "Added job status to store: " + jobStatus);
        }
        return replaced;
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void addForTesting(JobStatus jobStatus) {
        mJobSet.add(jobStatus);
    }

    boolean containsJob(JobStatus jobStatus) {
        return mJobSet.contains(jobStatus);
    }

    public int size() {
        return mJobSet.size();
    }

    public JobStorePersistStats getPersistStats() {
        return mPersistInfo;
    }

    public int countJobsForUid(int uid) {
        return mJobSet.countJobsForUid(uid);
    }

    /**
     * Remove the provided job. Will also delete the job if it was persisted.
     * @param removeFromPersisted If true, the job will be removed from the persisted job list
     *                            immediately (if it was persisted).
     * @return Whether or not the job existed to be removed.
     */
    public boolean remove(JobStatus jobStatus, boolean removeFromPersisted) {
        boolean removed = mJobSet.remove(jobStatus);
        if (!removed) {
            if (DEBUG) {
                Slog.d(TAG, "Couldn't remove job: didn't exist: " + jobStatus);
            }
            return false;
        }
        if (removeFromPersisted && jobStatus.isPersisted()) {
            maybeWriteStatusToDiskAsync();
        }
        return removed;
    }

    /**
     * Remove the jobs of users not specified in the keepUserIds.
     * @param keepUserIds Array of User IDs whose jobs should be kept and not removed.
     */
    public void removeJobsOfUnlistedUsers(int[] keepUserIds) {
        mJobSet.removeJobsOfUnlistedUsers(keepUserIds);
    }

    @VisibleForTesting
    public void clear() {
        mJobSet.clear();
        maybeWriteStatusToDiskAsync();
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void clearForTesting() {
        mJobSet.clear();
    }

    /**
     * @param userHandle User for whom we are querying the list of jobs.
     * @return A list of all the jobs scheduled for the provided user. Never null.
     */
    public List<JobStatus> getJobsByUser(int userHandle) {
        return mJobSet.getJobsByUser(userHandle);
    }

    /**
     * @param uid Uid of the requesting app.
     * @return All JobStatus objects for a given uid from the master list. Never null.
     */
    public List<JobStatus> getJobsByUid(int uid) {
        return mJobSet.getJobsByUid(uid);
    }

    /**
     * @param uid Uid of the requesting app.
     * @param jobId Job id, specified at schedule-time.
     * @return the JobStatus that matches the provided uid and jobId, or null if none found.
     */
    public JobStatus getJobByUidAndJobId(int uid, int jobId) {
        return mJobSet.get(uid, jobId);
    }

    /**
     * Iterate over the set of all jobs, invoking the supplied functor on each. This is for
     * customers who need to examine each job; we'd much rather not have to generate
     * transient unified collections for them to iterate over and then discard, or to create
     * iterators every time a client needs to perform a sweep.
     */
    public void forEachJob(Consumer<JobStatus> functor) {
        mJobSet.forEachJob(null, functor);
    }

    public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
            Consumer<JobStatus> functor) {
        mJobSet.forEachJob(filterPredicate, functor);
    }

    public void forEachJob(int uid, Consumer<JobStatus> functor) {
        mJobSet.forEachJob(uid, functor);
    }

    public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
        mJobSet.forEachJobForSourceUid(sourceUid, functor);
    }

    /** Version of the db schema. */
    private static final int JOBS_FILE_VERSION = 1;
    /** Tag corresponds to constraints this job needs. */
    private static final String XML_TAG_PARAMS_CONSTRAINTS = "constraints";
    /** Tag corresponds to execution parameters. */
    private static final String XML_TAG_PERIODIC = "periodic";
    private static final String XML_TAG_ONEOFF = "one-off";
    private static final String XML_TAG_EXTRAS = "extras";

    /**
     * Every time the state changes we write all the jobs in one swath, instead of trying to
     * track incremental changes.
     */
    private void maybeWriteStatusToDiskAsync() {
        synchronized (mWriteScheduleLock) {
            if (!mWriteScheduled) {
                if (DEBUG) {
                    Slog.v(TAG, "Scheduling persist of jobs to disk.");
                }
                mIoHandler.postDelayed(mWriteRunnable, JOB_PERSIST_DELAY);
                mWriteScheduled = true;
            }
        }
    }

    @VisibleForTesting
    public void readJobMapFromDisk(JobSet jobSet, boolean rtcGood) {
        new ReadJobMapFromDiskRunnable(jobSet, rtcGood).run();
    }

    /** Write persisted JobStore state to disk synchronously. Should only be used for testing. */
    @VisibleForTesting
    public void writeStatusToDiskForTesting() {
        synchronized (mWriteScheduleLock) {
            if (mWriteScheduled) {
                throw new IllegalStateException("An asynchronous write is already scheduled.");
            }

            mWriteScheduled = true;
            mWriteRunnable.run();
        }
    }

    /**
     * Wait for any pending write to the persistent store to clear
     * @param maxWaitMillis Maximum time from present to wait
     * @return {@code true} if I/O cleared as expected, {@code false} if the wait
     *     timed out before the pending write completed.
     */
    @VisibleForTesting
    public boolean waitForWriteToCompleteForTesting(long maxWaitMillis) {
        final long start = SystemClock.uptimeMillis();
        final long end = start + maxWaitMillis;
        synchronized (mWriteScheduleLock) {
            while (mWriteScheduled || mWriteInProgress) {
                final long now = SystemClock.uptimeMillis();
                if (now >= end) {
                    // still not done and we've hit the end; failure
                    return false;
                }
                try {
                    mWriteScheduleLock.wait(now - start + maxWaitMillis);
                } catch (InterruptedException e) {
                    // Spurious; keep waiting
                    break;
                }
            }
        }
        return true;
    }

    /**
     * Returns a single string representation of the contents of the specified intArray.
     * For example, if the input is the intArray [1, 2, 4], the result is the string "1,2,4".
     */
    @VisibleForTesting
    static String intArrayToString(int[] values) {
        final StringJoiner sj = new StringJoiner(",");
        for (final int value : values) {
            sj.add(String.valueOf(value));
        }
        return sj.toString();
    }

    /**
     * Converts a string containing a comma-separated list of decimal representations
     * of ints into an array of int. If the string is not correctly formatted,
     * or if any value doesn't fit into an int, NumberFormatException is thrown.
     */
    @VisibleForTesting
    static int[] stringToIntArray(String str) {
        if (TextUtils.isEmpty(str)) return new int[0];
        final String[] arr = str.split(",");
        final int[] values = new int[arr.length];
        for (int i = 0; i < arr.length; i++) {
            values[i] = Integer.parseInt(arr[i]);
        }
        return values;
    }
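
    // Illustrative sketch, not part of the upstream file: the two helpers above form the
    // round trip used for the "-csv" constraint attributes in jobs.xml, e.g.
    //
    //   int[] caps = {4, 14, 15};
    //   String csv = intArrayToString(caps);     // "4,14,15", written as an XML attribute value
    //   int[] restored = stringToIntArray(csv);  // {4, 14, 15}, parsed back on the next boot
    //
    // Note that stringToIntArray(null) and stringToIntArray("") both return an empty array
    // rather than throwing.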

    /**
     * Runnable that writes {@link #mJobSet} out to xml.
     * NOTE: This Runnable locks on mLock
     */
    private final Runnable mWriteRunnable = new Runnable() {
        @Override
        public void run() {
            final long startElapsed = sElapsedRealtimeClock.millis();
            final List<JobStatus> storeCopy = new ArrayList<JobStatus>();
            // Intentionally allow new scheduling of a write operation *before* we clone
            // the job set. If we reset it to false after cloning, there's a window in
            // which no new write will be scheduled but mLock is not held, i.e. a new
            // job might appear and fail to be recognized as needing a persist. The
            // potential cost is one redundant write of an identical set of jobs in the
            // rare case of that specific race, but by doing it this way we avoid quite
            // a bit of lock contention.
            synchronized (mWriteScheduleLock) {
                mWriteScheduled = false;
                if (mWriteInProgress) {
                    // Another runnable is currently writing. Postpone this new write task.
                    maybeWriteStatusToDiskAsync();
                    return;
                }
                mWriteInProgress = true;
            }
            synchronized (mLock) {
                // Clone the jobs so we can release the lock before writing.
                mJobSet.forEachJob(null, (job) -> {
                    if (job.isPersisted()) {
                        storeCopy.add(new JobStatus(job));
                    }
                });
            }
            writeJobsMapImpl(storeCopy);
            if (DEBUG) {
                Slog.v(TAG, "Finished writing, took " + (sElapsedRealtimeClock.millis()
                        - startElapsed) + "ms");
            }
            synchronized (mWriteScheduleLock) {
                mWriteInProgress = false;
                mWriteScheduleLock.notifyAll();
            }
        }

        private void writeJobsMapImpl(List<JobStatus> jobList) {
            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            mEventLogger.setStartTime(SystemClock.uptimeMillis());
            try (FileOutputStream fos = mJobsFile.startWrite()) {
                TypedXmlSerializer out = Xml.resolveSerializer(fos);
                out.startDocument(null, true);
                out.setFeature("http://xmlpull.org/v1/doc/features.html#indent-output", true);

                out.startTag(null, "job-info");
                out.attribute(null, "version", Integer.toString(JOBS_FILE_VERSION));
                for (int i = 0; i < jobList.size(); i++) {
                    JobStatus jobStatus = jobList.get(i);
                    if (DEBUG) {
                        Slog.d(TAG, "Saving job " + jobStatus.getJobId());
                    }
                    out.startTag(null, "job");
                    addAttributesToJobTag(out, jobStatus);
                    writeConstraintsToXml(out, jobStatus);
                    writeExecutionCriteriaToXml(out, jobStatus);
                    writeBundleToXml(jobStatus.getJob().getExtras(), out);
                    out.endTag(null, "job");

                    numJobs++;
                    if (jobStatus.getUid() == Process.SYSTEM_UID) {
                        numSystemJobs++;
                        if (isSyncJob(jobStatus)) {
                            numSyncJobs++;
                        }
                    }
                }
                out.endTag(null, "job-info");
                out.endDocument();

                mJobsFile.finishWrite(fos);
            } catch (IOException e) {
                if (DEBUG) {
                    Slog.v(TAG, "Error writing out job data.", e);
                }
            } catch (XmlPullParserException e) {
                if (DEBUG) {
                    Slog.d(TAG, "Error persisting bundle.", e);
                }
            } finally {
                mPersistInfo.countAllJobsSaved = numJobs;
                mPersistInfo.countSystemServerJobsSaved = numSystemJobs;
                mPersistInfo.countSystemSyncManagerJobsSaved = numSyncJobs;
            }
        }

        /**
         * Write out a tag with data comprising the required fields and bias of this job and
         * its client.
         */
        private void addAttributesToJobTag(XmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.attribute(null, "jobid", Integer.toString(jobStatus.getJobId()));
            out.attribute(null, "package", jobStatus.getServiceComponent().getPackageName());
            out.attribute(null, "class", jobStatus.getServiceComponent().getClassName());
            if (jobStatus.getSourcePackageName() != null) {
                out.attribute(null, "sourcePackageName", jobStatus.getSourcePackageName());
            }
            if (jobStatus.getSourceTag() != null) {
                out.attribute(null, "sourceTag", jobStatus.getSourceTag());
            }
            out.attribute(null, "sourceUserId", String.valueOf(jobStatus.getSourceUserId()));
            out.attribute(null, "uid", Integer.toString(jobStatus.getUid()));
            out.attribute(null, "bias", String.valueOf(jobStatus.getBias()));
            out.attribute(null, "priority", String.valueOf(jobStatus.getJob().getPriority()));
            out.attribute(null, "flags", String.valueOf(jobStatus.getFlags()));
            if (jobStatus.getInternalFlags() != 0) {
                out.attribute(null, "internalFlags", String.valueOf(jobStatus.getInternalFlags()));
            }

            out.attribute(null, "lastSuccessfulRunTime",
                    String.valueOf(jobStatus.getLastSuccessfulRunTime()));
            out.attribute(null, "lastFailedRunTime",
                    String.valueOf(jobStatus.getLastFailedRunTime()));
        }

        private void writeBundleToXml(PersistableBundle extras, XmlSerializer out)
                throws IOException, XmlPullParserException {
            out.startTag(null, XML_TAG_EXTRAS);
            PersistableBundle extrasCopy = deepCopyBundle(extras, 10);
            extrasCopy.saveToXml(out);
            out.endTag(null, XML_TAG_EXTRAS);
        }

        private PersistableBundle deepCopyBundle(PersistableBundle bundle, int maxDepth) {
            if (maxDepth <= 0) {
                return null;
            }
            PersistableBundle copy = (PersistableBundle) bundle.clone();
            Set<String> keySet = bundle.keySet();
            for (String key : keySet) {
                Object o = copy.get(key);
                if (o instanceof PersistableBundle) {
                    PersistableBundle bCopy = deepCopyBundle((PersistableBundle) o, maxDepth - 1);
                    copy.putPersistableBundle(key, bCopy);
                }
            }
            return copy;
        }
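
        // Illustrative note, not part of the upstream file: deepCopyBundle() copies nested
        // PersistableBundles only down to the depth passed by writeBundleToXml() (10 here).
        // Anything nested deeper is replaced with null in the copy, e.g.
        //
        //   PersistableBundle leaf = new PersistableBundle();
        //   PersistableBundle root = new PersistableBundle();
        //   root.putPersistableBundle("child", leaf);
        //   deepCopyBundle(root, 1);   // "child" becomes null: the recursion hits maxDepth 0
        //
        // so pathologically nested extras cannot recurse without bound during persistence.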

        /**
         * Write out a tag with data identifying this job's constraints. If the constraint isn't
         * here it doesn't apply.
         * TODO: b/183455312 Update this code to use proper serialization for NetworkRequest,
         * because the store currently does not persist everything (e.g. UIDs, bandwidth, and
         * signal strength are lost).
         */
        private void writeConstraintsToXml(XmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.startTag(null, XML_TAG_PARAMS_CONSTRAINTS);
            if (jobStatus.hasConnectivityConstraint()) {
                final NetworkRequest network = jobStatus.getJob().getRequiredNetwork();
                out.attribute(null, "net-capabilities-csv", intArrayToString(
                        network.getCapabilities()));
                out.attribute(null, "net-forbidden-capabilities-csv", intArrayToString(
                        network.getForbiddenCapabilities()));
                out.attribute(null, "net-transport-types-csv", intArrayToString(
                        network.getTransportTypes()));
            }
            if (jobStatus.hasIdleConstraint()) {
                out.attribute(null, "idle", Boolean.toString(true));
            }
            if (jobStatus.hasChargingConstraint()) {
                out.attribute(null, "charging", Boolean.toString(true));
            }
            if (jobStatus.hasBatteryNotLowConstraint()) {
                out.attribute(null, "battery-not-low", Boolean.toString(true));
            }
            if (jobStatus.hasStorageNotLowConstraint()) {
                out.attribute(null, "storage-not-low", Boolean.toString(true));
            }
            out.endTag(null, XML_TAG_PARAMS_CONSTRAINTS);
        }

        private void writeExecutionCriteriaToXml(XmlSerializer out, JobStatus jobStatus)
                throws IOException {
            final JobInfo job = jobStatus.getJob();
            if (jobStatus.getJob().isPeriodic()) {
                out.startTag(null, XML_TAG_PERIODIC);
                out.attribute(null, "period", Long.toString(job.getIntervalMillis()));
                out.attribute(null, "flex", Long.toString(job.getFlexMillis()));
            } else {
                out.startTag(null, XML_TAG_ONEOFF);
            }

            // If we still have the persisted times, we need to record those directly because
            // we haven't yet been able to calculate the usual elapsed-timebase bounds
            // correctly due to wall-clock uncertainty.
            Pair<Long, Long> utcJobTimes = jobStatus.getPersistedUtcTimes();
            if (DEBUG && utcJobTimes != null) {
                Slog.i(TAG, "storing original UTC timestamps for " + jobStatus);
            }

            final long nowRTC = sSystemClock.millis();
            final long nowElapsed = sElapsedRealtimeClock.millis();
            if (jobStatus.hasDeadlineConstraint()) {
                // Wall clock deadline.
                final long deadlineWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getLatestRunTimeElapsed() - nowElapsed)
                        : utcJobTimes.second;
                out.attribute(null, "deadline", Long.toString(deadlineWallclock));
            }
            if (jobStatus.hasTimingDelayConstraint()) {
                final long delayWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getEarliestRunTime() - nowElapsed)
                        : utcJobTimes.first;
                out.attribute(null, "delay", Long.toString(delayWallclock));
            }

            // Only write out back-off policy if it differs from the default.
            // This also helps the case where the job is idle -> these aren't allowed to specify
            // back-off.
            if (jobStatus.getJob().getInitialBackoffMillis() != JobInfo.DEFAULT_INITIAL_BACKOFF_MILLIS
                    || jobStatus.getJob().getBackoffPolicy() != JobInfo.DEFAULT_BACKOFF_POLICY) {
                out.attribute(null, "backoff-policy", Integer.toString(job.getBackoffPolicy()));
                out.attribute(null, "initial-backoff", Long.toString(job.getInitialBackoffMillis()));
            }
            if (job.isPeriodic()) {
                out.endTag(null, XML_TAG_PERIODIC);
            } else {
                out.endTag(null, XML_TAG_ONEOFF);
            }
        }
    };
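
    // Illustrative sketch, not part of the upstream file: the shape of jobs.xml produced by
    // mWriteRunnable above (attribute values are hypothetical):
    //
    //   <job-info version="1">
    //     <job jobid="12" package="com.example.app" class="com.example.app.ExampleJobService"
    //          sourceUserId="0" uid="10123" bias="0" priority="300" flags="0"
    //          lastSuccessfulRunTime="0" lastFailedRunTime="0">
    //       <constraints net-capabilities-csv="12,13" net-forbidden-capabilities-csv=""
    //                    net-transport-types-csv="1" charging="true" />
    //       <one-off deadline="1700000000000" delay="1699990000000" />
    //       <extras />
    //     </job>
    //   </job-info>
    //
    // ReadJobMapFromDiskRunnable below walks this same structure back into JobStatus objects.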

    /**
     * Translate the supplied RTC times to the elapsed timebase, with clamping appropriate
     * to interpreting them as a job's delay + deadline times for alarm-setting purposes.
     * @param rtcTimes a Pair<Long, Long> in which {@code first} is the "delay" earliest
     *     allowable runtime for the job, and {@code second} is the "deadline" time at which
     *     the job becomes overdue.
     */
    private static Pair<Long, Long> convertRtcBoundsToElapsed(Pair<Long, Long> rtcTimes,
            long nowElapsed) {
        final long nowWallclock = sSystemClock.millis();
        final long earliest = (rtcTimes.first > JobStatus.NO_EARLIEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.first - nowWallclock, 0)
                : JobStatus.NO_EARLIEST_RUNTIME;
        final long latest = (rtcTimes.second < JobStatus.NO_LATEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.second - nowWallclock, 0)
                : JobStatus.NO_LATEST_RUNTIME;
        return Pair.create(earliest, latest);
    }

    private static boolean isSyncJob(JobStatus status) {
        return com.android.server.content.SyncJobService.class.getName()
                .equals(status.getServiceComponent().getClassName());
    }

    /**
     * Runnable that reads list of persisted job from xml. This is run once at start up, so doesn't
     * need to go through {@link JobStore#add(com.android.server.job.controllers.JobStatus)}.
     */
    private final class ReadJobMapFromDiskRunnable implements Runnable {
        private final JobSet jobSet;
        private final boolean rtcGood;

        /**
         * @param jobSet Reference to the (empty) set of JobStatus objects that back the JobStore,
         *     so that after disk read we can populate it directly.
         */
        ReadJobMapFromDiskRunnable(JobSet jobSet, boolean rtcIsGood) {
            this.jobSet = jobSet;
            this.rtcGood = rtcIsGood;
        }

        @Override
        public void run() {
            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            List<JobStatus> jobs;
            try (FileInputStream fis = mJobsFile.openRead()) {
                synchronized (mLock) {
                    jobs = readJobMapImpl(fis, rtcGood);
                    if (jobs != null) {
                        long now = sElapsedRealtimeClock.millis();
                        for (int i = 0; i < jobs.size(); i++) {
                            JobStatus js = jobs.get(i);
                            js.prepareLocked();
                            js.enqueueTime = now;
                            this.jobSet.add(js);

                            numJobs++;
                            if (js.getUid() == Process.SYSTEM_UID) {
                                numSystemJobs++;
                                if (isSyncJob(js)) {
                                    numSyncJobs++;
                                }
                            }
                        }
                    }
                }
            } catch (FileNotFoundException e) {
                if (DEBUG) {
                    Slog.d(TAG, "Could not find jobs file, probably there was nothing to load.");
                }
            } catch (XmlPullParserException | IOException e) {
                Slog.wtf(TAG, "Error jobstore xml.", e);
            } catch (Exception e) {
                // Crashing at this point would result in a boot loop, so live with a general
                // Exception for system stability's sake.
                Slog.wtf(TAG, "Unexpected exception", e);
            } finally {
                if (mPersistInfo.countAllJobsLoaded < 0) { // Only set them once.
                    mPersistInfo.countAllJobsLoaded = numJobs;
                    mPersistInfo.countSystemServerJobsLoaded = numSystemJobs;
                    mPersistInfo.countSystemSyncManagerJobsLoaded = numSyncJobs;
                }
            }
            Slog.i(TAG, "Read " + numJobs + " jobs");
        }

        private List<JobStatus> readJobMapImpl(InputStream fis, boolean rtcIsGood)
                throws XmlPullParserException, IOException {
            XmlPullParser parser = Xml.resolvePullParser(fis);

            int eventType = parser.getEventType();
            while (eventType != XmlPullParser.START_TAG &&
                    eventType != XmlPullParser.END_DOCUMENT) {
                eventType = parser.next();
                Slog.d(TAG, "Start tag: " + parser.getName());
            }
            if (eventType == XmlPullParser.END_DOCUMENT) {
                if (DEBUG) {
                    Slog.d(TAG, "No persisted jobs.");
                }
                return null;
            }

            String tagName = parser.getName();
            if ("job-info".equals(tagName)) {
                final List<JobStatus> jobs = new ArrayList<JobStatus>();
                final int version;
                // Read in version info.
                try {
                    version = Integer.parseInt(parser.getAttributeValue(null, "version"));
                    if (version > JOBS_FILE_VERSION || version < 0) {
                        Slog.d(TAG, "Invalid version number, aborting jobs file read.");
                        return null;
                    }
                } catch (NumberFormatException e) {
                    Slog.e(TAG, "Invalid version number, aborting jobs file read.");
                    return null;
                }
                eventType = parser.next();
                do {
                    // Read each <job/>
                    if (eventType == XmlPullParser.START_TAG) {
                        tagName = parser.getName();
                        // Start reading job.
                        if ("job".equals(tagName)) {
                            JobStatus persistedJob = restoreJobFromXml(rtcIsGood, parser, version);
                            if (persistedJob != null) {
                                if (DEBUG) {
                                    Slog.d(TAG, "Read out " + persistedJob);
                                }
                                jobs.add(persistedJob);
                            } else {
                                Slog.d(TAG, "Error reading job from file.");
                            }
                        }
                    }
                    eventType = parser.next();
                } while (eventType != XmlPullParser.END_DOCUMENT);
                return jobs;
            }
            return null;
        }

        /**
         * @param parser Xml parser at the beginning of a "<job/>" tag. The next "parser.next()"
         *     call will take the parser into the body of the job tag.
         * @return Newly instantiated job holding all the information we just read out of the
         *     xml tag.
         */
        private JobStatus restoreJobFromXml(boolean rtcIsGood, XmlPullParser parser,
                int schemaVersion) throws XmlPullParserException, IOException {
            JobInfo.Builder jobBuilder;
            int uid, sourceUserId;
            long lastSuccessfulRunTime;
            long lastFailedRunTime;
            int internalFlags = 0;

            // Read out job identifier attributes and bias.
            try {
                jobBuilder = buildBuilderFromXml(parser);
                jobBuilder.setPersisted(true);
                uid = Integer.parseInt(parser.getAttributeValue(null, "uid"));

                String val;
                if (schemaVersion == 0) {
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                } else if (schemaVersion >= 1) {
                    val = parser.getAttributeValue(null, "bias");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setPriority(Integer.parseInt(val));
                    }
                }
                val = parser.getAttributeValue(null, "flags");
                if (val != null) {
                    jobBuilder.setFlags(Integer.parseInt(val));
                }
                val = parser.getAttributeValue(null, "internalFlags");
                if (val != null) {
                    internalFlags = Integer.parseInt(val);
                }
                val = parser.getAttributeValue(null, "sourceUserId");
                sourceUserId = val == null ? -1 : Integer.parseInt(val);

                val = parser.getAttributeValue(null, "lastSuccessfulRunTime");
                lastSuccessfulRunTime = val == null ? 0 : Long.parseLong(val);

                val = parser.getAttributeValue(null, "lastFailedRunTime");
                lastFailedRunTime = val == null ? 0 : Long.parseLong(val);
            } catch (NumberFormatException e) {
                Slog.e(TAG, "Error parsing job's required fields, skipping");
                return null;
            }

            String sourcePackageName = parser.getAttributeValue(null, "sourcePackageName");
            final String sourceTag = parser.getAttributeValue(null, "sourceTag");

            int eventType;
            // Read out constraints tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);  // Push through to next START_TAG.

            if (!(eventType == XmlPullParser.START_TAG &&
                    XML_TAG_PARAMS_CONSTRAINTS.equals(parser.getName()))) {
                // Expecting a <constraints> start tag.
                return null;
            }
            try {
                buildConstraintsFromXml(jobBuilder, parser);
            } catch (NumberFormatException e) {
                Slog.d(TAG, "Error reading constraints, skipping.");
                return null;
            } catch (XmlPullParserException e) {
                Slog.d(TAG, "Error Parser Exception.", e);
                return null;
            } catch (IOException e) {
                Slog.d(TAG, "Error I/O Exception.", e);
                return null;
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Constraints contained invalid data", e);
                return null;
            }

            parser.next(); // Consume </constraints>

            // Read out execution parameters tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (eventType != XmlPullParser.START_TAG) {
                return null;
            }

            // Tuple of (earliest runtime, latest runtime) in UTC.
            final Pair<Long, Long> rtcRuntimes;
            try {
                rtcRuntimes = buildRtcExecutionTimesFromXml(parser);
            } catch (NumberFormatException e) {
                if (DEBUG) {
                    Slog.d(TAG, "Error parsing execution time parameters, skipping.");
                }
                return null;
            }

            final long elapsedNow = sElapsedRealtimeClock.millis();
            Pair<Long, Long> elapsedRuntimes = convertRtcBoundsToElapsed(rtcRuntimes, elapsedNow);

            if (XML_TAG_PERIODIC.equals(parser.getName())) {
                try {
                    String val = parser.getAttributeValue(null, "period");
                    final long periodMillis = Long.parseLong(val);
                    val = parser.getAttributeValue(null, "flex");
                    final long flexMillis = (val != null) ? Long.valueOf(val) : periodMillis;
                    jobBuilder.setPeriodic(periodMillis, flexMillis);
                    // As a sanity check, cap the recreated run time to be no later than flex+period
                    // from now. This is the latest the periodic could be pushed out. This could
                    // happen if the periodic ran early (at flex time before period), and then the
                    // device rebooted.
                    if (elapsedRuntimes.second > elapsedNow + periodMillis + flexMillis) {
                        final long clampedLateRuntimeElapsed = elapsedNow + flexMillis
                                + periodMillis;
                        final long clampedEarlyRuntimeElapsed = clampedLateRuntimeElapsed
                                - flexMillis;
                        Slog.w(TAG,
                                String.format("Periodic job for uid='%d' persisted run-time is" +
                                                " too big [%s, %s]. Clamping to [%s,%s]",
                                        uid,
                                        DateUtils.formatElapsedTime(elapsedRuntimes.first / 1000),
                                        DateUtils.formatElapsedTime(elapsedRuntimes.second / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedEarlyRuntimeElapsed / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedLateRuntimeElapsed / 1000))
                        );
                        elapsedRuntimes =
                                Pair.create(clampedEarlyRuntimeElapsed, clampedLateRuntimeElapsed);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading periodic execution criteria, skipping.");
                    return null;
                }
            } else if (XML_TAG_ONEOFF.equals(parser.getName())) {
                try {
                    if (elapsedRuntimes.first != JobStatus.NO_EARLIEST_RUNTIME) {
                        jobBuilder.setMinimumLatency(elapsedRuntimes.first - elapsedNow);
                    }
                    if (elapsedRuntimes.second != JobStatus.NO_LATEST_RUNTIME) {
                        jobBuilder.setOverrideDeadline(
                                elapsedRuntimes.second - elapsedNow);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading job execution criteria, skipping.");
                    return null;
                }
            } else {
                if (DEBUG) {
                    Slog.d(TAG, "Invalid parameter tag, skipping - " + parser.getName());
                }
                // Expecting a parameters start tag.
                return null;
            }
            maybeBuildBackoffPolicyFromXml(jobBuilder, parser);

            parser.nextTag(); // Consume parameters end tag.

            // Read out extras Bundle.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (!(eventType == XmlPullParser.START_TAG
                    && XML_TAG_EXTRAS.equals(parser.getName()))) {
                if (DEBUG) {
                    Slog.d(TAG, "Error reading extras, skipping.");
                }
                return null;
            }

            final PersistableBundle extras;
            try {
                extras = PersistableBundle.restoreFromXml(parser);
                jobBuilder.setExtras(extras);
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Persisted extras contained invalid data", e);
                return null;
            }
            parser.nextTag(); // Consume </extras>

            final JobInfo builtJob;
            try {
                // Don't perform prefetch-deadline check here. Apps targeting S- shouldn't have
                // any prefetch-with-deadline jobs accidentally dropped. It's not worth doing
                // target SDK version checks here for apps targeting T+. There's no way for an
                // app to keep a perpetually scheduled prefetch job with a deadline. Prefetch jobs
                // with a deadline would run and then any newly scheduled prefetch jobs wouldn't
                // have a deadline. If a job is rescheduled (via jobFinished(true) or onStopJob()'s
                // return value), the deadline is dropped. Periodic jobs require all constraints
                // to be met, so there's no issue with their deadlines.
                builtJob = jobBuilder.build(false);
            } catch (Exception e) {
                Slog.w(TAG, "Unable to build job from XML, ignoring: " + jobBuilder.summarize(), e);
                return null;
            }

            // Migrate sync jobs forward from earlier, incomplete representation
            if ("android".equals(sourcePackageName)
                    && extras != null
                    && extras.getBoolean("SyncManagerJob", false)) {
                sourcePackageName = extras.getString("owningPackage", sourcePackageName);
                if (DEBUG) {
                    Slog.i(TAG, "Fixing up sync job source package name from 'android' to '"
                            + sourcePackageName + "'");
                }
            }

            // And now we're done
            final int appBucket = JobSchedulerService.standbyBucketForPackage(sourcePackageName,
                    sourceUserId, elapsedNow);
            JobStatus js = new JobStatus(
                    builtJob, uid, sourcePackageName, sourceUserId,
                    appBucket, sourceTag,
                    elapsedRuntimes.first, elapsedRuntimes.second,
                    lastSuccessfulRunTime, lastFailedRunTime,
                    (rtcIsGood) ? null : rtcRuntimes, internalFlags, /* dynamicConstraints */ 0);
            return js;
        }

        private JobInfo.Builder buildBuilderFromXml(XmlPullParser parser)
                throws NumberFormatException {
            // Pull out required fields from <job> attributes.
            int jobId = Integer.parseInt(parser.getAttributeValue(null, "jobid"));
            String packageName = parser.getAttributeValue(null, "package");
            String className = parser.getAttributeValue(null, "class");
            ComponentName cname = new ComponentName(packageName, className);

            return new JobInfo.Builder(jobId, cname);
        }

        /**
         * In S, there has been a change in format to make the code more robust and more
         * maintainable.
         * If the capabilities are bits 4, 14 and 15 (the format in R), they are stored as the
         * long string netCapabilitiesLong = '49168' in the old XML file attribute
         * "net-capabilities". The format in S is the int array string
         * netCapabilitiesIntArray = '4,14,15' in the new XML file attribute
         * "net-capabilities-csv".
         * For backward compatibility, the old format is still supported when reading old XML,
         * but in order to avoid issues with OEM-defined flags, the accepted capabilities and
         * transports are limited to those (maxNetCapabilityInR & maxTransportInR) defined in R.
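         *
         * <p>Worked example (illustrative, not normative): with bits 4, 14 and 15 set, the
         * R-era bitmask is (1L << 4) + (1L << 14) + (1L << 15) = 16 + 16384 + 32768 = 49168,
         * so a legacy file carries net-capabilities="49168", which BitUtils.unpackBits()
         * expands back to {4, 14, 15}; an S+ file instead carries
         * net-capabilities-csv="4,14,15", parsed directly with stringToIntArray().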
         */
        private void buildConstraintsFromXml(JobInfo.Builder jobBuilder, XmlPullParser parser)
                throws XmlPullParserException, IOException {
            String val;
            String netCapabilitiesLong = null;
            String netForbiddenCapabilitiesLong = null;
            String netTransportTypesLong = null;

            final String netCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-capabilities-csv");
            final String netForbiddenCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-forbidden-capabilities-csv");
            final String netTransportTypesIntArray = parser.getAttributeValue(
                    null, "net-transport-types-csv");
            if (netCapabilitiesIntArray == null || netTransportTypesIntArray == null) {
                netCapabilitiesLong = parser.getAttributeValue(null, "net-capabilities");
                netForbiddenCapabilitiesLong = parser.getAttributeValue(
                        null, "net-unwanted-capabilities");
                netTransportTypesLong = parser.getAttributeValue(null, "net-transport-types");
            }

            if ((netCapabilitiesIntArray != null) && (netTransportTypesIntArray != null)) {
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();

                for (int capability : stringToIntArray(netCapabilitiesIntArray)) {
                    builder.addCapability(capability);
                }

                for (int forbiddenCapability : stringToIntArray(netForbiddenCapabilitiesIntArray)) {
                    builder.addForbiddenCapability(forbiddenCapability);
                }

                for (int transport : stringToIntArray(netTransportTypesIntArray)) {
                    builder.addTransportType(transport);
                }
                jobBuilder.setRequiredNetwork(builder.build());
            } else if (netCapabilitiesLong != null && netTransportTypesLong != null) {
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();
                final int maxNetCapabilityInR = NET_CAPABILITY_TEMPORARILY_NOT_METERED;
                // We're okay throwing NFE here; caught by caller
                for (int capability : BitUtils.unpackBits(Long.parseLong(
                        netCapabilitiesLong))) {
                    if (capability <= maxNetCapabilityInR) {
                        builder.addCapability(capability);
                    }
                }
                for (int forbiddenCapability : BitUtils.unpackBits(Long.parseLong(
                        netForbiddenCapabilitiesLong))) {
                    if (forbiddenCapability <= maxNetCapabilityInR) {
                        builder.addForbiddenCapability(forbiddenCapability);
                    }
                }

                final int maxTransportInR = TRANSPORT_TEST;
                for (int transport : BitUtils.unpackBits(Long.parseLong(
                        netTransportTypesLong))) {
                    if (transport <= maxTransportInR) {
                        builder.addTransportType(transport);
                    }
                }
                jobBuilder.setRequiredNetwork(builder.build());
            } else {
                // Read legacy values
                val = parser.getAttributeValue(null, "connectivity");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_ANY);
                }
                val = parser.getAttributeValue(null, "metered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_METERED);
                }
                val = parser.getAttributeValue(null, "unmetered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_UNMETERED);
                }
                val = parser.getAttributeValue(null, "not-roaming");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_NOT_ROAMING);
                }
            }

            val = parser.getAttributeValue(null, "idle");
            if (val != null) {
                jobBuilder.setRequiresDeviceIdle(true);
            }
            val = parser.getAttributeValue(null, "charging");
            if (val != null) {
                jobBuilder.setRequiresCharging(true);
            }
            val = parser.getAttributeValue(null, "battery-not-low");
            if (val != null) {
                jobBuilder.setRequiresBatteryNotLow(true);
            }
            val = parser.getAttributeValue(null, "storage-not-low");
            if (val != null) {
                jobBuilder.setRequiresStorageNotLow(true);
            }
        }

        /**
         * Builds the back-off policy out of the params tag. These attributes may not exist,
         * depending on whether the back-off was set when the job was first scheduled.
         */
        private void maybeBuildBackoffPolicyFromXml(JobInfo.Builder jobBuilder,
                XmlPullParser parser) {
            String val = parser.getAttributeValue(null, "initial-backoff");
            if (val != null) {
                long initialBackoff = Long.parseLong(val);
                val = parser.getAttributeValue(null, "backoff-policy");
                int backoffPolicy = Integer.parseInt(val); // Will throw NFE which we catch higher up.
                jobBuilder.setBackoffCriteria(initialBackoff, backoffPolicy);
            }
        }

        /**
         * Extract a job's earliest/latest run time data from XML. These are returned in
         * unadjusted UTC wall clock time, because we do not yet know whether the system
         * clock is reliable for purposes of calculating deltas from 'now'.
         *
         * @param parser
         * @return A Pair of timestamps in UTC wall-clock time. The first is the earliest
         *     time at which the job is to become runnable, and the second is the deadline at
         *     which it becomes overdue to execute.
         * @throws NumberFormatException
         */
        private Pair<Long, Long> buildRtcExecutionTimesFromXml(XmlPullParser parser)
                throws NumberFormatException {
            String val;
            // Pull out execution time data.
            val = parser.getAttributeValue(null, "delay");
            final long earliestRunTimeRtc = (val != null)
                    ? Long.parseLong(val)
                    : JobStatus.NO_EARLIEST_RUNTIME;
            val = parser.getAttributeValue(null, "deadline");
            final long latestRunTimeRtc = (val != null)
                    ? Long.parseLong(val)
                    : JobStatus.NO_LATEST_RUNTIME;
            return Pair.create(earliestRunTimeRtc, latestRunTimeRtc);
        }
    }
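
    // Illustrative note, not part of the upstream file: JobSet below indexes every job twice,
    // once by the calling uid (mJobs) and once by the source uid (mJobsPerSourceUid). For a
    // hypothetical job that the system (uid 1000) schedules on behalf of an app (uid 10123):
    //
    //   jobSet.add(js);                          // js.getUid() == 1000, js.getSourceUid() == 10123
    //   jobSet.getJobsByUid(1000);               // contains js (keyed by caller)
    //   jobSet.forEachJobForSourceUid(10123, c); // also visits js (keyed by source)
    //   jobSet.countJobsForUid(1000);            // does NOT count js, since uid != sourceUid
    //
    // so both "who scheduled it" and "on whose behalf" queries stay direct per-uid lookups.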

    /** Set of all tracked jobs. */
    @VisibleForTesting
    public static final class JobSet {
        @VisibleForTesting // Key is the getUid() originator of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobs;

        @VisibleForTesting // Same data but with the key as getSourceUid() of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobsPerSourceUid;

        public JobSet() {
            mJobs = new SparseArray<ArraySet<JobStatus>>();
            mJobsPerSourceUid = new SparseArray<>();
        }

        public List<JobStatus> getJobsByUid(int uid) {
            ArrayList<JobStatus> matchingJobs = new ArrayList<JobStatus>();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                matchingJobs.addAll(jobs);
            }
            return matchingJobs;
        }

        // By user, not by uid, so we need to traverse by key and check
        public List<JobStatus> getJobsByUser(int userId) {
            final ArrayList<JobStatus> result = new ArrayList<JobStatus>();
            for (int i = mJobsPerSourceUid.size() - 1; i >= 0; i--) {
                if (UserHandle.getUserId(mJobsPerSourceUid.keyAt(i)) == userId) {
                    final ArraySet<JobStatus> jobs = mJobsPerSourceUid.valueAt(i);
                    if (jobs != null) {
                        result.addAll(jobs);
                    }
                }
            }
            return result;
        }

        public boolean add(JobStatus job) {
            final int uid = job.getUid();
            final int sourceUid = job.getSourceUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs == null) {
                jobs = new ArraySet<JobStatus>();
                mJobs.put(uid, jobs);
            }
            ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            if (jobsForSourceUid == null) {
                jobsForSourceUid = new ArraySet<>();
                mJobsPerSourceUid.put(sourceUid, jobsForSourceUid);
            }
            final boolean added = jobs.add(job);
            final boolean addedInSource = jobsForSourceUid.add(job);
            if (added != addedInSource) {
                Slog.wtf(TAG, "mJobs and mJobsPerSourceUid mismatch; caller= " + added
                        + " source= " + addedInSource);
            }
            return added || addedInSource;
        }

        public boolean remove(JobStatus job) {
            final int uid = job.getUid();
            final ArraySet<JobStatus> jobs = mJobs.get(uid);
            final int sourceUid = job.getSourceUid();
            final ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            final boolean didRemove = jobs != null && jobs.remove(job);
            final boolean sourceRemove = jobsForSourceUid != null && jobsForSourceUid.remove(job);
            if (didRemove != sourceRemove) {
                Slog.wtf(TAG, "Job presence mismatch; caller=" + didRemove
                        + " source=" + sourceRemove);
            }
            if (didRemove || sourceRemove) {
                // no more jobs for this uid? let the now-empty set objects be GC'd.
                if (jobs != null && jobs.size() == 0) {
                    mJobs.remove(uid);
                }
                if (jobsForSourceUid != null && jobsForSourceUid.size() == 0) {
                    mJobsPerSourceUid.remove(sourceUid);
                }
                return true;
            }
            return false;
        }

        /**
         * Removes the jobs of all users not specified in the keepUserIds array of user ids.
         * This will remove jobs scheduled *by* and *for* any unlisted users.
         */
        public void removeJobsOfUnlistedUsers(final int[] keepUserIds) {
            final Predicate<JobStatus> noSourceUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getSourceUserId());
            final Predicate<JobStatus> noCallingUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getUserId());
            removeAll(noSourceUser.or(noCallingUser));
        }

        private void removeAll(Predicate<JobStatus> predicate) {
            for (int jobSetIndex = mJobs.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobs.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobs.removeAt(jobSetIndex);
                }
            }
            for (int jobSetIndex = mJobsPerSourceUid.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobsPerSourceUid.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobsPerSourceUid.removeAt(jobSetIndex);
                }
            }
        }

        public boolean contains(JobStatus job) {
            final int uid = job.getUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            return jobs != null && jobs.contains(job);
        }

        public JobStatus get(int uid, int jobId) {
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getJobId() == jobId) {
                        return job;
                    }
                }
            }
            return null;
        }

        // Inefficient; use only for testing
        public List<JobStatus> getAllJobs() {
            ArrayList<JobStatus> allJobs = new ArrayList<JobStatus>(size());
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(i);
                if (jobs != null) {
                    // Use a for loop over the ArraySet, so we don't need to make its
                    // optional collection class iterator implementation or have to go
                    // through a temporary array from toArray().
                    for (int j = jobs.size() - 1; j >= 0; j--) {
                        allJobs.add(jobs.valueAt(j));
                    }
                }
            }
            return allJobs;
        }

        public void clear() {
            mJobs.clear();
            mJobsPerSourceUid.clear();
        }

        public int size() {
            int total = 0;
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                total += mJobs.valueAt(i).size();
            }
            return total;
        }

        // We only want to count the jobs that this uid has scheduled on its own
        // behalf, not those that the app has scheduled on someone else's behalf.
        public int countJobsForUid(int uid) {
            int total = 0;
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getUid() == job.getSourceUid()) {
                        total++;
                    }
                }
            }
            return total;
        }

        public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
                Consumer<JobStatus> functor) {
            for (int uidIndex = mJobs.size() - 1; uidIndex >= 0; uidIndex--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(uidIndex);
                if (jobs != null) {
                    for (int i = jobs.size() - 1; i >= 0; i--) {
                        final JobStatus jobStatus = jobs.valueAt(i);
                        if ((filterPredicate == null) || filterPredicate.test(jobStatus)) {
                            functor.accept(jobStatus);
                        }
                    }
                }
            }
        }

        public void forEachJob(int callingUid, Consumer<JobStatus> functor) {
            ArraySet<JobStatus> jobs = mJobs.get(callingUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }

        public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
            final ArraySet<JobStatus> jobs = mJobsPerSourceUid.get(sourceUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }
    }
}