/*
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;

import java.lang.Thread.UncaughtExceptionHandler;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

/**
 * An {@link ExecutorService} for running {@link ForkJoinTask}s.
 * A {@code ForkJoinPool} provides the entry point for submissions
 * from non-{@code ForkJoinTask} clients, as well as management and
 * monitoring operations.
 *
 * <p>A {@code ForkJoinPool} differs from other kinds of {@link
 * ExecutorService} mainly by virtue of employing
 * <em>work-stealing</em>: all threads in the pool attempt to find and
 * execute tasks submitted to the pool and/or created by other active
 * tasks (eventually blocking waiting for work if none exist). This
 * enables efficient processing when most tasks spawn other subtasks
 * (as do most {@code ForkJoinTask}s), as well as when many small
 * tasks are submitted to the pool from external clients. Especially
 * when setting <em>asyncMode</em> to true in constructors, {@code
 * ForkJoinPool}s may also be appropriate for use with event-style
 * tasks that are never joined.
 *
 * <p>A static {@code commonPool()} is available and appropriate for
 * most applications. The common pool is used by any ForkJoinTask that
 * is not explicitly submitted to a specified pool. Using the common
 * pool normally reduces resource usage (its threads are slowly
 * reclaimed during periods of non-use, and reinstated upon subsequent
 * use).
 *
 * <p>For applications that require separate or custom pools, a {@code
 * ForkJoinPool} may be constructed with a given target parallelism
 * level; by default, equal to the number of available processors. The
 * pool attempts to maintain enough active (or available) threads by
 * dynamically adding, suspending, or resuming internal worker
 * threads, even if some tasks are stalled waiting to join others.
 * However, no such adjustments are guaranteed in the face of blocked
 * I/O or other unmanaged synchronization. The nested {@link
 * ManagedBlocker} interface enables extension of the kinds of
 * synchronization accommodated.
 *
 * <p>In addition to execution and lifecycle control methods, this
 * class provides status check methods (for example
 * {@link #getStealCount}) that are intended to aid in developing,
 * tuning, and monitoring fork/join applications. Also, method
 * {@link #toString} returns indications of pool state in a
 * convenient form for informal monitoring.
 *
 * <p>As is the case with other ExecutorServices, there are three
 * main task execution methods summarized in the following table.
 * These are designed to be used primarily by clients not already
 * engaged in fork/join computations in the current pool. The main
 * forms of these methods accept instances of {@code ForkJoinTask},
 * but overloaded forms also allow mixed execution of plain {@code
 * Runnable}- or {@code Callable}- based activities as well.
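 *
 * <p>For example (an illustrative sketch only; the task class and the
 * {@code data} array below are hypothetical, not part of this API), a
 * caller that is not itself running within a ForkJoinPool might sum an
 * array by submitting a task to the common pool and joining its result:
 *
 * <pre> {@code
 * class SumTask extends RecursiveTask<Long> {
 *   final long[] array; final int lo, hi;
 *   SumTask(long[] array, int lo, int hi) {
 *     this.array = array; this.lo = lo; this.hi = hi;
 *   }
 *   protected Long compute() {
 *     if (hi - lo <= 1000) {              // small enough: sum sequentially
 *       long sum = 0;
 *       for (int i = lo; i < hi; ++i)
 *         sum += array[i];
 *       return sum;
 *     }
 *     int mid = (lo + hi) >>> 1;
 *     SumTask left = new SumTask(array, lo, mid);
 *     left.fork();                        // arrange async execution of one half
 *     long right = new SumTask(array, mid, hi).compute();
 *     return right + left.join();        // join within the computation
 *   }
 * }
 *
 * // given some long[] data:
 * long total = ForkJoinPool.commonPool()
 *     .submit(new SumTask(data, 0, data.length))  // submit returns a Future
 *     .join();
 * }</pre>
 *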
 * However, tasks that are already executing in a pool should
 * normally instead use the within-computation forms listed in the
 * table unless using async event-style tasks that are not usually
 * joined, in which case there is little difference among choice of
 * methods.
 *
 * <table BORDER CELLPADDING=3 CELLSPACING=1>
 * <caption>Summary of task execution methods</caption>
 *  <tr>
 *    <td></td>
 *    <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
 *    <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
 *  </tr>
 *  <tr>
 *    <td> <b>Arrange async execution</b></td>
 *    <td> {@link #execute(ForkJoinTask)}</td>
 *    <td> {@link ForkJoinTask#fork}</td>
 *  </tr>
 *  <tr>
 *    <td> <b>Await and obtain result</b></td>
 *    <td> {@link #invoke(ForkJoinTask)}</td>
 *    <td> {@link ForkJoinTask#invoke}</td>
 *  </tr>
 *  <tr>
 *    <td> <b>Arrange exec and obtain Future</b></td>
 *    <td> {@link #submit(ForkJoinTask)}</td>
 *    <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
 *  </tr>
 * </table>
 *
 * <p>The common pool is by default constructed with default
 * parameters, but these may be controlled by setting three
 * {@linkplain System#getProperty system properties}:
 * <ul>
 * <li>{@code java.util.concurrent.ForkJoinPool.common.parallelism}
 * - the parallelism level, a non-negative integer
 * <li>{@code java.util.concurrent.ForkJoinPool.common.threadFactory}
 * - the class name of a {@link ForkJoinWorkerThreadFactory}
 * <li>{@code java.util.concurrent.ForkJoinPool.common.exceptionHandler}
 * - the class name of a {@link UncaughtExceptionHandler}
 * </ul>
 * The system class loader is used to load these classes.
 * Upon any error in establishing these settings, default parameters
 * are used. It is possible to disable or limit the use of threads in
 * the common pool by setting the parallelism property to zero, and/or
 * using a factory that may return {@code null}.
 *
 * <p><b>Implementation notes</b>: This implementation restricts the
 * maximum number of running threads to 32767. Attempts to create
 * pools with greater than the maximum number result in
 * {@code IllegalArgumentException}.
 *
 * <p>This implementation rejects submitted tasks (that is, by throwing
 * {@link RejectedExecutionException}) only when the pool is shut down
 * or internal resources have been exhausted.
 *
 * @since 1.7
 * @author Doug Lea
 */
public class ForkJoinPool extends AbstractExecutorService {

    /*
     * Implementation Overview
     *
     * This class and its nested classes provide the main
     * functionality and control for a set of worker threads:
     * Submissions from non-FJ threads enter into submission queues.
     * Workers take these tasks and typically split them into subtasks
     * that may be stolen by other workers. Preference rules give
     * first priority to processing tasks from their own queues (LIFO
     * or FIFO, depending on mode), then to randomized FIFO steals of
     * tasks in other queues.
     *
     * WorkQueues
     * ==========
     *
     * Most operations occur within work-stealing queues (in nested
     * class WorkQueue).
These are special forms of Deques that 143 * support only three of the four possible end-operations -- push, 144 * pop, and poll (aka steal), under the further constraints that 145 * push and pop are called only from the owning thread (or, as 146 * extended here, under a lock), while poll may be called from 147 * other threads. (If you are unfamiliar with them, you probably 148 * want to read Herlihy and Shavit's book "The Art of 149 * Multiprocessor programming", chapter 16 describing these in 150 * more detail before proceeding.) The main work-stealing queue 151 * design is roughly similar to those in the papers "Dynamic 152 * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005 153 * (http://research.sun.com/scalable/pubs/index.html) and 154 * "Idempotent work stealing" by Michael, Saraswat, and Vechev, 155 * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). 156 * See also "Correct and Efficient Work-Stealing for Weak Memory 157 * Models" by Le, Pop, Cohen, and Nardelli, PPoPP 2013 158 * (http://www.di.ens.fr/~zappa/readings/ppopp13.pdf) for an 159 * analysis of memory ordering (atomic, volatile etc) issues. The 160 * main differences ultimately stem from GC requirements that we 161 * null out taken slots as soon as we can, to maintain as small a 162 * footprint as possible even in programs generating huge numbers 163 * of tasks. To accomplish this, we shift the CAS arbitrating pop 164 * vs poll (steal) from being on the indices ("base" and "top") to 165 * the slots themselves. So, both a successful pop and poll 166 * mainly entail a CAS of a slot from non-null to null. Because 167 * we rely on CASes of references, we do not need tag bits on base 168 * or top. They are simple ints as used in any circular 169 * array-based queue (see for example ArrayDeque). Updates to the 170 * indices must still be ordered in a way that guarantees that top 171 * == base means the queue is empty, but otherwise may err on the 172 * side of possibly making the queue appear nonempty when a push, 173 * pop, or poll have not fully committed. Note that this means 174 * that the poll operation, considered individually, is not 175 * wait-free. One thief cannot successfully continue until another 176 * in-progress one (or, if previously empty, a push) completes. 177 * However, in the aggregate, we ensure at least probabilistic 178 * non-blockingness. If an attempted steal fails, a thief always 179 * chooses a different random victim target to try next. So, in 180 * order for one thief to progress, it suffices for any 181 * in-progress poll or new push on any empty queue to 182 * complete. (This is why we normally use method pollAt and its 183 * variants that try once at the apparent base index, else 184 * consider alternative actions, rather than method poll.) 185 * 186 * This approach also enables support of a user mode in which local 187 * task processing is in FIFO, not LIFO order, simply by using 188 * poll rather than pop. This can be useful in message-passing 189 * frameworks in which tasks are never joined. However neither 190 * mode considers affinities, loads, cache localities, etc, so 191 * rarely provide the best possible performance on a given 192 * machine, but portably provide good throughput by averaging over 193 * these factors. (Further, even if we did try to use such 194 * information, we do not usually have a basis for exploiting it. 195 * For example, some sets of tasks profit from cache affinities, 196 * but others are harmed by cache pollution effects.) 
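     *
     * As a rough illustration of the slot-CAS arbitration described
     * above -- a simplified sketch using AtomicReferenceArray in place
     * of the Unsafe mechanics of class WorkQueue below, omitting
     * resizing, signalling, retries and most ordering subtleties:
     *
     *   class SketchDeque<E> {                       // illustration only
     *     final java.util.concurrent.atomic.AtomicReferenceArray<E> a =
     *       new java.util.concurrent.atomic.AtomicReferenceArray<E>(8192);
     *     volatile int base;                         // next slot to poll (steal)
     *     int top;                                   // next slot to push (owner)
     *     void push(E e) {                           // owner only
     *       a.lazySet(top & (a.length() - 1), e);    // cf. putOrderedObject
     *       ++top;
     *     }
     *     E pop() {                                  // owner only
     *       int s = top - 1, i = s & (a.length() - 1); E e;
     *       if (s - base >= 0 && (e = a.get(i)) != null &&
     *           a.compareAndSet(i, e, null)) {       // the slot CAS arbitrates
     *         top = s;
     *         return e;
     *       }
     *       return null;
     *     }
     *     E poll() {                                 // any thread; steals from base
     *       int b = base, i = b & (a.length() - 1); E e;
     *       if (b - top < 0 && (e = a.get(i)) != null &&
     *           a.compareAndSet(i, e, null)) {
     *         base = b + 1;
     *         return e;
     *       }
     *       return null;
     *     }
     *   }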
197 * 198 * WorkQueues are also used in a similar way for tasks submitted 199 * to the pool. We cannot mix these tasks in the same queues used 200 * for work-stealing (this would contaminate lifo/fifo 201 * processing). Instead, we randomly associate submission queues 202 * with submitting threads, using a form of hashing. The 203 * Submitter probe value serves as a hash code for 204 * choosing existing queues, and may be randomly repositioned upon 205 * contention with other submitters. In essence, submitters act 206 * like workers except that they are restricted to executing local 207 * tasks that they submitted. However, because most 208 * shared/external queue operations are more expensive than 209 * internal, and because, at steady state, external submitters 210 * will compete for CPU with workers, ForkJoinTask.join and 211 * related methods disable them from repeatedly helping to process 212 * tasks if all workers are active. Insertion of tasks in shared 213 * mode requires a lock (mainly to protect in the case of 214 * resizing) but we use only a simple spinlock (using bits in 215 * field qlock), because submitters encountering a busy queue move 216 * on to try or create other queues -- they block only when 217 * creating and registering new queues. 218 * 219 * Management 220 * ========== 221 * 222 * The main throughput advantages of work-stealing stem from 223 * decentralized control -- workers mostly take tasks from 224 * themselves or each other. We cannot negate this in the 225 * implementation of other management responsibilities. The main 226 * tactic for avoiding bottlenecks is packing nearly all 227 * essentially atomic control state into two volatile variables 228 * that are by far most often read (not written) as status and 229 * consistency checks. 230 * 231 * Field "ctl" contains 64 bits holding all the information needed 232 * to atomically decide to add, inactivate, enqueue (on an event 233 * queue), dequeue, and/or re-activate workers. To enable this 234 * packing, we restrict maximum parallelism to (1<<15)-1 (which is 235 * far in excess of normal operating range) to allow ids, counts, 236 * and their negations (used for thresholding) to fit into 16bit 237 * fields. 238 * 239 * Field "plock" is a form of sequence lock with a saturating 240 * shutdown bit (similarly for per-queue "qlocks"), mainly 241 * protecting updates to the workQueues array, as well as to 242 * enable shutdown. When used as a lock, it is normally only very 243 * briefly held, so is nearly always available after at most a 244 * brief spin, but we use a monitor-based backup strategy to 245 * block when needed. 246 * 247 * Recording WorkQueues. WorkQueues are recorded in the 248 * "workQueues" array that is created upon first use and expanded 249 * if necessary. Updates to the array while recording new workers 250 * and unrecording terminated ones are protected from each other 251 * by a lock but the array is otherwise concurrently readable, and 252 * accessed directly. To simplify index-based operations, the 253 * array size is always a power of two, and all readers must 254 * tolerate null slots. Worker queues are at odd indices. Shared 255 * (submission) queues are at even indices, up to a maximum of 64 256 * slots, to limit growth even if array needs to expand to add 257 * more workers. Grouping them together in this way simplifies and 258 * speeds up task scanning. 
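     *
     * For example, with the masks defined later in this file, an
     * external submitter whose per-thread probe/seed value is r picks
     * the shared queue slot
     *
     *   WorkQueue q = workQueues[r & (workQueues.length - 1) & SQMASK];
     *
     * (SQMASK = 0x007e keeps the index even and below 64), while
     * registerWorker places a worker with seed s at an odd index,
     * initially ((s << 1) | 1) masked by the array length, stepping
     * by an even probe increment on collision.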
259 * 260 * All worker thread creation is on-demand, triggered by task 261 * submissions, replacement of terminated workers, and/or 262 * compensation for blocked workers. However, all other support 263 * code is set up to work with other policies. To ensure that we 264 * do not hold on to worker references that would prevent GC, ALL 265 * accesses to workQueues are via indices into the workQueues 266 * array (which is one source of some of the messy code 267 * constructions here). In essence, the workQueues array serves as 268 * a weak reference mechanism. Thus for example the wait queue 269 * field of ctl stores indices, not references. Access to the 270 * workQueues in associated methods (for example signalWork) must 271 * both index-check and null-check the IDs. All such accesses 272 * ignore bad IDs by returning out early from what they are doing, 273 * since this can only be associated with termination, in which 274 * case it is OK to give up. All uses of the workQueues array 275 * also check that it is non-null (even if previously 276 * non-null). This allows nulling during termination, which is 277 * currently not necessary, but remains an option for 278 * resource-revocation-based shutdown schemes. It also helps 279 * reduce JIT issuance of uncommon-trap code, which tends to 280 * unnecessarily complicate control flow in some methods. 281 * 282 * Event Queuing. Unlike HPC work-stealing frameworks, we cannot 283 * let workers spin indefinitely scanning for tasks when none can 284 * be found immediately, and we cannot start/resume workers unless 285 * there appear to be tasks available. On the other hand, we must 286 * quickly prod them into action when new tasks are submitted or 287 * generated. In many usages, ramp-up time to activate workers is 288 * the main limiting factor in overall performance (this is 289 * compounded at program start-up by JIT compilation and 290 * allocation). So we try to streamline this as much as possible. 291 * We park/unpark workers after placing in an event wait queue 292 * when they cannot find work. This "queue" is actually a simple 293 * Treiber stack, headed by the "id" field of ctl, plus a 15bit 294 * counter value (that reflects the number of times a worker has 295 * been inactivated) to avoid ABA effects (we need only as many 296 * version numbers as worker threads). Successors are held in 297 * field WorkQueue.nextWait. Queuing deals with several intrinsic 298 * races, mainly that a task-producing thread can miss seeing (and 299 * signalling) another thread that gave up looking for work but 300 * has not yet entered the wait queue. We solve this by requiring 301 * a full sweep of all workers (via repeated calls to method 302 * scan()) both before and after a newly waiting worker is added 303 * to the wait queue. Because enqueued workers may actually be 304 * rescanning rather than waiting, we set and clear the "parker" 305 * field of WorkQueues to reduce unnecessary calls to unpark. 306 * (This requires a secondary recheck to avoid missed signals.) 307 * Note the unusual conventions about Thread.interrupts 308 * surrounding parking and other blocking: Because interrupts are 309 * used solely to alert threads to check termination, which is 310 * checked anyway upon blocking, we clear status (using 311 * Thread.interrupted) before any call to park, so that park does 312 * not immediately return due to status being set via some other 313 * unrelated call to interrupt in user code. 314 * 315 * Signalling. 
We create or wake up workers only when there 316 * appears to be at least one task they might be able to find and 317 * execute. When a submission is added or another worker adds a 318 * task to a queue that has fewer than two tasks, they signal 319 * waiting workers (or trigger creation of new ones if fewer than 320 * the given parallelism level -- signalWork). These primary 321 * signals are buttressed by others whenever other threads remove 322 * a task from a queue and notice that there are other tasks there 323 * as well. So in general, pools will be over-signalled. On most 324 * platforms, signalling (unpark) overhead time is noticeably 325 * long, and the time between signalling a thread and it actually 326 * making progress can be very noticeably long, so it is worth 327 * offloading these delays from critical paths as much as 328 * possible. Additionally, workers spin-down gradually, by staying 329 * alive so long as they see the ctl state changing. Similar 330 * stability-sensing techniques are also used before blocking in 331 * awaitJoin and helpComplete. 332 * 333 * Trimming workers. To release resources after periods of lack of 334 * use, a worker starting to wait when the pool is quiescent will 335 * time out and terminate if the pool has remained quiescent for a 336 * given period -- a short period if there are more threads than 337 * parallelism, longer as the number of threads decreases. This 338 * will slowly propagate, eventually terminating all workers after 339 * periods of non-use. 340 * 341 * Shutdown and Termination. A call to shutdownNow atomically sets 342 * a plock bit and then (non-atomically) sets each worker's 343 * qlock status, cancels all unprocessed tasks, and wakes up 344 * all waiting workers. Detecting whether termination should 345 * commence after a non-abrupt shutdown() call requires more work 346 * and bookkeeping. We need consensus about quiescence (i.e., that 347 * there is no more work). The active count provides a primary 348 * indication but non-abrupt shutdown still requires a rechecking 349 * scan for any workers that are inactive but not queued. 350 * 351 * Joining Tasks 352 * ============= 353 * 354 * Any of several actions may be taken when one worker is waiting 355 * to join a task stolen (or always held) by another. Because we 356 * are multiplexing many tasks on to a pool of workers, we can't 357 * just let them block (as in Thread.join). We also cannot just 358 * reassign the joiner's run-time stack with another and replace 359 * it later, which would be a form of "continuation", that even if 360 * possible is not necessarily a good idea since we sometimes need 361 * both an unblocked task and its continuation to progress. 362 * Instead we combine two tactics: 363 * 364 * Helping: Arranging for the joiner to execute some task that it 365 * would be running if the steal had not occurred. 366 * 367 * Compensating: Unless there are already enough live threads, 368 * method tryCompensate() may create or re-activate a spare 369 * thread to compensate for blocked joiners until they unblock. 370 * 371 * A third form (implemented in tryRemoveAndExec) amounts to 372 * helping a hypothetical compensator: If we can readily tell that 373 * a possible action of a compensator is to steal and execute the 374 * task being joined, the joining thread can do so directly, 375 * without the need for a compensation thread (although at the 376 * expense of larger run-time stacks, but the tradeoff is 377 * typically worthwhile). 
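     *
     * Put together -- and using hypothetical helper names purely for
     * illustration; the real logic is spread across awaitJoin,
     * tryHelpStealer, tryRemoveAndExec and tryCompensate with many
     * more checks -- a joining worker behaves roughly like:
     *
     *   void awaitDone(ForkJoinTask<?> task) {  // illustration only
     *     while (!task.isDone()) {
     *       if (helpOnce(task))                 // ran a task the joiner "owes"
     *         continue;
     *       if (addOrReleaseSpare())            // compensation succeeded
     *         blockUntilDone(task);             // blocking cannot now starve the pool
     *     }
     *   }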
378 * 379 * The ManagedBlocker extension API can't use helping so relies 380 * only on compensation in method awaitBlocker. 381 * 382 * The algorithm in tryHelpStealer entails a form of "linear" 383 * helping: Each worker records (in field currentSteal) the most 384 * recent task it stole from some other worker. Plus, it records 385 * (in field currentJoin) the task it is currently actively 386 * joining. Method tryHelpStealer uses these markers to try to 387 * find a worker to help (i.e., steal back a task from and execute 388 * it) that could hasten completion of the actively joined task. 389 * In essence, the joiner executes a task that would be on its own 390 * local deque had the to-be-joined task not been stolen. This may 391 * be seen as a conservative variant of the approach in Wagner & 392 * Calder "Leapfrogging: a portable technique for implementing 393 * efficient futures" SIGPLAN Notices, 1993 394 * (http://portal.acm.org/citation.cfm?id=155354). It differs in 395 * that: (1) We only maintain dependency links across workers upon 396 * steals, rather than use per-task bookkeeping. This sometimes 397 * requires a linear scan of workQueues array to locate stealers, 398 * but often doesn't because stealers leave hints (that may become 399 * stale/wrong) of where to locate them. It is only a hint 400 * because a worker might have had multiple steals and the hint 401 * records only one of them (usually the most current). Hinting 402 * isolates cost to when it is needed, rather than adding to 403 * per-task overhead. (2) It is "shallow", ignoring nesting and 404 * potentially cyclic mutual steals. (3) It is intentionally 405 * racy: field currentJoin is updated only while actively joining, 406 * which means that we miss links in the chain during long-lived 407 * tasks, GC stalls etc (which is OK since blocking in such cases 408 * is usually a good idea). (4) We bound the number of attempts 409 * to find work (see MAX_HELP) and fall back to suspending the 410 * worker and if necessary replacing it with another. 411 * 412 * It is impossible to keep exactly the target parallelism number 413 * of threads running at any given time. Determining the 414 * existence of conservatively safe helping targets, the 415 * availability of already-created spares, and the apparent need 416 * to create new spares are all racy, so we rely on multiple 417 * retries of each. Compensation in the apparent absence of 418 * helping opportunities is challenging to control on JVMs, where 419 * GC and other activities can stall progress of tasks that in 420 * turn stall out many other dependent tasks, without us being 421 * able to determine whether they will ever require compensation. 422 * Even though work-stealing otherwise encounters little 423 * degradation in the presence of more threads than cores, 424 * aggressively adding new threads in such cases entails risk of 425 * unwanted positive feedback control loops in which more threads 426 * cause more dependent stalls (as well as delayed progress of 427 * unblocked threads to the point that we know they are available) 428 * leading to more situations requiring more threads, and so 429 * on. This aspect of control can be seen as an (analytically 430 * intractable) game with an opponent that may choose the worst 431 * (for us) active thread to stall at any time. We take several 432 * precautions to bound losses (and thus bound gains), mainly in 433 * methods tryCompensate and awaitJoin. 
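     *
     * For reference, client code opts in to this compensation
     * mechanism through the public ManagedBlocker API defined later
     * in this file. A minimal illustrative blocker wrapping a lock
     * acquisition (mirroring the usual ManagedBlocker example) is:
     *
     *   class ManagedLocker implements ForkJoinPool.ManagedBlocker {
     *     final java.util.concurrent.locks.ReentrantLock lock;
     *     boolean hasLock = false;
     *     ManagedLocker(java.util.concurrent.locks.ReentrantLock lock) {
     *       this.lock = lock;
     *     }
     *     public boolean block() {
     *       if (!hasLock)
     *         lock.lock();
     *       return true;
     *     }
     *     public boolean isReleasable() {
     *       return hasLock || (hasLock = lock.tryLock());
     *     }
     *   }
     *
     * used as ForkJoinPool.managedBlock(new ManagedLocker(lock)), so
     * that a worker about to block can first be compensated for.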
434 * 435 * Common Pool 436 * =========== 437 * 438 * The static common pool always exists after static 439 * initialization. Since it (or any other created pool) need 440 * never be used, we minimize initial construction overhead and 441 * footprint to the setup of about a dozen fields, with no nested 442 * allocation. Most bootstrapping occurs within method 443 * fullExternalPush during the first submission to the pool. 444 * 445 * When external threads submit to the common pool, they can 446 * perform subtask processing (see externalHelpJoin and related 447 * methods). This caller-helps policy makes it sensible to set 448 * common pool parallelism level to one (or more) less than the 449 * total number of available cores, or even zero for pure 450 * caller-runs. We do not need to record whether external 451 * submissions are to the common pool -- if not, externalHelpJoin 452 * returns quickly (at the most helping to signal some common pool 453 * workers). These submitters would otherwise be blocked waiting 454 * for completion, so the extra effort (with liberally sprinkled 455 * task status checks) in inapplicable cases amounts to an odd 456 * form of limited spin-wait before blocking in ForkJoinTask.join. 457 * 458 * Style notes 459 * =========== 460 * 461 * There is a lot of representation-level coupling among classes 462 * ForkJoinPool, ForkJoinWorkerThread, and ForkJoinTask. The 463 * fields of WorkQueue maintain data structures managed by 464 * ForkJoinPool, so are directly accessed. There is little point 465 * trying to reduce this, since any associated future changes in 466 * representations will need to be accompanied by algorithmic 467 * changes anyway. Several methods intrinsically sprawl because 468 * they must accumulate sets of consistent reads of volatiles held 469 * in local variables. Methods signalWork() and scan() are the 470 * main bottlenecks, so are especially heavily 471 * micro-optimized/mangled. There are lots of inline assignments 472 * (of form "while ((local = field) != 0)") which are usually the 473 * simplest way to ensure the required read orderings (which are 474 * sometimes critical). This leads to a "C"-like style of listing 475 * declarations of these locals at the heads of methods or blocks. 476 * There are several occurrences of the unusual "do {} while 477 * (!cas...)" which is the simplest way to force an update of a 478 * CAS'ed variable. There are also other coding oddities (including 479 * several unnecessary-looking hoisted null checks) that help 480 * some methods perform reasonably even when interpreted (not 481 * compiled). 482 * 483 * The order of declarations in this file is: 484 * (1) Static utility functions 485 * (2) Nested (static) classes 486 * (3) Static fields 487 * (4) Fields, along with constants used when unpacking some of them 488 * (5) Internal control methods 489 * (6) Callbacks and other support for ForkJoinTask methods 490 * (7) Exported methods 491 * (8) Static block initializing statics in minimally dependent order 492 */ 493 // android-note: Removed references to CountedCompleters. 494 495 // Static utilities 496 497 /** 498 * If there is a security manager, makes sure caller has 499 * permission to modify threads. 500 */ checkPermission()501 private static void checkPermission() { 502 SecurityManager security = System.getSecurityManager(); 503 if (security != null) 504 security.checkPermission(modifyThreadPermission); 505 } 506 507 // Nested classes 508 509 /** 510 * Factory for creating new {@link ForkJoinWorkerThread}s. 
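     * For example (an illustrative sketch; the class name is
     * hypothetical), a factory that merely renames the pool's workers
     * can delegate to the default factory:
     * <pre> {@code
     * class RenamingFactory implements ForkJoinPool.ForkJoinWorkerThreadFactory {
     *   public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
     *     ForkJoinWorkerThread t =
     *       ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
     *     t.setName("example-worker-" + t.getPoolIndex());
     *     return t;
     *   }
     * }}</pre>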
511 * A {@code ForkJoinWorkerThreadFactory} must be defined and used 512 * for {@code ForkJoinWorkerThread} subclasses that extend base 513 * functionality or initialize threads with different contexts. 514 */ 515 public static interface ForkJoinWorkerThreadFactory { 516 /** 517 * Returns a new worker thread operating in the given pool. 518 * 519 * @param pool the pool this thread works in 520 * @return the new worker thread 521 * @throws NullPointerException if the pool is null 522 */ newThread(ForkJoinPool pool)523 public ForkJoinWorkerThread newThread(ForkJoinPool pool); 524 } 525 526 /** 527 * Default ForkJoinWorkerThreadFactory implementation; creates a 528 * new ForkJoinWorkerThread. 529 */ 530 static final class DefaultForkJoinWorkerThreadFactory 531 implements ForkJoinWorkerThreadFactory { newThread(ForkJoinPool pool)532 public final ForkJoinWorkerThread newThread(ForkJoinPool pool) { 533 return new ForkJoinWorkerThread(pool); 534 } 535 } 536 537 /** 538 * Class for artificial tasks that are used to replace the target 539 * of local joins if they are removed from an interior queue slot 540 * in WorkQueue.tryRemoveAndExec. We don't need the proxy to 541 * actually do anything beyond having a unique identity. 542 */ 543 static final class EmptyTask extends ForkJoinTask<Void> { 544 private static final long serialVersionUID = -7721805057305804111L; EmptyTask()545 EmptyTask() { status = ForkJoinTask.NORMAL; } // force done getRawResult()546 public final Void getRawResult() { return null; } setRawResult(Void x)547 public final void setRawResult(Void x) {} exec()548 public final boolean exec() { return true; } 549 } 550 551 /** 552 * Queues supporting work-stealing as well as external task 553 * submission. See above for main rationale and algorithms. 554 * Implementation relies heavily on "Unsafe" intrinsics 555 * and selective use of "volatile": 556 * 557 * Field "base" is the index (mod array.length) of the least valid 558 * queue slot, which is always the next position to steal (poll) 559 * from if nonempty. Reads and writes require volatile orderings 560 * but not CAS, because updates are only performed after slot 561 * CASes. 562 * 563 * Field "top" is the index (mod array.length) of the next queue 564 * slot to push to or pop from. It is written only by owner thread 565 * for push, or under lock for external/shared push, and accessed 566 * by other threads only after reading (volatile) base. Both top 567 * and base are allowed to wrap around on overflow, but (top - 568 * base) (or more commonly -(base - top) to force volatile read of 569 * base before top) still estimates size. The lock ("qlock") is 570 * forced to -1 on termination, causing all further lock attempts 571 * to fail. (Note: we don't need CAS for termination state because 572 * upon pool shutdown, all shared-queues will stop being used 573 * anyway.) Nearly all lock bodies are set up so that exceptions 574 * within lock bodies are "impossible" (modulo JVM errors that 575 * would cause failure anyway.) 576 * 577 * The array slots are read and written using the emulation of 578 * volatiles/atomics provided by Unsafe. Insertions must in 579 * general use putOrderedObject as a form of releasing store to 580 * ensure that all writes to the task object are ordered before 581 * its publication in the queue. All removals entail a CAS to 582 * null. The array is always a power of two. 
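     * (Concretely, the slot for logical index i of an array a is
     * addressed throughout the methods below at byte offset
     * ((i & (a.length - 1)) << ASHIFT) + ABASE, where ASHIFT and
     * ABASE are derived from Unsafe array geometry in the static
     * initializer.)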
To ensure safety of 583 * Unsafe array operations, all accesses perform explicit null 584 * checks and implicit bounds checks via power-of-two masking. 585 * 586 * In addition to basic queuing support, this class contains 587 * fields described elsewhere to control execution. It turns out 588 * to work better memory-layout-wise to include them in this class 589 * rather than a separate class. 590 * 591 * Performance on most platforms is very sensitive to placement of 592 * instances of both WorkQueues and their arrays -- we absolutely 593 * do not want multiple WorkQueue instances or multiple queue 594 * arrays sharing cache lines. (It would be best for queue objects 595 * and their arrays to share, but there is nothing available to 596 * help arrange that). The @Contended annotation alerts JVMs to 597 * try to keep instances apart. 598 */ 599 static final class WorkQueue { 600 /** 601 * Capacity of work-stealing queue array upon initialization. 602 * Must be a power of two; at least 4, but should be larger to 603 * reduce or eliminate cacheline sharing among queues. 604 * Currently, it is much larger, as a partial workaround for 605 * the fact that JVMs often place arrays in locations that 606 * share GC bookkeeping (especially cardmarks) such that 607 * per-write accesses encounter serious memory contention. 608 */ 609 static final int INITIAL_QUEUE_CAPACITY = 1 << 13; 610 611 /** 612 * Maximum size for queue arrays. Must be a power of two less 613 * than or equal to 1 << (31 - width of array entry) to ensure 614 * lack of wraparound of index calculations, but defined to a 615 * value a bit less than this to help users trap runaway 616 * programs before saturating systems. 617 */ 618 static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M 619 620 // Heuristic padding to ameliorate unfortunate memory placements 621 volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06; 622 623 volatile int eventCount; // encoded inactivation count; < 0 if inactive 624 int nextWait; // encoded record of next event waiter 625 int nsteals; // number of steals 626 int hint; // steal index hint 627 short poolIndex; // index of this queue in pool 628 final short mode; // 0: lifo, > 0: fifo, < 0: shared 629 volatile int qlock; // 1: locked, -1: terminate; else 0 630 volatile int base; // index of next slot for poll 631 int top; // index of next slot for push 632 ForkJoinTask<?>[] array; // the elements (initially unallocated) 633 final ForkJoinPool pool; // the containing pool (may be null) 634 final ForkJoinWorkerThread owner; // owning thread or null if shared 635 volatile Thread parker; // == owner during call to park; else null 636 volatile ForkJoinTask<?> currentJoin; // task being joined in awaitJoin 637 ForkJoinTask<?> currentSteal; // current non-local task being executed 638 639 volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17; 640 volatile Object pad18, pad19, pad1a, pad1b, pad1c, pad1d; 641 WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode, int seed)642 WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode, 643 int seed) { 644 this.pool = pool; 645 this.owner = owner; 646 this.mode = (short)mode; 647 this.hint = seed; // store initial seed for runWorker 648 // Place indices in the center of array (that is not yet allocated) 649 base = top = INITIAL_QUEUE_CAPACITY >>> 1; 650 } 651 652 /** 653 * Returns the approximate number of tasks in the queue. 
654 */ queueSize()655 final int queueSize() { 656 int n = base - top; // non-owner callers must read base first 657 return (n >= 0) ? 0 : -n; // ignore transient negative 658 } 659 660 /** 661 * Provides a more accurate estimate of whether this queue has 662 * any tasks than does queueSize, by checking whether a 663 * near-empty queue has at least one unclaimed task. 664 */ isEmpty()665 final boolean isEmpty() { 666 ForkJoinTask<?>[] a; int m, s; 667 int n = base - (s = top); 668 return (n >= 0 || 669 (n == -1 && 670 ((a = array) == null || 671 (m = a.length - 1) < 0 || 672 U.getObject 673 (a, (long)((m & (s - 1)) << ASHIFT) + ABASE) == null))); 674 } 675 676 /** 677 * Pushes a task. Call only by owner in unshared queues. (The 678 * shared-queue version is embedded in method externalPush.) 679 * 680 * @param task the task. Caller must ensure non-null. 681 * @throws RejectedExecutionException if array cannot be resized 682 */ push(ForkJoinTask<?> task)683 final void push(ForkJoinTask<?> task) { 684 ForkJoinTask<?>[] a; ForkJoinPool p; 685 int s = top, n; 686 if ((a = array) != null) { // ignore if queue removed 687 int m = a.length - 1; 688 U.putOrderedObject(a, ((m & s) << ASHIFT) + ABASE, task); 689 if ((n = (top = s + 1) - base) <= 2) 690 (p = pool).signalWork(p.workQueues, this); 691 else if (n >= m) 692 growArray(); 693 } 694 } 695 696 /** 697 * Initializes or doubles the capacity of array. Call either 698 * by owner or with lock held -- it is OK for base, but not 699 * top, to move while resizings are in progress. 700 */ growArray()701 final ForkJoinTask<?>[] growArray() { 702 ForkJoinTask<?>[] oldA = array; 703 int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY; 704 if (size > MAXIMUM_QUEUE_CAPACITY) 705 throw new RejectedExecutionException("Queue capacity exceeded"); 706 int oldMask, t, b; 707 ForkJoinTask<?>[] a = array = new ForkJoinTask<?>[size]; 708 if (oldA != null && (oldMask = oldA.length - 1) >= 0 && 709 (t = top) - (b = base) > 0) { 710 int mask = size - 1; 711 do { 712 ForkJoinTask<?> x; 713 int oldj = ((b & oldMask) << ASHIFT) + ABASE; 714 int j = ((b & mask) << ASHIFT) + ABASE; 715 x = (ForkJoinTask<?>)U.getObjectVolatile(oldA, oldj); 716 if (x != null && 717 U.compareAndSwapObject(oldA, oldj, x, null)) 718 U.putObjectVolatile(a, j, x); 719 } while (++b != t); 720 } 721 return a; 722 } 723 724 /** 725 * Takes next task, if one exists, in LIFO order. Call only 726 * by owner in unshared queues. 727 */ pop()728 final ForkJoinTask<?> pop() { 729 ForkJoinTask<?>[] a; ForkJoinTask<?> t; int m; 730 if ((a = array) != null && (m = a.length - 1) >= 0) { 731 for (int s; (s = top - 1) - base >= 0;) { 732 long j = ((m & s) << ASHIFT) + ABASE; 733 if ((t = (ForkJoinTask<?>)U.getObject(a, j)) == null) 734 break; 735 if (U.compareAndSwapObject(a, j, t, null)) { 736 top = s; 737 return t; 738 } 739 } 740 } 741 return null; 742 } 743 744 /** 745 * Takes a task in FIFO order if b is base of queue and a task 746 * can be claimed without contention. Specialized versions 747 * appear in ForkJoinPool methods scan and tryHelpStealer. 
748 */ pollAt(int b)749 final ForkJoinTask<?> pollAt(int b) { 750 ForkJoinTask<?> t; ForkJoinTask<?>[] a; 751 if ((a = array) != null) { 752 int j = (((a.length - 1) & b) << ASHIFT) + ABASE; 753 if ((t = (ForkJoinTask<?>)U.getObjectVolatile(a, j)) != null && 754 base == b && U.compareAndSwapObject(a, j, t, null)) { 755 U.putOrderedInt(this, QBASE, b + 1); 756 return t; 757 } 758 } 759 return null; 760 } 761 762 /** 763 * Takes next task, if one exists, in FIFO order. 764 */ poll()765 final ForkJoinTask<?> poll() { 766 ForkJoinTask<?>[] a; int b; ForkJoinTask<?> t; 767 while ((b = base) - top < 0 && (a = array) != null) { 768 int j = (((a.length - 1) & b) << ASHIFT) + ABASE; 769 t = (ForkJoinTask<?>)U.getObjectVolatile(a, j); 770 if (t != null) { 771 if (U.compareAndSwapObject(a, j, t, null)) { 772 U.putOrderedInt(this, QBASE, b + 1); 773 return t; 774 } 775 } 776 else if (base == b) { 777 if (b + 1 == top) 778 break; 779 Thread.yield(); // wait for lagging update (very rare) 780 } 781 } 782 return null; 783 } 784 785 /** 786 * Takes next task, if one exists, in order specified by mode. 787 */ nextLocalTask()788 final ForkJoinTask<?> nextLocalTask() { 789 return mode == 0 ? pop() : poll(); 790 } 791 792 /** 793 * Returns next task, if one exists, in order specified by mode. 794 */ peek()795 final ForkJoinTask<?> peek() { 796 ForkJoinTask<?>[] a = array; int m; 797 if (a == null || (m = a.length - 1) < 0) 798 return null; 799 int i = mode == 0 ? top - 1 : base; 800 int j = ((i & m) << ASHIFT) + ABASE; 801 return (ForkJoinTask<?>)U.getObjectVolatile(a, j); 802 } 803 804 /** 805 * Pops the given task only if it is at the current top. 806 * (A shared version is available only via FJP.tryExternalUnpush) 807 */ tryUnpush(ForkJoinTask<?> t)808 final boolean tryUnpush(ForkJoinTask<?> t) { 809 ForkJoinTask<?>[] a; int s; 810 if ((a = array) != null && (s = top) != base && 811 U.compareAndSwapObject 812 (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) { 813 top = s; 814 return true; 815 } 816 return false; 817 } 818 819 /** 820 * Removes and cancels all known tasks, ignoring any exceptions. 821 */ cancelAll()822 final void cancelAll() { 823 ForkJoinTask.cancelIgnoringExceptions(currentJoin); 824 ForkJoinTask.cancelIgnoringExceptions(currentSteal); 825 for (ForkJoinTask<?> t; (t = poll()) != null; ) 826 ForkJoinTask.cancelIgnoringExceptions(t); 827 } 828 829 // Specialized execution methods 830 831 /** 832 * Polls and runs tasks until empty. 833 */ pollAndExecAll()834 final void pollAndExecAll() { 835 for (ForkJoinTask<?> t; (t = poll()) != null;) 836 t.doExec(); 837 } 838 839 /** 840 * Executes a top-level task and any local tasks remaining 841 * after execution. 842 */ runTask(ForkJoinTask<?> task)843 final void runTask(ForkJoinTask<?> task) { 844 if ((currentSteal = task) != null) { 845 task.doExec(); 846 ForkJoinTask<?>[] a = array; 847 int md = mode; 848 ++nsteals; 849 currentSteal = null; 850 if (md != 0) 851 pollAndExecAll(); 852 else if (a != null) { 853 int s, m = a.length - 1; 854 while ((s = top - 1) - base >= 0) { 855 long i = ((m & s) << ASHIFT) + ABASE; 856 ForkJoinTask<?> t = (ForkJoinTask<?>)U.getObject(a, i); 857 if (t == null) 858 break; 859 if (U.compareAndSwapObject(a, i, t, null)) { 860 top = s; 861 t.doExec(); 862 } 863 } 864 } 865 } 866 } 867 868 /** 869 * If present, removes from queue and executes the given task, 870 * or any other cancelled task. Returns (true) on any CAS 871 * or consistency check failure so caller can retry. 
872 * 873 * @return false if no progress can be made, else true 874 */ tryRemoveAndExec(ForkJoinTask<?> task)875 final boolean tryRemoveAndExec(ForkJoinTask<?> task) { 876 boolean stat; 877 ForkJoinTask<?>[] a; int m, s, b, n; 878 if (task != null && (a = array) != null && (m = a.length - 1) >= 0 && 879 (n = (s = top) - (b = base)) > 0) { 880 boolean removed = false, empty = true; 881 stat = true; 882 for (ForkJoinTask<?> t;;) { // traverse from s to b 883 long j = ((--s & m) << ASHIFT) + ABASE; 884 t = (ForkJoinTask<?>)U.getObject(a, j); 885 if (t == null) // inconsistent length 886 break; 887 else if (t == task) { 888 if (s + 1 == top) { // pop 889 if (!U.compareAndSwapObject(a, j, task, null)) 890 break; 891 top = s; 892 removed = true; 893 } 894 else if (base == b) // replace with proxy 895 removed = U.compareAndSwapObject(a, j, task, 896 new EmptyTask()); 897 break; 898 } 899 else if (t.status >= 0) 900 empty = false; 901 else if (s + 1 == top) { // pop and throw away 902 if (U.compareAndSwapObject(a, j, t, null)) 903 top = s; 904 break; 905 } 906 if (--n == 0) { 907 if (!empty && base == b) 908 stat = false; 909 break; 910 } 911 } 912 if (removed) 913 task.doExec(); 914 } 915 else 916 stat = false; 917 return stat; 918 } 919 920 /** 921 * Tries to poll for and execute the given task or any other 922 * task in its CountedCompleter computation. 923 */ pollAndExecCC(CountedCompleter<?> root)924 final boolean pollAndExecCC(CountedCompleter<?> root) { 925 ForkJoinTask<?>[] a; int b; Object o; CountedCompleter<?> t, r; 926 if ((b = base) - top < 0 && (a = array) != null) { 927 long j = (((a.length - 1) & b) << ASHIFT) + ABASE; 928 if ((o = U.getObjectVolatile(a, j)) == null) 929 return true; // retry 930 if (o instanceof CountedCompleter) { 931 for (t = (CountedCompleter<?>)o, r = t;;) { 932 if (r == root) { 933 if (base == b && 934 U.compareAndSwapObject(a, j, t, null)) { 935 U.putOrderedInt(this, QBASE, b + 1); 936 t.doExec(); 937 } 938 return true; 939 } 940 else if ((r = r.completer) == null) 941 break; // not part of root computation 942 } 943 } 944 } 945 return false; 946 } 947 948 /** 949 * Tries to pop and execute the given task or any other task 950 * in its CountedCompleter computation. 
951 */ externalPopAndExecCC(CountedCompleter<?> root)952 final boolean externalPopAndExecCC(CountedCompleter<?> root) { 953 ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r; 954 if (base - (s = top) < 0 && (a = array) != null) { 955 long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE; 956 if ((o = U.getObject(a, j)) instanceof CountedCompleter) { 957 for (t = (CountedCompleter<?>)o, r = t;;) { 958 if (r == root) { 959 if (U.compareAndSwapInt(this, QLOCK, 0, 1)) { 960 if (top == s && array == a && 961 U.compareAndSwapObject(a, j, t, null)) { 962 top = s - 1; 963 qlock = 0; 964 t.doExec(); 965 } 966 else 967 qlock = 0; 968 } 969 return true; 970 } 971 else if ((r = r.completer) == null) 972 break; 973 } 974 } 975 } 976 return false; 977 } 978 979 /** 980 * Internal version 981 */ internalPopAndExecCC(CountedCompleter<?> root)982 final boolean internalPopAndExecCC(CountedCompleter<?> root) { 983 ForkJoinTask<?>[] a; int s; Object o; CountedCompleter<?> t, r; 984 if (base - (s = top) < 0 && (a = array) != null) { 985 long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE; 986 if ((o = U.getObject(a, j)) instanceof CountedCompleter) { 987 for (t = (CountedCompleter<?>)o, r = t;;) { 988 if (r == root) { 989 if (U.compareAndSwapObject(a, j, t, null)) { 990 top = s - 1; 991 t.doExec(); 992 } 993 return true; 994 } 995 else if ((r = r.completer) == null) 996 break; 997 } 998 } 999 } 1000 return false; 1001 } 1002 1003 /** 1004 * Returns true if owned and not known to be blocked. 1005 */ isApparentlyUnblocked()1006 final boolean isApparentlyUnblocked() { 1007 Thread wt; Thread.State s; 1008 return (eventCount >= 0 && 1009 (wt = owner) != null && 1010 (s = wt.getState()) != Thread.State.BLOCKED && 1011 s != Thread.State.WAITING && 1012 s != Thread.State.TIMED_WAITING); 1013 } 1014 1015 // Unsafe mechanics 1016 private static final sun.misc.Unsafe U; 1017 private static final long QBASE; 1018 private static final long QLOCK; 1019 private static final int ABASE; 1020 private static final int ASHIFT; 1021 static { 1022 try { 1023 U = sun.misc.Unsafe.getUnsafe(); 1024 Class<?> k = WorkQueue.class; 1025 Class<?> ak = ForkJoinTask[].class; 1026 QBASE = U.objectFieldOffset 1027 (k.getDeclaredField("base")); 1028 QLOCK = U.objectFieldOffset 1029 (k.getDeclaredField("qlock")); 1030 ABASE = U.arrayBaseOffset(ak); 1031 int scale = U.arrayIndexScale(ak); 1032 if ((scale & (scale - 1)) != 0) 1033 throw new Error("data type scale not a power of two"); 1034 ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); 1035 } catch (Exception e) { 1036 throw new Error(e); 1037 } 1038 } 1039 } 1040 1041 // static fields (initialized in static initializer below) 1042 1043 /** 1044 * Per-thread submission bookkeeping. Shared across all pools 1045 * to reduce ThreadLocal pollution and because random motion 1046 * to avoid contention in one pool is likely to hold for others. 1047 * Lazily initialized on first submission (but null-checked 1048 * in other contexts to avoid unnecessary initialization). 1049 */ 1050 static final ThreadLocal<Submitter> submitters; 1051 1052 /** 1053 * Creates a new ForkJoinWorkerThread. This factory is used unless 1054 * overridden in ForkJoinPool constructors. 1055 */ 1056 public static final ForkJoinWorkerThreadFactory 1057 defaultForkJoinWorkerThreadFactory; 1058 1059 /** 1060 * Permission required for callers of methods that may start or 1061 * kill threads. 1062 */ 1063 private static final RuntimePermission modifyThreadPermission; 1064 1065 /** 1066 * Common (static) pool. 
Non-null for public use unless a static 1067 * construction exception, but internal usages null-check on use 1068 * to paranoically avoid potential initialization circularities 1069 * as well as to simplify generated code. 1070 */ 1071 static final ForkJoinPool common; 1072 1073 /** 1074 * Common pool parallelism. To allow simpler use and management 1075 * when common pool threads are disabled, we allow the underlying 1076 * common.parallelism field to be zero, but in that case still report 1077 * parallelism as 1 to reflect resulting caller-runs mechanics. 1078 */ 1079 static final int commonParallelism; 1080 1081 /** 1082 * Sequence number for creating workerNamePrefix. 1083 */ 1084 private static int poolNumberSequence; 1085 1086 /** 1087 * Returns the next sequence number. We don't expect this to 1088 * ever contend, so use simple builtin sync. 1089 */ nextPoolId()1090 private static final synchronized int nextPoolId() { 1091 return ++poolNumberSequence; 1092 } 1093 1094 // static constants 1095 1096 /** 1097 * Initial timeout value (in nanoseconds) for the thread 1098 * triggering quiescence to park waiting for new work. On timeout, 1099 * the thread will instead try to shrink the number of 1100 * workers. The value should be large enough to avoid overly 1101 * aggressive shrinkage during most transient stalls (long GCs 1102 * etc). 1103 */ 1104 private static final long IDLE_TIMEOUT = 2000L * 1000L * 1000L; // 2sec 1105 1106 /** 1107 * Timeout value when there are more threads than parallelism level 1108 */ 1109 private static final long FAST_IDLE_TIMEOUT = 200L * 1000L * 1000L; 1110 1111 /** 1112 * Tolerance for idle timeouts, to cope with timer undershoots 1113 */ 1114 private static final long TIMEOUT_SLOP = 2000000L; 1115 1116 /** 1117 * The maximum stolen->joining link depth allowed in method 1118 * tryHelpStealer. Must be a power of two. Depths for legitimate 1119 * chains are unbounded, but we use a fixed constant to avoid 1120 * (otherwise unchecked) cycles and to bound staleness of 1121 * traversal parameters at the expense of sometimes blocking when 1122 * we could be helping. 1123 */ 1124 private static final int MAX_HELP = 64; 1125 1126 /** 1127 * Increment for seed generators. See class ThreadLocal for 1128 * explanation. 1129 */ 1130 private static final int SEED_INCREMENT = 0x61c88647; 1131 1132 /* 1133 * Bits and masks for control variables 1134 * 1135 * Field ctl is a long packed with: 1136 * AC: Number of active running workers minus target parallelism (16 bits) 1137 * TC: Number of total workers minus target parallelism (16 bits) 1138 * ST: true if pool is terminating (1 bit) 1139 * EC: the wait count of top waiting thread (15 bits) 1140 * ID: poolIndex of top of Treiber stack of waiters (16 bits) 1141 * 1142 * When convenient, we can extract the upper 32 bits of counts and 1143 * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = 1144 * (int)ctl. The ec field is never accessed alone, but always 1145 * together with id and st. The offsets of counts by the target 1146 * parallelism and the positionings of fields makes it possible to 1147 * perform the most common checks via sign tests of fields: When 1148 * ac is negative, there are not enough active workers, when tc is 1149 * negative, there are not enough total workers, and when e is 1150 * negative, the pool is terminating. To deal with these possibly 1151 * negative fields, we use casts in and out of "short" and/or 1152 * signed shifts to maintain signedness. 
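     *
     * For example, using the shifts and masks declared just below
     * this comment, the common checks reduce to sign tests:
     *
     *   long c = ctl;
     *   int u = (int)(c >>> 32);     // AC in high 16 bits of u, TC in low 16
     *   boolean tooFewActive = u < 0;                  // ac negative
     *   boolean tooFewTotal  = (u & SHORT_SIGN) != 0;  // tc negative
     *   int e = (int)c;              // queue/event word
     *   boolean terminating  = e < 0;                  // STOP_BIT set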
1153 * 1154 * When a thread is queued (inactivated), its eventCount field is 1155 * set negative, which is the only way to tell if a worker is 1156 * prevented from executing tasks, even though it must continue to 1157 * scan for them to avoid queuing races. Note however that 1158 * eventCount updates lag releases so usage requires care. 1159 * 1160 * Field plock is an int packed with: 1161 * SHUTDOWN: true if shutdown is enabled (1 bit) 1162 * SEQ: a sequence lock, with PL_LOCK bit set if locked (30 bits) 1163 * SIGNAL: set when threads may be waiting on the lock (1 bit) 1164 * 1165 * The sequence number enables simple consistency checks: 1166 * Staleness of read-only operations on the workQueues array can 1167 * be checked by comparing plock before vs after the reads. 1168 */ 1169 1170 // bit positions/shifts for fields 1171 private static final int AC_SHIFT = 48; 1172 private static final int TC_SHIFT = 32; 1173 private static final int ST_SHIFT = 31; 1174 private static final int EC_SHIFT = 16; 1175 1176 // bounds 1177 private static final int SMASK = 0xffff; // short bits 1178 private static final int MAX_CAP = 0x7fff; // max #workers - 1 1179 private static final int EVENMASK = 0xfffe; // even short bits 1180 private static final int SQMASK = 0x007e; // max 64 (even) slots 1181 private static final int SHORT_SIGN = 1 << 15; 1182 private static final int INT_SIGN = 1 << 31; 1183 1184 // masks 1185 private static final long STOP_BIT = 0x0001L << ST_SHIFT; 1186 private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; 1187 private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; 1188 1189 // units for incrementing and decrementing 1190 private static final long TC_UNIT = 1L << TC_SHIFT; 1191 private static final long AC_UNIT = 1L << AC_SHIFT; 1192 1193 // masks and units for dealing with u = (int)(ctl >>> 32) 1194 private static final int UAC_SHIFT = AC_SHIFT - 32; 1195 private static final int UTC_SHIFT = TC_SHIFT - 32; 1196 private static final int UAC_MASK = SMASK << UAC_SHIFT; 1197 private static final int UTC_MASK = SMASK << UTC_SHIFT; 1198 private static final int UAC_UNIT = 1 << UAC_SHIFT; 1199 private static final int UTC_UNIT = 1 << UTC_SHIFT; 1200 1201 // masks and units for dealing with e = (int)ctl 1202 private static final int E_MASK = 0x7fffffff; // no STOP_BIT 1203 private static final int E_SEQ = 1 << EC_SHIFT; 1204 1205 // plock bits 1206 private static final int SHUTDOWN = 1 << 31; 1207 private static final int PL_LOCK = 2; 1208 private static final int PL_SIGNAL = 1; 1209 private static final int PL_SPINS = 1 << 8; 1210 1211 // access mode for WorkQueue 1212 static final int LIFO_QUEUE = 0; 1213 static final int FIFO_QUEUE = 1; 1214 static final int SHARED_QUEUE = -1; 1215 1216 // Heuristic padding to ameliorate unfortunate memory placements 1217 volatile long pad00, pad01, pad02, pad03, pad04, pad05, pad06; 1218 1219 // Instance fields 1220 volatile long stealCount; // collects worker counts 1221 volatile long ctl; // main pool control 1222 volatile int plock; // shutdown status and seqLock 1223 volatile int indexSeed; // worker/submitter index seed 1224 final short parallelism; // parallelism level 1225 final short mode; // LIFO/FIFO 1226 WorkQueue[] workQueues; // main registry 1227 final ForkJoinWorkerThreadFactory factory; 1228 final UncaughtExceptionHandler ueh; // per-worker UEH 1229 final String workerNamePrefix; // to create worker name string 1230 1231 volatile Object pad10, pad11, pad12, pad13, pad14, pad15, pad16, pad17; 1232 volatile Object 
pad18, pad19, pad1a, pad1b; 1233 1234 /** 1235 * Acquires the plock lock to protect worker array and related 1236 * updates. This method is called only if an initial CAS on plock 1237 * fails. This acts as a spinlock for normal cases, but falls back 1238 * to builtin monitor to block when (rarely) needed. This would be 1239 * a terrible idea for a highly contended lock, but works fine as 1240 * a more conservative alternative to a pure spinlock. 1241 */ acquirePlock()1242 private int acquirePlock() { 1243 int spins = PL_SPINS, ps, nps; 1244 for (;;) { 1245 if (((ps = plock) & PL_LOCK) == 0 && 1246 U.compareAndSwapInt(this, PLOCK, ps, nps = ps + PL_LOCK)) 1247 return nps; 1248 else if (spins >= 0) { 1249 if (ThreadLocalRandom.current().nextInt() >= 0) 1250 --spins; 1251 } 1252 else if (U.compareAndSwapInt(this, PLOCK, ps, ps | PL_SIGNAL)) { 1253 synchronized (this) { 1254 if ((plock & PL_SIGNAL) != 0) { 1255 try { 1256 wait(); 1257 } catch (InterruptedException ie) { 1258 try { 1259 Thread.currentThread().interrupt(); 1260 } catch (SecurityException ignore) { 1261 } 1262 } 1263 } 1264 else 1265 notifyAll(); 1266 } 1267 } 1268 } 1269 } 1270 1271 /** 1272 * Unlocks and signals any thread waiting for plock. Called only 1273 * when CAS of seq value for unlock fails. 1274 */ releasePlock(int ps)1275 private void releasePlock(int ps) { 1276 plock = ps; 1277 synchronized (this) { notifyAll(); } 1278 } 1279 1280 /** 1281 * Tries to create and start one worker if fewer than target 1282 * parallelism level exist. Adjusts counts etc on failure. 1283 */ tryAddWorker()1284 private void tryAddWorker() { 1285 long c; int u, e; 1286 while ((u = (int)((c = ctl) >>> 32)) < 0 && 1287 (u & SHORT_SIGN) != 0 && (e = (int)c) >= 0) { 1288 long nc = ((long)(((u + UTC_UNIT) & UTC_MASK) | 1289 ((u + UAC_UNIT) & UAC_MASK)) << 32) | (long)e; 1290 if (U.compareAndSwapLong(this, CTL, c, nc)) { 1291 ForkJoinWorkerThreadFactory fac; 1292 Throwable ex = null; 1293 ForkJoinWorkerThread wt = null; 1294 try { 1295 if ((fac = factory) != null && 1296 (wt = fac.newThread(this)) != null) { 1297 wt.start(); 1298 break; 1299 } 1300 } catch (Throwable rex) { 1301 ex = rex; 1302 } 1303 deregisterWorker(wt, ex); 1304 break; 1305 } 1306 } 1307 } 1308 1309 // Registering and deregistering workers 1310 1311 /** 1312 * Callback from ForkJoinWorkerThread to establish and record its 1313 * WorkQueue. To avoid scanning bias due to packing entries in 1314 * front of the workQueues array, we treat the array as a simple 1315 * power-of-two hash table using per-thread seed as hash, 1316 * expanding as needed. 
1317 * 1318 * @param wt the worker thread 1319 * @return the worker's queue 1320 */ registerWorker(ForkJoinWorkerThread wt)1321 final WorkQueue registerWorker(ForkJoinWorkerThread wt) { 1322 UncaughtExceptionHandler handler; WorkQueue[] ws; int s, ps; 1323 wt.setDaemon(true); 1324 if ((handler = ueh) != null) 1325 wt.setUncaughtExceptionHandler(handler); 1326 do {} while (!U.compareAndSwapInt(this, INDEXSEED, s = indexSeed, 1327 s += SEED_INCREMENT) || 1328 s == 0); // skip 0 1329 WorkQueue w = new WorkQueue(this, wt, mode, s); 1330 if (((ps = plock) & PL_LOCK) != 0 || 1331 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK)) 1332 ps = acquirePlock(); 1333 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN); 1334 try { 1335 if ((ws = workQueues) != null) { // skip if shutting down 1336 int n = ws.length, m = n - 1; 1337 int r = (s << 1) | 1; // use odd-numbered indices 1338 if (ws[r &= m] != null) { // collision 1339 int probes = 0; // step by approx half size 1340 int step = (n <= 4) ? 2 : ((n >>> 1) & EVENMASK) + 2; 1341 while (ws[r = (r + step) & m] != null) { 1342 if (++probes >= n) { 1343 workQueues = ws = Arrays.copyOf(ws, n <<= 1); 1344 m = n - 1; 1345 probes = 0; 1346 } 1347 } 1348 } 1349 w.poolIndex = (short)r; 1350 w.eventCount = r; // volatile write orders 1351 ws[r] = w; 1352 } 1353 } finally { 1354 if (!U.compareAndSwapInt(this, PLOCK, ps, nps)) 1355 releasePlock(nps); 1356 } 1357 wt.setName(workerNamePrefix.concat(Integer.toString(w.poolIndex >>> 1))); 1358 return w; 1359 } 1360 1361 /** 1362 * Final callback from terminating worker, as well as upon failure 1363 * to construct or start a worker. Removes record of worker from 1364 * array, and adjusts counts. If pool is shutting down, tries to 1365 * complete termination. 1366 * 1367 * @param wt the worker thread, or null if construction failed 1368 * @param ex the exception causing failure, or null if none 1369 */ deregisterWorker(ForkJoinWorkerThread wt, Throwable ex)1370 final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) { 1371 WorkQueue w = null; 1372 if (wt != null && (w = wt.workQueue) != null) { 1373 int ps; long sc; 1374 w.qlock = -1; // ensure set 1375 do {} while (!U.compareAndSwapLong(this, STEALCOUNT, 1376 sc = stealCount, 1377 sc + w.nsteals)); 1378 if (((ps = plock) & PL_LOCK) != 0 || 1379 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK)) 1380 ps = acquirePlock(); 1381 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN); 1382 try { 1383 int idx = w.poolIndex; 1384 WorkQueue[] ws = workQueues; 1385 if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) 1386 ws[idx] = null; 1387 } finally { 1388 if (!U.compareAndSwapInt(this, PLOCK, ps, nps)) 1389 releasePlock(nps); 1390 } 1391 } 1392 1393 long c; // adjust ctl counts 1394 do {} while (!U.compareAndSwapLong 1395 (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) | 1396 ((c - TC_UNIT) & TC_MASK) | 1397 (c & ~(AC_MASK|TC_MASK))))); 1398 1399 if (!tryTerminate(false, false) && w != null && w.array != null) { 1400 w.cancelAll(); // cancel remaining tasks 1401 WorkQueue[] ws; WorkQueue v; Thread p; int u, i, e; 1402 while ((u = (int)((c = ctl) >>> 32)) < 0 && (e = (int)c) >= 0) { 1403 if (e > 0) { // activate or create replacement 1404 if ((ws = workQueues) == null || 1405 (i = e & SMASK) >= ws.length || 1406 (v = ws[i]) == null) 1407 break; 1408 long nc = (((long)(v.nextWait & E_MASK)) | 1409 ((long)(u + UAC_UNIT) << 32)); 1410 if (v.eventCount != (e | INT_SIGN)) 1411 break; 1412 if (U.compareAndSwapLong(this, CTL, c, nc)) { 1413 
v.eventCount = (e + E_SEQ) & E_MASK; 1414 if ((p = v.parker) != null) 1415 U.unpark(p); 1416 break; 1417 } 1418 } 1419 else { 1420 if ((short)u < 0) 1421 tryAddWorker(); 1422 break; 1423 } 1424 } 1425 } 1426 if (ex == null) // help clean refs on way out 1427 ForkJoinTask.helpExpungeStaleExceptions(); 1428 else // rethrow 1429 ForkJoinTask.rethrow(ex); 1430 } 1431 1432 // Submissions 1433 1434 /** 1435 * Per-thread records for threads that submit to pools. Currently 1436 * holds only pseudo-random seed / index that is used to choose 1437 * submission queues in method externalPush. In the future, this may 1438 * also incorporate a means to implement different task rejection 1439 * and resubmission policies. 1440 * 1441 * Seeds for submitters and workers/workQueues work in basically 1442 * the same way but are initialized and updated using slightly 1443 * different mechanics. Both are initialized using the same 1444 * approach as in class ThreadLocal, where successive values are 1445 * unlikely to collide with previous values. Seeds are then 1446 * randomly modified upon collisions using xorshifts, which 1447 * requires a non-zero seed. 1448 */ 1449 static final class Submitter { 1450 int seed; 1451 Submitter(int s) { seed = s; } 1452 } 1453 1454 /** 1455 * Unless shutting down, adds the given task to a submission queue 1456 * at submitter's current queue index (modulo submission 1457 * range). Only the most common path is directly handled in this 1458 * method. All others are relayed to fullExternalPush. 1459 * 1460 * @param task the task. Caller must ensure non-null. 1461 */ 1462 final void externalPush(ForkJoinTask<?> task) { 1463 Submitter z = submitters.get(); 1464 WorkQueue q; int r, m, s, n, am; ForkJoinTask<?>[] a; 1465 int ps = plock; 1466 WorkQueue[] ws = workQueues; 1467 if (z != null && ps > 0 && ws != null && (m = (ws.length - 1)) >= 0 && 1468 (q = ws[m & (r = z.seed) & SQMASK]) != null && r != 0 && 1469 U.compareAndSwapInt(q, QLOCK, 0, 1)) { // lock 1470 if ((a = q.array) != null && 1471 (am = a.length - 1) > (n = (s = q.top) - q.base)) { 1472 int j = ((am & s) << ASHIFT) + ABASE; 1473 U.putOrderedObject(a, j, task); 1474 q.top = s + 1; // push on to deque 1475 q.qlock = 0; 1476 if (n <= 1) 1477 signalWork(ws, q); 1478 return; 1479 } 1480 q.qlock = 0; 1481 } 1482 fullExternalPush(task); 1483 } 1484 1485 /** 1486 * Full version of externalPush. This method is called, among 1487 * other times, upon the first submission of the first task to the 1488 * pool, so must perform secondary initialization. It also 1489 * detects first submission by an external thread by looking up 1490 * its ThreadLocal, and creates a new shared queue if the one at 1491 * index is empty or contended. The plock lock body must be 1492 * exception-free (so no try/finally) so we optimistically 1493 * allocate new queues outside the lock and throw them away if 1494 * (very rarely) not needed. 1495 * 1496 * Secondary initialization occurs when plock is zero, to create 1497 * workQueue array and set plock to a valid value. This lock body 1498 * must also be exception-free. Because the plock seq value can 1499 * eventually wrap around zero, this method harmlessly fails to 1500 * reinitialize if workQueues exists, while still advancing plock.
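     *
     * <p>For reference, the "move to a different index" step below
     * reseeds with the same xorshift recurrence used by worker
     * queues; a non-zero seed never maps to zero:
     * <pre> {@code
     * r ^= r << 13;
     * r ^= r >>> 17;
     * r ^= r << 5;}</pre>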
1501 */ fullExternalPush(ForkJoinTask<?> task)1502 private void fullExternalPush(ForkJoinTask<?> task) { 1503 int r = 0; // random index seed 1504 for (Submitter z = submitters.get();;) { 1505 WorkQueue[] ws; WorkQueue q; int ps, m, k; 1506 if (z == null) { 1507 if (U.compareAndSwapInt(this, INDEXSEED, r = indexSeed, 1508 r += SEED_INCREMENT) && r != 0) 1509 submitters.set(z = new Submitter(r)); 1510 } 1511 else if (r == 0) { // move to a different index 1512 r = z.seed; 1513 r ^= r << 13; // same xorshift as WorkQueues 1514 r ^= r >>> 17; 1515 z.seed = r ^= (r << 5); 1516 } 1517 if ((ps = plock) < 0) 1518 throw new RejectedExecutionException(); 1519 else if (ps == 0 || (ws = workQueues) == null || 1520 (m = ws.length - 1) < 0) { // initialize workQueues 1521 int p = parallelism; // find power of two table size 1522 int n = (p > 1) ? p - 1 : 1; // ensure at least 2 slots 1523 n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; 1524 n |= n >>> 8; n |= n >>> 16; n = (n + 1) << 1; 1525 WorkQueue[] nws = ((ws = workQueues) == null || ws.length == 0 ? 1526 new WorkQueue[n] : null); 1527 if (((ps = plock) & PL_LOCK) != 0 || 1528 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK)) 1529 ps = acquirePlock(); 1530 if (((ws = workQueues) == null || ws.length == 0) && nws != null) 1531 workQueues = nws; 1532 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN); 1533 if (!U.compareAndSwapInt(this, PLOCK, ps, nps)) 1534 releasePlock(nps); 1535 } 1536 else if ((q = ws[k = r & m & SQMASK]) != null) { 1537 if (q.qlock == 0 && U.compareAndSwapInt(q, QLOCK, 0, 1)) { 1538 ForkJoinTask<?>[] a = q.array; 1539 int s = q.top; 1540 boolean submitted = false; 1541 try { // locked version of push 1542 if ((a != null && a.length > s + 1 - q.base) || 1543 (a = q.growArray()) != null) { // must presize 1544 int j = (((a.length - 1) & s) << ASHIFT) + ABASE; 1545 U.putOrderedObject(a, j, task); 1546 q.top = s + 1; 1547 submitted = true; 1548 } 1549 } finally { 1550 q.qlock = 0; // unlock 1551 } 1552 if (submitted) { 1553 signalWork(ws, q); 1554 return; 1555 } 1556 } 1557 r = 0; // move on failure 1558 } 1559 else if (((ps = plock) & PL_LOCK) == 0) { // create new queue 1560 q = new WorkQueue(this, null, SHARED_QUEUE, r); 1561 q.poolIndex = (short)k; 1562 if (((ps = plock) & PL_LOCK) != 0 || 1563 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK)) 1564 ps = acquirePlock(); 1565 if ((ws = workQueues) != null && k < ws.length && ws[k] == null) 1566 ws[k] = q; 1567 int nps = (ps & SHUTDOWN) | ((ps + PL_LOCK) & ~SHUTDOWN); 1568 if (!U.compareAndSwapInt(this, PLOCK, ps, nps)) 1569 releasePlock(nps); 1570 } 1571 else 1572 r = 0; 1573 } 1574 } 1575 1576 // Maintaining ctl counts 1577 1578 /** 1579 * Increments active count; mainly called upon return from blocking. 1580 */ incrementActiveCount()1581 final void incrementActiveCount() { 1582 long c; 1583 do {} while (!U.compareAndSwapLong 1584 (this, CTL, c = ctl, ((c & ~AC_MASK) | 1585 ((c & AC_MASK) + AC_UNIT)))); 1586 } 1587 1588 /** 1589 * Tries to create or activate a worker if too few are active. 
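     *
     * <p>A brief orientation sketch, inferred from the loop below
     * rather than a separate specification, of how the packed
     * {@code ctl} word is read on this path:
     * <pre> {@code
     * long c = ctl;
     * int u = (int)(c >>> 32); // packed (active, total) worker counts
     * int e = (int)c;          // event count / index of a waiting worker
     * // u >= 0: enough active workers, nothing to signal
     * // e <= 0: no waiting workers; possibly tryAddWorker}</pre>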
1590 * 1591 * @param ws the worker array to use to find signallees 1592 * @param q if non-null, the queue holding tasks to be processed 1593 */ signalWork(WorkQueue[] ws, WorkQueue q)1594 final void signalWork(WorkQueue[] ws, WorkQueue q) { 1595 for (;;) { 1596 long c; int e, u, i; WorkQueue w; Thread p; 1597 if ((u = (int)((c = ctl) >>> 32)) >= 0) 1598 break; 1599 if ((e = (int)c) <= 0) { 1600 if ((short)u < 0) 1601 tryAddWorker(); 1602 break; 1603 } 1604 if (ws == null || ws.length <= (i = e & SMASK) || 1605 (w = ws[i]) == null) 1606 break; 1607 long nc = (((long)(w.nextWait & E_MASK)) | 1608 ((long)(u + UAC_UNIT)) << 32); 1609 int ne = (e + E_SEQ) & E_MASK; 1610 if (w.eventCount == (e | INT_SIGN) && 1611 U.compareAndSwapLong(this, CTL, c, nc)) { 1612 w.eventCount = ne; 1613 if ((p = w.parker) != null) 1614 U.unpark(p); 1615 break; 1616 } 1617 if (q != null && q.base >= q.top) 1618 break; 1619 } 1620 } 1621 1622 // Scanning for tasks 1623 1624 /** 1625 * Top-level runloop for workers, called by ForkJoinWorkerThread.run. 1626 */ runWorker(WorkQueue w)1627 final void runWorker(WorkQueue w) { 1628 w.growArray(); // allocate queue 1629 for (int r = w.hint; scan(w, r) == 0; ) { 1630 r ^= r << 13; r ^= r >>> 17; r ^= r << 5; // xorshift 1631 } 1632 } 1633 1634 /** 1635 * Scans for and, if found, runs one task, else possibly 1636 * inactivates the worker. This method operates on single reads of 1637 * volatile state and is designed to be re-invoked continuously, 1638 * in part because it returns upon detecting inconsistencies, 1639 * contention, or state changes that indicate possible success on 1640 * re-invocation. 1641 * 1642 * The scan searches for tasks across queues starting at a random 1643 * index, checking each at least twice. The scan terminates upon 1644 * either finding a non-empty queue, or completing the sweep. If 1645 * the worker is not inactivated, it takes and runs a task from 1646 * this queue. Otherwise, if not activated, it tries to activate 1647 * itself or some other worker by signalling. On failure to find a 1648 * task, returns (for retry) if pool state may have changed during 1649 * an empty scan, or tries to inactivate if active, else possibly 1650 * blocks or terminates via method awaitWork. 
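     *
     * <p>The "at least twice" property corresponds to the scan
     * countdown in the loop below, which starts at roughly twice the
     * table size ({@code probe} here is only a stand-in for the
     * actual loop body):
     * <pre> {@code
     * for (int j = m + m + 1; j >= 0; --j)
     *     probe(ws[(r - j) & m]); // each of the m+1 slots seen ~2 times}</pre>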
1651 * 1652 * @param w the worker (via its WorkQueue) 1653 * @param r a random seed 1654 * @return worker qlock status if would have waited, else 0 1655 */ scan(WorkQueue w, int r)1656 private final int scan(WorkQueue w, int r) { 1657 WorkQueue[] ws; int m; 1658 long c = ctl; // for consistency check 1659 if ((ws = workQueues) != null && (m = ws.length - 1) >= 0 && w != null) { 1660 for (int j = m + m + 1, ec = w.eventCount;;) { 1661 WorkQueue q; int b, e; ForkJoinTask<?>[] a; ForkJoinTask<?> t; 1662 if ((q = ws[(r - j) & m]) != null && 1663 (b = q.base) - q.top < 0 && (a = q.array) != null) { 1664 long i = (((a.length - 1) & b) << ASHIFT) + ABASE; 1665 if ((t = ((ForkJoinTask<?>) 1666 U.getObjectVolatile(a, i))) != null) { 1667 if (ec < 0) 1668 helpRelease(c, ws, w, q, b); 1669 else if (q.base == b && 1670 U.compareAndSwapObject(a, i, t, null)) { 1671 U.putOrderedInt(q, QBASE, b + 1); 1672 if ((b + 1) - q.top < 0) 1673 signalWork(ws, q); 1674 w.runTask(t); 1675 } 1676 } 1677 break; 1678 } 1679 else if (--j < 0) { 1680 if ((ec | (e = (int)c)) < 0) // inactive or terminating 1681 return awaitWork(w, c, ec); 1682 else if (ctl == c) { // try to inactivate and enqueue 1683 long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); 1684 w.nextWait = e; 1685 w.eventCount = ec | INT_SIGN; 1686 if (!U.compareAndSwapLong(this, CTL, c, nc)) 1687 w.eventCount = ec; // back out 1688 } 1689 break; 1690 } 1691 } 1692 } 1693 return 0; 1694 } 1695 1696 /** 1697 * A continuation of scan(), possibly blocking or terminating 1698 * worker w. Returns without blocking if pool state has apparently 1699 * changed since last invocation. Also, if inactivating w has 1700 * caused the pool to become quiescent, checks for pool 1701 * termination, and, so long as this is not the only worker, waits 1702 * for event for up to a given duration. On timeout, if ctl has 1703 * not changed, terminates the worker, which will in turn wake up 1704 * another worker to possibly repeat this process. 1705 * 1706 * @param w the calling worker 1707 * @param c the ctl value on entry to scan 1708 * @param ec the worker's eventCount on entry to scan 1709 */ awaitWork(WorkQueue w, long c, int ec)1710 private final int awaitWork(WorkQueue w, long c, int ec) { 1711 int stat, ns; long parkTime, deadline; 1712 if ((stat = w.qlock) >= 0 && w.eventCount == ec && ctl == c && 1713 !Thread.interrupted()) { 1714 int e = (int)c; 1715 int u = (int)(c >>> 32); 1716 int d = (u >> UAC_SHIFT) + parallelism; // active count 1717 1718 if (e < 0 || (d <= 0 && tryTerminate(false, false))) 1719 stat = w.qlock = -1; // pool is terminating 1720 else if ((ns = w.nsteals) != 0) { // collect steals and retry 1721 long sc; 1722 w.nsteals = 0; 1723 do {} while (!U.compareAndSwapLong(this, STEALCOUNT, 1724 sc = stealCount, sc + ns)); 1725 } 1726 else { 1727 long pc = ((d > 0 || ec != (e | INT_SIGN)) ? 0L : 1728 ((long)(w.nextWait & E_MASK)) | // ctl to restore 1729 ((long)(u + UAC_UNIT)) << 32); 1730 if (pc != 0L) { // timed wait if last waiter 1731 int dc = -(short)(c >>> TC_SHIFT); 1732 parkTime = (dc < 0 ? 
FAST_IDLE_TIMEOUT: 1733 (dc + 1) * IDLE_TIMEOUT); 1734 deadline = System.nanoTime() + parkTime - TIMEOUT_SLOP; 1735 } 1736 else 1737 parkTime = deadline = 0L; 1738 if (w.eventCount == ec && ctl == c) { 1739 Thread wt = Thread.currentThread(); 1740 U.putObject(wt, PARKBLOCKER, this); 1741 w.parker = wt; // emulate LockSupport.park 1742 if (w.eventCount == ec && ctl == c) 1743 U.park(false, parkTime); // must recheck before park 1744 w.parker = null; 1745 U.putObject(wt, PARKBLOCKER, null); 1746 if (parkTime != 0L && ctl == c && 1747 deadline - System.nanoTime() <= 0L && 1748 U.compareAndSwapLong(this, CTL, c, pc)) 1749 stat = w.qlock = -1; // shrink pool 1750 } 1751 } 1752 } 1753 return stat; 1754 } 1755 1756 /** 1757 * Possibly releases (signals) a worker. Called only from scan() 1758 * when a worker with apparently inactive status finds a non-empty 1759 * queue. This requires revalidating all of the associated state 1760 * from caller. 1761 */ 1762 private final void helpRelease(long c, WorkQueue[] ws, WorkQueue w, 1763 WorkQueue q, int b) { 1764 WorkQueue v; int e, i; Thread p; 1765 if (w != null && w.eventCount < 0 && (e = (int)c) > 0 && 1766 ws != null && ws.length > (i = e & SMASK) && 1767 (v = ws[i]) != null && ctl == c) { 1768 long nc = (((long)(v.nextWait & E_MASK)) | 1769 ((long)((int)(c >>> 32) + UAC_UNIT)) << 32); 1770 int ne = (e + E_SEQ) & E_MASK; 1771 if (q != null && q.base == b && w.eventCount < 0 && 1772 v.eventCount == (e | INT_SIGN) && 1773 U.compareAndSwapLong(this, CTL, c, nc)) { 1774 v.eventCount = ne; 1775 if ((p = v.parker) != null) 1776 U.unpark(p); 1777 } 1778 } 1779 } 1780 1781 /** 1782 * Tries to locate and execute tasks for a stealer of the given 1783 * task, or in turn one of its stealers. Traces currentSteal -> 1784 * currentJoin links looking for a thread working on a descendant 1785 * of the given task and with a non-empty queue to steal back and 1786 * execute tasks from. The first call to this method upon a 1787 * waiting join will often entail scanning/search (which is OK 1788 * because the joiner has nothing better to do), but this method 1789 * leaves hints in workers to speed up subsequent calls. The 1790 * implementation is very branchy to cope with potential 1791 * inconsistencies or loops encountering chains that are stale, 1792 * unknown, or so long that they are likely cyclic.
1793 * 1794 * @param joiner the joining worker 1795 * @param task the task to join 1796 * @return 0 if no progress can be made, negative if task 1797 * known complete, else positive 1798 */ tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task)1799 private int tryHelpStealer(WorkQueue joiner, ForkJoinTask<?> task) { 1800 int stat = 0, steps = 0; // bound to avoid cycles 1801 if (task != null && joiner != null && 1802 joiner.base - joiner.top >= 0) { // hoist checks 1803 restart: for (;;) { 1804 ForkJoinTask<?> subtask = task; // current target 1805 for (WorkQueue j = joiner, v;;) { // v is stealer of subtask 1806 WorkQueue[] ws; int m, s, h; 1807 if ((s = task.status) < 0) { 1808 stat = s; 1809 break restart; 1810 } 1811 if ((ws = workQueues) == null || (m = ws.length - 1) <= 0) 1812 break restart; // shutting down 1813 if ((v = ws[h = (j.hint | 1) & m]) == null || 1814 v.currentSteal != subtask) { 1815 for (int origin = h;;) { // find stealer 1816 if (((h = (h + 2) & m) & 15) == 1 && 1817 (subtask.status < 0 || j.currentJoin != subtask)) 1818 continue restart; // occasional staleness check 1819 if ((v = ws[h]) != null && 1820 v.currentSteal == subtask) { 1821 j.hint = h; // save hint 1822 break; 1823 } 1824 if (h == origin) 1825 break restart; // cannot find stealer 1826 } 1827 } 1828 for (;;) { // help stealer or descend to its stealer 1829 ForkJoinTask[] a; int b; 1830 if (subtask.status < 0) // surround probes with 1831 continue restart; // consistency checks 1832 if ((b = v.base) - v.top < 0 && (a = v.array) != null) { 1833 int i = (((a.length - 1) & b) << ASHIFT) + ABASE; 1834 ForkJoinTask<?> t = 1835 (ForkJoinTask<?>)U.getObjectVolatile(a, i); 1836 if (subtask.status < 0 || j.currentJoin != subtask || 1837 v.currentSteal != subtask) 1838 continue restart; // stale 1839 stat = 1; // apparent progress 1840 if (v.base == b) { 1841 if (t == null) 1842 break restart; 1843 if (U.compareAndSwapObject(a, i, t, null)) { 1844 U.putOrderedInt(v, QBASE, b + 1); 1845 ForkJoinTask<?> ps = joiner.currentSteal; 1846 int jt = joiner.top; 1847 do { 1848 joiner.currentSteal = t; 1849 t.doExec(); // clear local tasks too 1850 } while (task.status >= 0 && 1851 joiner.top != jt && 1852 (t = joiner.pop()) != null); 1853 joiner.currentSteal = ps; 1854 break restart; 1855 } 1856 } 1857 } 1858 else { // empty -- try to descend 1859 ForkJoinTask<?> next = v.currentJoin; 1860 if (subtask.status < 0 || j.currentJoin != subtask || 1861 v.currentSteal != subtask) 1862 continue restart; // stale 1863 else if (next == null || ++steps == MAX_HELP) 1864 break restart; // dead-end or maybe cyclic 1865 else { 1866 subtask = next; 1867 j = v; 1868 break; 1869 } 1870 } 1871 } 1872 } 1873 } 1874 } 1875 return stat; 1876 } 1877 1878 /** 1879 * Analog of tryHelpStealer for CountedCompleters. Tries to steal 1880 * and run tasks within the target's computation. 
1881 * 1882 * @param task the task to join 1883 */ helpComplete(WorkQueue joiner, CountedCompleter<?> task)1884 private int helpComplete(WorkQueue joiner, CountedCompleter<?> task) { 1885 WorkQueue[] ws; int m; 1886 int s = 0; 1887 if ((ws = workQueues) != null && (m = ws.length - 1) >= 0 && 1888 joiner != null && task != null) { 1889 int j = joiner.poolIndex; 1890 int scans = m + m + 1; 1891 long c = 0L; // for stability check 1892 for (int k = scans; ; j += 2) { 1893 WorkQueue q; 1894 if ((s = task.status) < 0) 1895 break; 1896 else if (joiner.internalPopAndExecCC(task)) 1897 k = scans; 1898 else if ((s = task.status) < 0) 1899 break; 1900 else if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) 1901 k = scans; 1902 else if (--k < 0) { 1903 if (c == (c = ctl)) 1904 break; 1905 k = scans; 1906 } 1907 } 1908 } 1909 return s; 1910 } 1911 1912 /** 1913 * Tries to decrement active count (sometimes implicitly) and 1914 * possibly release or create a compensating worker in preparation 1915 * for blocking. Fails on contention or termination. Otherwise, 1916 * adds a new thread if no idle workers are available and pool 1917 * may become starved. 1918 * 1919 * @param c the assumed ctl value 1920 */ tryCompensate(long c)1921 final boolean tryCompensate(long c) { 1922 WorkQueue[] ws = workQueues; 1923 int pc = parallelism, e = (int)c, m, tc; 1924 if (ws != null && (m = ws.length - 1) >= 0 && e >= 0 && ctl == c) { 1925 WorkQueue w = ws[e & m]; 1926 if (e != 0 && w != null) { 1927 Thread p; 1928 long nc = ((long)(w.nextWait & E_MASK) | 1929 (c & (AC_MASK|TC_MASK))); 1930 int ne = (e + E_SEQ) & E_MASK; 1931 if (w.eventCount == (e | INT_SIGN) && 1932 U.compareAndSwapLong(this, CTL, c, nc)) { 1933 w.eventCount = ne; 1934 if ((p = w.parker) != null) 1935 U.unpark(p); 1936 return true; // replace with idle worker 1937 } 1938 } 1939 else if ((tc = (short)(c >>> TC_SHIFT)) >= 0 && 1940 (int)(c >> AC_SHIFT) + pc > 1) { 1941 long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK); 1942 if (U.compareAndSwapLong(this, CTL, c, nc)) 1943 return true; // no compensation 1944 } 1945 else if (tc + pc < MAX_CAP) { 1946 long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK); 1947 if (U.compareAndSwapLong(this, CTL, c, nc)) { 1948 ForkJoinWorkerThreadFactory fac; 1949 Throwable ex = null; 1950 ForkJoinWorkerThread wt = null; 1951 try { 1952 if ((fac = factory) != null && 1953 (wt = fac.newThread(this)) != null) { 1954 wt.start(); 1955 return true; 1956 } 1957 } catch (Throwable rex) { 1958 ex = rex; 1959 } 1960 deregisterWorker(wt, ex); // clean up and return false 1961 } 1962 } 1963 } 1964 return false; 1965 } 1966 1967 /** 1968 * Helps and/or blocks until the given task is done. 
1969 * 1970 * @param joiner the joining worker 1971 * @param task the task 1972 * @return task status on exit 1973 */ awaitJoin(WorkQueue joiner, ForkJoinTask<?> task)1974 final int awaitJoin(WorkQueue joiner, ForkJoinTask<?> task) { 1975 int s = 0; 1976 if (task != null && (s = task.status) >= 0 && joiner != null) { 1977 ForkJoinTask<?> prevJoin = joiner.currentJoin; 1978 joiner.currentJoin = task; 1979 do {} while (joiner.tryRemoveAndExec(task) && // process local tasks 1980 (s = task.status) >= 0); 1981 if (s >= 0 && (task instanceof CountedCompleter)) 1982 s = helpComplete(joiner, (CountedCompleter<?>)task); 1983 long cc = 0; // for stability checks 1984 while (s >= 0 && (s = task.status) >= 0) { 1985 if ((s = tryHelpStealer(joiner, task)) == 0 && 1986 (s = task.status) >= 0) { 1987 if (!tryCompensate(cc)) 1988 cc = ctl; 1989 else { 1990 if (task.trySetSignal() && (s = task.status) >= 0) { 1991 synchronized (task) { 1992 if (task.status >= 0) { 1993 try { // see ForkJoinTask 1994 task.wait(); // for explanation 1995 } catch (InterruptedException ie) { 1996 } 1997 } 1998 else 1999 task.notifyAll(); 2000 } 2001 } 2002 long c; // reactivate 2003 do {} while (!U.compareAndSwapLong 2004 (this, CTL, c = ctl, 2005 ((c & ~AC_MASK) | 2006 ((c & AC_MASK) + AC_UNIT)))); 2007 } 2008 } 2009 } 2010 joiner.currentJoin = prevJoin; 2011 } 2012 return s; 2013 } 2014 2015 /** 2016 * Stripped-down variant of awaitJoin used by timed joins. Tries 2017 * to help join only while there is continuous progress. (Caller 2018 * will then enter a timed wait.) 2019 * 2020 * @param joiner the joining worker 2021 * @param task the task 2022 */ helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task)2023 final void helpJoinOnce(WorkQueue joiner, ForkJoinTask<?> task) { 2024 int s; 2025 if (joiner != null && task != null && (s = task.status) >= 0) { 2026 ForkJoinTask<?> prevJoin = joiner.currentJoin; 2027 joiner.currentJoin = task; 2028 do {} while (joiner.tryRemoveAndExec(task) && // process local tasks 2029 (s = task.status) >= 0); 2030 if (s >= 0) { 2031 if (task instanceof CountedCompleter) 2032 helpComplete(joiner, (CountedCompleter<?>)task); 2033 do {} while (task.status >= 0 && 2034 tryHelpStealer(joiner, task) > 0); 2035 } 2036 joiner.currentJoin = prevJoin; 2037 } 2038 } 2039 2040 /** 2041 * Returns a (probably) non-empty steal queue, if one is found 2042 * during a scan, else null. This method must be retried by 2043 * caller if, by the time it tries to use the queue, it is empty. 2044 */ findNonEmptyStealQueue()2045 private WorkQueue findNonEmptyStealQueue() { 2046 int r = ThreadLocalRandom.current().nextInt(); 2047 for (;;) { 2048 int ps = plock, m; WorkQueue[] ws; WorkQueue q; 2049 if ((ws = workQueues) != null && (m = ws.length - 1) >= 0) { 2050 for (int j = (m + 1) << 2; j >= 0; --j) { 2051 if ((q = ws[(((r - j) << 1) | 1) & m]) != null && 2052 q.base - q.top < 0) 2053 return q; 2054 } 2055 } 2056 if (plock == ps) 2057 return null; 2058 } 2059 } 2060 2061 /** 2062 * Runs tasks until {@code isQuiescent()}. We piggyback on 2063 * active count ctl maintenance, but rather than blocking 2064 * when tasks cannot be found, we rescan until all others cannot 2065 * find tasks either. 
2066 */ helpQuiescePool(WorkQueue w)2067 final void helpQuiescePool(WorkQueue w) { 2068 ForkJoinTask<?> ps = w.currentSteal; 2069 for (boolean active = true;;) { 2070 long c; WorkQueue q; ForkJoinTask<?> t; int b; 2071 while ((t = w.nextLocalTask()) != null) 2072 t.doExec(); 2073 if ((q = findNonEmptyStealQueue()) != null) { 2074 if (!active) { // re-establish active count 2075 active = true; 2076 do {} while (!U.compareAndSwapLong 2077 (this, CTL, c = ctl, 2078 ((c & ~AC_MASK) | 2079 ((c & AC_MASK) + AC_UNIT)))); 2080 } 2081 if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) { 2082 (w.currentSteal = t).doExec(); 2083 w.currentSteal = ps; 2084 } 2085 } 2086 else if (active) { // decrement active count without queuing 2087 long nc = ((c = ctl) & ~AC_MASK) | ((c & AC_MASK) - AC_UNIT); 2088 if ((int)(nc >> AC_SHIFT) + parallelism == 0) 2089 break; // bypass decrement-then-increment 2090 if (U.compareAndSwapLong(this, CTL, c, nc)) 2091 active = false; 2092 } 2093 else if ((int)((c = ctl) >> AC_SHIFT) + parallelism <= 0 && 2094 U.compareAndSwapLong 2095 (this, CTL, c, ((c & ~AC_MASK) | 2096 ((c & AC_MASK) + AC_UNIT)))) 2097 break; 2098 } 2099 } 2100 2101 /** 2102 * Gets and removes a local or stolen task for the given worker. 2103 * 2104 * @return a task, if available 2105 */ nextTaskFor(WorkQueue w)2106 final ForkJoinTask<?> nextTaskFor(WorkQueue w) { 2107 for (ForkJoinTask<?> t;;) { 2108 WorkQueue q; int b; 2109 if ((t = w.nextLocalTask()) != null) 2110 return t; 2111 if ((q = findNonEmptyStealQueue()) == null) 2112 return null; 2113 if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) 2114 return t; 2115 } 2116 } 2117 2118 /** 2119 * Returns a cheap heuristic guide for task partitioning when 2120 * programmers, frameworks, tools, or languages have little or no 2121 * idea about task granularity. In essence by offering this 2122 * method, we ask users only about tradeoffs in overhead vs 2123 * expected throughput and its variance, rather than how finely to 2124 * partition tasks. 2125 * 2126 * In a steady state strict (tree-structured) computation, each 2127 * thread makes available for stealing enough tasks for other 2128 * threads to remain active. Inductively, if all threads play by 2129 * the same rules, each thread should make available only a 2130 * constant number of tasks. 2131 * 2132 * The minimum useful constant is just 1. But using a value of 1 2133 * would require immediate replenishment upon each steal to 2134 * maintain enough tasks, which is infeasible. Further, 2135 * partitionings/granularities of offered tasks should minimize 2136 * steal rates, which in general means that threads nearer the top 2137 * of computation tree should generate more than those nearer the 2138 * bottom. In perfect steady state, each thread is at 2139 * approximately the same level of computation tree. However, 2140 * producing extra tasks amortizes the uncertainty of progress and 2141 * diffusion assumptions. 2142 * 2143 * So, users will want to use values larger (but not much larger) 2144 * than 1 to both smooth over transient shortages and hedge 2145 * against uneven progress; as traded off against the cost of 2146 * extra task overhead. We leave the user to pick a threshold 2147 * value to compare with the results of this call to guide 2148 * decisions, but recommend values such as 3. 2149 * 2150 * When all threads are active, it is on average OK to estimate 2151 * surplus strictly locally. In steady-state, if one thread is 2152 * maintaining say 2 surplus tasks, then so are others. 
So we can 2153 * just use estimated queue length. However, this strategy alone 2154 * leads to serious mis-estimates in some non-steady-state 2155 * conditions (ramp-up, ramp-down, other stalls). We can detect 2156 * many of these by further considering the number of "idle" 2157 * threads, that are known to have zero queued tasks, so 2158 * compensate by a factor of (#idle/#active) threads. 2159 * 2160 * Note: The approximation of #busy workers as #active workers is 2161 * not very good under current signalling scheme, and should be 2162 * improved. 2163 */ getSurplusQueuedTaskCount()2164 static int getSurplusQueuedTaskCount() { 2165 Thread t; ForkJoinWorkerThread wt; ForkJoinPool pool; WorkQueue q; 2166 if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) { 2167 int p = (pool = (wt = (ForkJoinWorkerThread)t).pool).parallelism; 2168 int n = (q = wt.workQueue).top - q.base; 2169 int a = (int)(pool.ctl >> AC_SHIFT) + p; 2170 return n - (a > (p >>>= 1) ? 0 : 2171 a > (p >>>= 1) ? 1 : 2172 a > (p >>>= 1) ? 2 : 2173 a > (p >>>= 1) ? 4 : 2174 8); 2175 } 2176 return 0; 2177 } 2178 2179 // Termination 2180 2181 /** 2182 * Possibly initiates and/or completes termination. The caller 2183 * triggering termination runs three passes through workQueues: 2184 * (0) Setting termination status, followed by wakeups of queued 2185 * workers; (1) cancelling all tasks; (2) interrupting lagging 2186 * threads (likely in external tasks, but possibly also blocked in 2187 * joins). Each pass repeats previous steps because of potential 2188 * lagging thread creation. 2189 * 2190 * @param now if true, unconditionally terminate, else only 2191 * if no work and no active workers 2192 * @param enable if true, enable shutdown when next possible 2193 * @return true if now terminating or terminated 2194 */ tryTerminate(boolean now, boolean enable)2195 private boolean tryTerminate(boolean now, boolean enable) { 2196 int ps; 2197 if (this == common) // cannot shut down 2198 return false; 2199 if ((ps = plock) >= 0) { // enable by setting plock 2200 if (!enable) 2201 return false; 2202 if ((ps & PL_LOCK) != 0 || 2203 !U.compareAndSwapInt(this, PLOCK, ps, ps += PL_LOCK)) 2204 ps = acquirePlock(); 2205 int nps = ((ps + PL_LOCK) & ~SHUTDOWN) | SHUTDOWN; 2206 if (!U.compareAndSwapInt(this, PLOCK, ps, nps)) 2207 releasePlock(nps); 2208 } 2209 for (long c;;) { 2210 if (((c = ctl) & STOP_BIT) != 0) { // already terminating 2211 if ((short)(c >>> TC_SHIFT) + parallelism <= 0) { 2212 synchronized (this) { 2213 notifyAll(); // signal when 0 workers 2214 } 2215 } 2216 return true; 2217 } 2218 if (!now) { // check if idle & no tasks 2219 WorkQueue[] ws; WorkQueue w; 2220 if ((int)(c >> AC_SHIFT) + parallelism > 0) 2221 return false; 2222 if ((ws = workQueues) != null) { 2223 for (int i = 0; i < ws.length; ++i) { 2224 if ((w = ws[i]) != null && 2225 (!w.isEmpty() || 2226 ((i & 1) != 0 && w.eventCount >= 0))) { 2227 signalWork(ws, w); 2228 return false; 2229 } 2230 } 2231 } 2232 } 2233 if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) { 2234 for (int pass = 0; pass < 3; ++pass) { 2235 WorkQueue[] ws; WorkQueue w; Thread wt; 2236 if ((ws = workQueues) != null) { 2237 int n = ws.length; 2238 for (int i = 0; i < n; ++i) { 2239 if ((w = ws[i]) != null) { 2240 w.qlock = -1; 2241 if (pass > 0) { 2242 w.cancelAll(); 2243 if (pass > 1 && (wt = w.owner) != null) { 2244 if (!wt.isInterrupted()) { 2245 try { 2246 wt.interrupt(); 2247 } catch (Throwable ignore) { 2248 } 2249 } 2250 U.unpark(wt); 2251 } 2252 } 2253 } 2254 } 2255 // 
Wake up workers parked on event queue 2256 int i, e; long cc; Thread p; 2257 while ((e = (int)(cc = ctl) & E_MASK) != 0 && 2258 (i = e & SMASK) < n && i >= 0 && 2259 (w = ws[i]) != null) { 2260 long nc = ((long)(w.nextWait & E_MASK) | 2261 ((cc + AC_UNIT) & AC_MASK) | 2262 (cc & (TC_MASK|STOP_BIT))); 2263 if (w.eventCount == (e | INT_SIGN) && 2264 U.compareAndSwapLong(this, CTL, cc, nc)) { 2265 w.eventCount = (e + E_SEQ) & E_MASK; 2266 w.qlock = -1; 2267 if ((p = w.parker) != null) 2268 U.unpark(p); 2269 } 2270 } 2271 } 2272 } 2273 } 2274 } 2275 } 2276 2277 // external operations on common pool 2278 2279 /** 2280 * Returns common pool queue for a thread that has submitted at 2281 * least one task. 2282 */ commonSubmitterQueue()2283 static WorkQueue commonSubmitterQueue() { 2284 Submitter z; ForkJoinPool p; WorkQueue[] ws; int m, r; 2285 return ((z = submitters.get()) != null && 2286 (p = common) != null && 2287 (ws = p.workQueues) != null && 2288 (m = ws.length - 1) >= 0) ? 2289 ws[m & z.seed & SQMASK] : null; 2290 } 2291 2292 /** 2293 * Tries to pop the given task from submitter's queue in common pool. 2294 */ tryExternalUnpush(ForkJoinTask<?> task)2295 final boolean tryExternalUnpush(ForkJoinTask<?> task) { 2296 WorkQueue joiner; ForkJoinTask<?>[] a; int m, s; 2297 Submitter z = submitters.get(); 2298 WorkQueue[] ws = workQueues; 2299 boolean popped = false; 2300 if (z != null && ws != null && (m = ws.length - 1) >= 0 && 2301 (joiner = ws[z.seed & m & SQMASK]) != null && 2302 joiner.base != (s = joiner.top) && 2303 (a = joiner.array) != null) { 2304 long j = (((a.length - 1) & (s - 1)) << ASHIFT) + ABASE; 2305 if (U.getObject(a, j) == task && 2306 U.compareAndSwapInt(joiner, QLOCK, 0, 1)) { 2307 if (joiner.top == s && joiner.array == a && 2308 U.compareAndSwapObject(a, j, task, null)) { 2309 joiner.top = s - 1; 2310 popped = true; 2311 } 2312 joiner.qlock = 0; 2313 } 2314 } 2315 return popped; 2316 } 2317 externalHelpComplete(CountedCompleter<?> task)2318 final int externalHelpComplete(CountedCompleter<?> task) { 2319 WorkQueue joiner; int m, j; 2320 Submitter z = submitters.get(); 2321 WorkQueue[] ws = workQueues; 2322 int s = 0; 2323 if (z != null && ws != null && (m = ws.length - 1) >= 0 && 2324 (joiner = ws[(j = z.seed) & m & SQMASK]) != null && task != null) { 2325 int scans = m + m + 1; 2326 long c = 0L; // for stability check 2327 j |= 1; // poll odd queues 2328 for (int k = scans; ; j += 2) { 2329 WorkQueue q; 2330 if ((s = task.status) < 0) 2331 break; 2332 else if (joiner.externalPopAndExecCC(task)) 2333 k = scans; 2334 else if ((s = task.status) < 0) 2335 break; 2336 else if ((q = ws[j & m]) != null && q.pollAndExecCC(task)) 2337 k = scans; 2338 else if (--k < 0) { 2339 if (c == (c = ctl)) 2340 break; 2341 k = scans; 2342 } 2343 } 2344 } 2345 return s; 2346 } 2347 2348 // Exported methods 2349 2350 // Constructors 2351 2352 /** 2353 * Creates a {@code ForkJoinPool} with parallelism equal to {@link 2354 * java.lang.Runtime#availableProcessors}, using the {@linkplain 2355 * #defaultForkJoinWorkerThreadFactory default thread factory}, 2356 * no UncaughtExceptionHandler, and non-async LIFO processing mode. 
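     *
     * <p>Typical use is simply (an illustrative fragment; {@code SortTask}
     * is a placeholder for any {@code ForkJoinTask}):
     * <pre> {@code
     * ForkJoinPool pool = new ForkJoinPool();
     * pool.invoke(new SortTask(array, 0, array.length));}</pre>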
2357 */ ForkJoinPool()2358 public ForkJoinPool() { 2359 this(Math.min(MAX_CAP, Runtime.getRuntime().availableProcessors()), 2360 defaultForkJoinWorkerThreadFactory, null, false); 2361 } 2362 2363 /** 2364 * Creates a {@code ForkJoinPool} with the indicated parallelism 2365 * level, the {@linkplain 2366 * #defaultForkJoinWorkerThreadFactory default thread factory}, 2367 * no UncaughtExceptionHandler, and non-async LIFO processing mode. 2368 * 2369 * @param parallelism the parallelism level 2370 * @throws IllegalArgumentException if parallelism less than or 2371 * equal to zero, or greater than implementation limit 2372 */ ForkJoinPool(int parallelism)2373 public ForkJoinPool(int parallelism) { 2374 this(parallelism, defaultForkJoinWorkerThreadFactory, null, false); 2375 } 2376 2377 /** 2378 * Creates a {@code ForkJoinPool} with the given parameters. 2379 * 2380 * @param parallelism the parallelism level. For default value, 2381 * use {@link java.lang.Runtime#availableProcessors}. 2382 * @param factory the factory for creating new threads. For default value, 2383 * use {@link #defaultForkJoinWorkerThreadFactory}. 2384 * @param handler the handler for internal worker threads that 2385 * terminate due to unrecoverable errors encountered while executing 2386 * tasks. For default value, use {@code null}. 2387 * @param asyncMode if true, 2388 * establishes local first-in-first-out scheduling mode for forked 2389 * tasks that are never joined. This mode may be more appropriate 2390 * than default locally stack-based mode in applications in which 2391 * worker threads only process event-style asynchronous tasks. 2392 * For default value, use {@code false}. 2393 * @throws IllegalArgumentException if parallelism less than or 2394 * equal to zero, or greater than implementation limit 2395 * @throws NullPointerException if the factory is null 2396 */ ForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory, UncaughtExceptionHandler handler, boolean asyncMode)2397 public ForkJoinPool(int parallelism, 2398 ForkJoinWorkerThreadFactory factory, 2399 UncaughtExceptionHandler handler, 2400 boolean asyncMode) { 2401 this(checkParallelism(parallelism), 2402 checkFactory(factory), 2403 handler, 2404 (asyncMode ? FIFO_QUEUE : LIFO_QUEUE), 2405 "ForkJoinPool-" + nextPoolId() + "-worker-"); 2406 checkPermission(); 2407 } 2408 checkParallelism(int parallelism)2409 private static int checkParallelism(int parallelism) { 2410 if (parallelism <= 0 || parallelism > MAX_CAP) 2411 throw new IllegalArgumentException(); 2412 return parallelism; 2413 } 2414 checkFactory(ForkJoinWorkerThreadFactory factory)2415 private static ForkJoinWorkerThreadFactory checkFactory 2416 (ForkJoinWorkerThreadFactory factory) { 2417 if (factory == null) 2418 throw new NullPointerException(); 2419 return factory; 2420 } 2421 2422 /** 2423 * Creates a {@code ForkJoinPool} with the given parameters, without 2424 * any security checks or parameter validation. Invoked directly by 2425 * makeCommonPool. 
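     *
     * <p>Worked example of the offset encoding below, assuming the
     * field layout used by the count accessors in this class: with
     * {@code parallelism == 4}, both packed counts start at {@code -4},
     * so
     * <pre> {@code
     * parallelism + (int)(ctl >> AC_SHIFT)     // active workers
     * parallelism + (short)(ctl >>> TC_SHIFT)  // total workers}</pre>
     * each read zero until workers are created and activated.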
2426 */ ForkJoinPool(int parallelism, ForkJoinWorkerThreadFactory factory, UncaughtExceptionHandler handler, int mode, String workerNamePrefix)2427 private ForkJoinPool(int parallelism, 2428 ForkJoinWorkerThreadFactory factory, 2429 UncaughtExceptionHandler handler, 2430 int mode, 2431 String workerNamePrefix) { 2432 this.workerNamePrefix = workerNamePrefix; 2433 this.factory = factory; 2434 this.ueh = handler; 2435 this.mode = (short)mode; 2436 this.parallelism = (short)parallelism; 2437 long np = (long)(-parallelism); // offset ctl counts 2438 this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); 2439 } 2440 2441 /** 2442 * Returns the common pool instance. This pool is statically 2443 * constructed; its run state is unaffected by attempts to {@link 2444 * #shutdown} or {@link #shutdownNow}. However this pool and any 2445 * ongoing processing are automatically terminated upon program 2446 * {@link System#exit}. Any program that relies on asynchronous 2447 * task processing to complete before program termination should 2448 * invoke {@code commonPool().}{@link #awaitQuiescence awaitQuiescence}, 2449 * before exit. 2450 * 2451 * @return the common pool instance 2452 * @since 1.8 2453 * @hide 2454 */ commonPool()2455 public static ForkJoinPool commonPool() { 2456 // assert common != null : "static init error"; 2457 return common; 2458 } 2459 2460 // Execution methods 2461 2462 /** 2463 * Performs the given task, returning its result upon completion. 2464 * If the computation encounters an unchecked Exception or Error, 2465 * it is rethrown as the outcome of this invocation. Rethrown 2466 * exceptions behave in the same way as regular exceptions, but, 2467 * when possible, contain stack traces (as displayed for example 2468 * using {@code ex.printStackTrace()}) of both the current thread 2469 * as well as the thread actually encountering the exception; 2470 * minimally only the latter. 2471 * 2472 * @param task the task 2473 * @return the task's result 2474 * @throws NullPointerException if the task is null 2475 * @throws RejectedExecutionException if the task cannot be 2476 * scheduled for execution 2477 */ invoke(ForkJoinTask<T> task)2478 public <T> T invoke(ForkJoinTask<T> task) { 2479 if (task == null) 2480 throw new NullPointerException(); 2481 externalPush(task); 2482 return task.join(); 2483 } 2484 2485 /** 2486 * Arranges for (asynchronous) execution of the given task. 2487 * 2488 * @param task the task 2489 * @throws NullPointerException if the task is null 2490 * @throws RejectedExecutionException if the task cannot be 2491 * scheduled for execution 2492 */ execute(ForkJoinTask<?> task)2493 public void execute(ForkJoinTask<?> task) { 2494 if (task == null) 2495 throw new NullPointerException(); 2496 externalPush(task); 2497 } 2498 2499 // AbstractExecutorService methods 2500 2501 /** 2502 * @throws NullPointerException if the task is null 2503 * @throws RejectedExecutionException if the task cannot be 2504 * scheduled for execution 2505 */ execute(Runnable task)2506 public void execute(Runnable task) { 2507 if (task == null) 2508 throw new NullPointerException(); 2509 ForkJoinTask<?> job; 2510 if (task instanceof ForkJoinTask<?>) // avoid re-wrap 2511 job = (ForkJoinTask<?>) task; 2512 else 2513 job = new ForkJoinTask.RunnableExecuteAction(task); 2514 externalPush(job); 2515 } 2516 2517 /** 2518 * Submits a ForkJoinTask for execution. 
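     *
     * <p>For example (an illustrative fragment; {@code FibTask} is a
     * placeholder for any {@code ForkJoinTask<Integer>}):
     * <pre> {@code
     * ForkJoinTask<Integer> job = pool.submit(new FibTask(30));
     * // ... do other work ...
     * Integer result = job.join();}</pre>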
2519 * 2520 * @param task the task to submit 2521 * @return the task 2522 * @throws NullPointerException if the task is null 2523 * @throws RejectedExecutionException if the task cannot be 2524 * scheduled for execution 2525 */ submit(ForkJoinTask<T> task)2526 public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) { 2527 if (task == null) 2528 throw new NullPointerException(); 2529 externalPush(task); 2530 return task; 2531 } 2532 2533 /** 2534 * @throws NullPointerException if the task is null 2535 * @throws RejectedExecutionException if the task cannot be 2536 * scheduled for execution 2537 */ submit(Callable<T> task)2538 public <T> ForkJoinTask<T> submit(Callable<T> task) { 2539 ForkJoinTask<T> job = new ForkJoinTask.AdaptedCallable<T>(task); 2540 externalPush(job); 2541 return job; 2542 } 2543 2544 /** 2545 * @throws NullPointerException if the task is null 2546 * @throws RejectedExecutionException if the task cannot be 2547 * scheduled for execution 2548 */ submit(Runnable task, T result)2549 public <T> ForkJoinTask<T> submit(Runnable task, T result) { 2550 ForkJoinTask<T> job = new ForkJoinTask.AdaptedRunnable<T>(task, result); 2551 externalPush(job); 2552 return job; 2553 } 2554 2555 /** 2556 * @throws NullPointerException if the task is null 2557 * @throws RejectedExecutionException if the task cannot be 2558 * scheduled for execution 2559 */ submit(Runnable task)2560 public ForkJoinTask<?> submit(Runnable task) { 2561 if (task == null) 2562 throw new NullPointerException(); 2563 ForkJoinTask<?> job; 2564 if (task instanceof ForkJoinTask<?>) // avoid re-wrap 2565 job = (ForkJoinTask<?>) task; 2566 else 2567 job = new ForkJoinTask.AdaptedRunnableAction(task); 2568 externalPush(job); 2569 return job; 2570 } 2571 2572 /** 2573 * @throws NullPointerException {@inheritDoc} 2574 * @throws RejectedExecutionException {@inheritDoc} 2575 */ invokeAll(Collection<? extends Callable<T>> tasks)2576 public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) { 2577 // In previous versions of this class, this method constructed 2578 // a task to run ForkJoinTask.invokeAll, but now external 2579 // invocation of multiple tasks is at least as efficient. 2580 ArrayList<Future<T>> futures = new ArrayList<Future<T>>(tasks.size()); 2581 2582 boolean done = false; 2583 try { 2584 for (Callable<T> t : tasks) { 2585 ForkJoinTask<T> f = new ForkJoinTask.AdaptedCallable<T>(t); 2586 futures.add(f); 2587 externalPush(f); 2588 } 2589 for (int i = 0, size = futures.size(); i < size; i++) 2590 ((ForkJoinTask<?>)futures.get(i)).quietlyJoin(); 2591 done = true; 2592 return futures; 2593 } finally { 2594 if (!done) 2595 for (int i = 0, size = futures.size(); i < size; i++) 2596 futures.get(i).cancel(false); 2597 } 2598 } 2599 2600 /** 2601 * Returns the factory used for constructing new workers. 2602 * 2603 * @return the factory used for constructing new workers 2604 */ getFactory()2605 public ForkJoinWorkerThreadFactory getFactory() { 2606 return factory; 2607 } 2608 2609 /** 2610 * Returns the handler for internal worker threads that terminate 2611 * due to unrecoverable errors encountered while executing tasks. 2612 * 2613 * @return the handler, or {@code null} if none 2614 */ getUncaughtExceptionHandler()2615 public UncaughtExceptionHandler getUncaughtExceptionHandler() { 2616 return ueh; 2617 } 2618 2619 /** 2620 * Returns the targeted parallelism level of this pool. 
2621 * 2622 * @return the targeted parallelism level of this pool 2623 */ getParallelism()2624 public int getParallelism() { 2625 int par; 2626 return ((par = parallelism) > 0) ? par : 1; 2627 } 2628 2629 /** 2630 * Returns the targeted parallelism level of the common pool. 2631 * 2632 * @return the targeted parallelism level of the common pool 2633 * @since 1.8 2634 * @hide 2635 */ getCommonPoolParallelism()2636 public static int getCommonPoolParallelism() { 2637 return commonParallelism; 2638 } 2639 2640 /** 2641 * Returns the number of worker threads that have started but not 2642 * yet terminated. The result returned by this method may differ 2643 * from {@link #getParallelism} when threads are created to 2644 * maintain parallelism when others are cooperatively blocked. 2645 * 2646 * @return the number of worker threads 2647 */ getPoolSize()2648 public int getPoolSize() { 2649 return parallelism + (short)(ctl >>> TC_SHIFT); 2650 } 2651 2652 /** 2653 * Returns {@code true} if this pool uses local first-in-first-out 2654 * scheduling mode for forked tasks that are never joined. 2655 * 2656 * @return {@code true} if this pool uses async mode 2657 */ getAsyncMode()2658 public boolean getAsyncMode() { 2659 return mode == FIFO_QUEUE; 2660 } 2661 2662 /** 2663 * Returns an estimate of the number of worker threads that are 2664 * not blocked waiting to join tasks or for other managed 2665 * synchronization. This method may overestimate the 2666 * number of running threads. 2667 * 2668 * @return the number of worker threads 2669 */ getRunningThreadCount()2670 public int getRunningThreadCount() { 2671 int rc = 0; 2672 WorkQueue[] ws; WorkQueue w; 2673 if ((ws = workQueues) != null) { 2674 for (int i = 1; i < ws.length; i += 2) { 2675 if ((w = ws[i]) != null && w.isApparentlyUnblocked()) 2676 ++rc; 2677 } 2678 } 2679 return rc; 2680 } 2681 2682 /** 2683 * Returns an estimate of the number of threads that are currently 2684 * stealing or executing tasks. This method may overestimate the 2685 * number of active threads. 2686 * 2687 * @return the number of active threads 2688 */ getActiveThreadCount()2689 public int getActiveThreadCount() { 2690 int r = parallelism + (int)(ctl >> AC_SHIFT); 2691 return (r <= 0) ? 0 : r; // suppress momentarily negative values 2692 } 2693 2694 /** 2695 * Returns {@code true} if all worker threads are currently idle. 2696 * An idle worker is one that cannot obtain a task to execute 2697 * because none are available to steal from other threads, and 2698 * there are no pending submissions to the pool. This method is 2699 * conservative; it might not return {@code true} immediately upon 2700 * idleness of all threads, but will eventually become true if 2701 * threads remain inactive. 2702 * 2703 * @return {@code true} if all threads are currently idle 2704 */ isQuiescent()2705 public boolean isQuiescent() { 2706 return parallelism + (int)(ctl >> AC_SHIFT) <= 0; 2707 } 2708 2709 /** 2710 * Returns an estimate of the total number of tasks stolen from 2711 * one thread's work queue by another. The reported value 2712 * underestimates the actual total number of steals when the pool 2713 * is not quiescent. This value may be useful for monitoring and 2714 * tuning fork/join programs: in general, steal counts should be 2715 * high enough to keep threads busy, but low enough to avoid 2716 * overhead and contention across threads. 
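     *
     * <p>A small monitoring sketch (illustrative only; the sampling
     * interval and output sink are arbitrary):
     * <pre> {@code
     * long before = pool.getStealCount();
     * Thread.sleep(1000L);
     * System.out.println("steals/sec ~ " + (pool.getStealCount() - before));}</pre>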
2717 * 2718 * @return the number of steals 2719 */ getStealCount()2720 public long getStealCount() { 2721 long count = stealCount; 2722 WorkQueue[] ws; WorkQueue w; 2723 if ((ws = workQueues) != null) { 2724 for (int i = 1; i < ws.length; i += 2) { 2725 if ((w = ws[i]) != null) 2726 count += w.nsteals; 2727 } 2728 } 2729 return count; 2730 } 2731 2732 /** 2733 * Returns an estimate of the total number of tasks currently held 2734 * in queues by worker threads (but not including tasks submitted 2735 * to the pool that have not begun executing). This value is only 2736 * an approximation, obtained by iterating across all threads in 2737 * the pool. This method may be useful for tuning task 2738 * granularities. 2739 * 2740 * @return the number of queued tasks 2741 */ getQueuedTaskCount()2742 public long getQueuedTaskCount() { 2743 long count = 0; 2744 WorkQueue[] ws; WorkQueue w; 2745 if ((ws = workQueues) != null) { 2746 for (int i = 1; i < ws.length; i += 2) { 2747 if ((w = ws[i]) != null) 2748 count += w.queueSize(); 2749 } 2750 } 2751 return count; 2752 } 2753 2754 /** 2755 * Returns an estimate of the number of tasks submitted to this 2756 * pool that have not yet begun executing. This method may take 2757 * time proportional to the number of submissions. 2758 * 2759 * @return the number of queued submissions 2760 */ getQueuedSubmissionCount()2761 public int getQueuedSubmissionCount() { 2762 int count = 0; 2763 WorkQueue[] ws; WorkQueue w; 2764 if ((ws = workQueues) != null) { 2765 for (int i = 0; i < ws.length; i += 2) { 2766 if ((w = ws[i]) != null) 2767 count += w.queueSize(); 2768 } 2769 } 2770 return count; 2771 } 2772 2773 /** 2774 * Returns {@code true} if there are any tasks submitted to this 2775 * pool that have not yet begun executing. 2776 * 2777 * @return {@code true} if there are any queued submissions 2778 */ hasQueuedSubmissions()2779 public boolean hasQueuedSubmissions() { 2780 WorkQueue[] ws; WorkQueue w; 2781 if ((ws = workQueues) != null) { 2782 for (int i = 0; i < ws.length; i += 2) { 2783 if ((w = ws[i]) != null && !w.isEmpty()) 2784 return true; 2785 } 2786 } 2787 return false; 2788 } 2789 2790 /** 2791 * Removes and returns the next unexecuted submission if one is 2792 * available. This method may be useful in extensions to this 2793 * class that re-assign work in systems with multiple pools. 2794 * 2795 * @return the next submission, or {@code null} if none 2796 */ pollSubmission()2797 protected ForkJoinTask<?> pollSubmission() { 2798 WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; 2799 if ((ws = workQueues) != null) { 2800 for (int i = 0; i < ws.length; i += 2) { 2801 if ((w = ws[i]) != null && (t = w.poll()) != null) 2802 return t; 2803 } 2804 } 2805 return null; 2806 } 2807 2808 /** 2809 * Removes all available unexecuted submitted and forked tasks 2810 * from scheduling queues and adds them to the given collection, 2811 * without altering their execution status. These may include 2812 * artificially generated or wrapped tasks. This method is 2813 * designed to be invoked only when the pool is known to be 2814 * quiescent. Invocations at other times may not remove all 2815 * tasks. A failure encountered while attempting to add elements 2816 * to collection {@code c} may result in elements being in 2817 * neither, either or both collections when the associated 2818 * exception is thrown. The behavior of this operation is 2819 * undefined if the specified collection is modified while the 2820 * operation is in progress. 
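     *
     * <p>Typical usage in an extension (illustrative; assumes the pool
     * is known to be quiescent, as described above):
     * <pre> {@code
     * List<ForkJoinTask<?>> drained = new ArrayList<>();
     * int n = drainTasksTo(drained);
     * // inspect or re-submit the n drained tasks elsewhere}</pre>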
2821 * 2822 * @param c the collection to transfer elements into 2823 * @return the number of elements transferred 2824 */ drainTasksTo(Collection<? super ForkJoinTask<?>> c)2825 protected int drainTasksTo(Collection<? super ForkJoinTask<?>> c) { 2826 int count = 0; 2827 WorkQueue[] ws; WorkQueue w; ForkJoinTask<?> t; 2828 if ((ws = workQueues) != null) { 2829 for (int i = 0; i < ws.length; ++i) { 2830 if ((w = ws[i]) != null) { 2831 while ((t = w.poll()) != null) { 2832 c.add(t); 2833 ++count; 2834 } 2835 } 2836 } 2837 } 2838 return count; 2839 } 2840 2841 /** 2842 * Returns a string identifying this pool, as well as its state, 2843 * including indications of run state, parallelism level, and 2844 * worker and task counts. 2845 * 2846 * @return a string identifying this pool, as well as its state 2847 */ toString()2848 public String toString() { 2849 // Use a single pass through workQueues to collect counts 2850 long qt = 0L, qs = 0L; int rc = 0; 2851 long st = stealCount; 2852 long c = ctl; 2853 WorkQueue[] ws; WorkQueue w; 2854 if ((ws = workQueues) != null) { 2855 for (int i = 0; i < ws.length; ++i) { 2856 if ((w = ws[i]) != null) { 2857 int size = w.queueSize(); 2858 if ((i & 1) == 0) 2859 qs += size; 2860 else { 2861 qt += size; 2862 st += w.nsteals; 2863 if (w.isApparentlyUnblocked()) 2864 ++rc; 2865 } 2866 } 2867 } 2868 } 2869 int pc = parallelism; 2870 int tc = pc + (short)(c >>> TC_SHIFT); 2871 int ac = pc + (int)(c >> AC_SHIFT); 2872 if (ac < 0) // ignore transient negative 2873 ac = 0; 2874 String level; 2875 if ((c & STOP_BIT) != 0) 2876 level = (tc == 0) ? "Terminated" : "Terminating"; 2877 else 2878 level = plock < 0 ? "Shutting down" : "Running"; 2879 return super.toString() + 2880 "[" + level + 2881 ", parallelism = " + pc + 2882 ", size = " + tc + 2883 ", active = " + ac + 2884 ", running = " + rc + 2885 ", steals = " + st + 2886 ", tasks = " + qt + 2887 ", submissions = " + qs + 2888 "]"; 2889 } 2890 2891 /** 2892 * Possibly initiates an orderly shutdown in which previously 2893 * submitted tasks are executed, but no new tasks will be 2894 * accepted. Invocation has no effect on execution state if this 2895 * is the {@code commonPool()}, and no additional effect if 2896 * already shut down. Tasks that are in the process of being 2897 * submitted concurrently during the course of this method may or 2898 * may not be rejected. 2899 */ 2900 public void shutdown() { 2901 checkPermission(); 2902 tryTerminate(false, true); 2903 } 2904 2905 /** 2906 * Possibly attempts to cancel and/or stop all tasks, and reject 2907 * all subsequently submitted tasks. Invocation has no effect on 2908 * execution state if this is the {@code commonPool()}, and no 2909 * additional effect if already shut down. Otherwise, tasks that 2910 * are in the process of being submitted or executed concurrently 2911 * during the course of this method may or may not be 2912 * rejected. This method cancels both existing and unexecuted 2913 * tasks, in order to permit termination in the presence of task 2914 * dependencies. So the method always returns an empty list 2915 * (unlike the case for some other Executors). 2916 * 2917 * @return an empty list 2918 */ 2919 public List<Runnable> shutdownNow() { 2920 checkPermission(); 2921 tryTerminate(true, true); 2922 return Collections.emptyList(); 2923 } 2924 2925 /** 2926 * Returns {@code true} if all tasks have completed following shut down. 
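     *
     * <p>This is normally checked after an orderly shutdown, for
     * example (an illustrative fragment):
     * <pre> {@code
     * pool.shutdown();
     * if (!pool.awaitTermination(60L, TimeUnit.SECONDS))
     *     pool.shutdownNow(); // cancel lingering tasks
     * // isTerminated() then reports true once all tasks have completed}</pre>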
2927 * 2928 * @return {@code true} if all tasks have completed following shut down 2929 */ 2930 public boolean isTerminated() { 2931 long c = ctl; 2932 return ((c & STOP_BIT) != 0L && 2933 (short)(c >>> TC_SHIFT) + parallelism <= 0); 2934 } 2935 2936 /** 2937 * Returns {@code true} if the process of termination has 2938 * commenced but not yet completed. This method may be useful for 2939 * debugging. A return of {@code true} reported a sufficient 2940 * period after shutdown may indicate that submitted tasks have 2941 * ignored or suppressed interruption, or are waiting for I/O, 2942 * causing this executor not to properly terminate. (See the 2943 * advisory notes for class {@link ForkJoinTask} stating that 2944 * tasks should not normally entail blocking operations. But if 2945 * they do, they must abort them on interrupt.) 2946 * 2947 * @return {@code true} if terminating but not yet terminated 2948 */ isTerminating()2949 public boolean isTerminating() { 2950 long c = ctl; 2951 return ((c & STOP_BIT) != 0L && 2952 (short)(c >>> TC_SHIFT) + parallelism > 0); 2953 } 2954 2955 /** 2956 * Returns {@code true} if this pool has been shut down. 2957 * 2958 * @return {@code true} if this pool has been shut down 2959 */ isShutdown()2960 public boolean isShutdown() { 2961 return plock < 0; 2962 } 2963 2964 /** 2965 * Blocks until all tasks have completed execution after a 2966 * shutdown request, or the timeout occurs, or the current thread 2967 * is interrupted, whichever happens first. Because the {@code 2968 * commonPool()} never terminates until program shutdown, when 2969 * applied to the common pool, this method is equivalent to {@link 2970 * #awaitQuiescence(long, TimeUnit)} but always returns {@code false}. 2971 * 2972 * @param timeout the maximum time to wait 2973 * @param unit the time unit of the timeout argument 2974 * @return {@code true} if this executor terminated and 2975 * {@code false} if the timeout elapsed before termination 2976 * @throws InterruptedException if interrupted while waiting 2977 */ awaitTermination(long timeout, TimeUnit unit)2978 public boolean awaitTermination(long timeout, TimeUnit unit) 2979 throws InterruptedException { 2980 if (Thread.interrupted()) 2981 throw new InterruptedException(); 2982 if (this == common) { 2983 awaitQuiescence(timeout, unit); 2984 return false; 2985 } 2986 long nanos = unit.toNanos(timeout); 2987 if (isTerminated()) 2988 return true; 2989 if (nanos <= 0L) 2990 return false; 2991 long deadline = System.nanoTime() + nanos; 2992 synchronized (this) { 2993 for (;;) { 2994 if (isTerminated()) 2995 return true; 2996 if (nanos <= 0L) 2997 return false; 2998 long millis = TimeUnit.NANOSECONDS.toMillis(nanos); 2999 wait(millis > 0L ? millis : 1L); 3000 nanos = deadline - System.nanoTime(); 3001 } 3002 } 3003 } 3004 3005 /** 3006 * If called by a ForkJoinTask operating in this pool, equivalent 3007 * in effect to {@link ForkJoinTask#helpQuiesce}. Otherwise, 3008 * waits and/or attempts to assist performing tasks until this 3009 * pool {@link #isQuiescent} or the indicated timeout elapses. 3010 * 3011 * @param timeout the maximum time to wait 3012 * @param unit the time unit of the timeout argument 3013 * @return {@code true} if quiescent; {@code false} if the 3014 * timeout elapsed. 
3015 */ awaitQuiescence(long timeout, TimeUnit unit)3016 public boolean awaitQuiescence(long timeout, TimeUnit unit) { 3017 long nanos = unit.toNanos(timeout); 3018 ForkJoinWorkerThread wt; 3019 Thread thread = Thread.currentThread(); 3020 if ((thread instanceof ForkJoinWorkerThread) && 3021 (wt = (ForkJoinWorkerThread)thread).pool == this) { 3022 helpQuiescePool(wt.workQueue); 3023 return true; 3024 } 3025 long startTime = System.nanoTime(); 3026 WorkQueue[] ws; 3027 int r = 0, m; 3028 boolean found = true; 3029 while (!isQuiescent() && (ws = workQueues) != null && 3030 (m = ws.length - 1) >= 0) { 3031 if (!found) { 3032 if ((System.nanoTime() - startTime) > nanos) 3033 return false; 3034 Thread.yield(); // cannot block 3035 } 3036 found = false; 3037 for (int j = (m + 1) << 2; j >= 0; --j) { 3038 ForkJoinTask<?> t; WorkQueue q; int b; 3039 if ((q = ws[r++ & m]) != null && (b = q.base) - q.top < 0) { 3040 found = true; 3041 if ((t = q.pollAt(b)) != null) 3042 t.doExec(); 3043 break; 3044 } 3045 } 3046 } 3047 return true; 3048 } 3049 3050 /** 3051 * Waits and/or attempts to assist performing tasks indefinitely 3052 * until the {@code commonPool()} {@link #isQuiescent}. 3053 */ quiesceCommonPool()3054 static void quiesceCommonPool() { 3055 common.awaitQuiescence(Long.MAX_VALUE, TimeUnit.NANOSECONDS); 3056 } 3057 3058 /** 3059 * Interface for extending managed parallelism for tasks running 3060 * in {@link ForkJoinPool}s. 3061 * 3062 * <p>A {@code ManagedBlocker} provides two methods. Method 3063 * {@code isReleasable} must return {@code true} if blocking is 3064 * not necessary. Method {@code block} blocks the current thread 3065 * if necessary (perhaps internally invoking {@code isReleasable} 3066 * before actually blocking). These actions are performed by any 3067 * thread invoking {@link ForkJoinPool#managedBlock(ManagedBlocker)}. 3068 * The unusual methods in this API accommodate synchronizers that 3069 * may, but don't usually, block for long periods. Similarly, they 3070 * allow more efficient internal handling of cases in which 3071 * additional workers may be, but usually are not, needed to 3072 * ensure sufficient parallelism. Toward this end, 3073 * implementations of method {@code isReleasable} must be amenable 3074 * to repeated invocation. 
     *
     * <p>For example, here is a ManagedBlocker based on a
     * ReentrantLock:
     *  <pre> {@code
     * class ManagedLocker implements ManagedBlocker {
     *   final ReentrantLock lock;
     *   boolean hasLock = false;
     *   ManagedLocker(ReentrantLock lock) { this.lock = lock; }
     *   public boolean block() {
     *     if (!hasLock)
     *       lock.lock();
     *     return true;
     *   }
     *   public boolean isReleasable() {
     *     return hasLock || (hasLock = lock.tryLock());
     *   }
     * }}</pre>
     *
     * <p>Here is a class that possibly blocks waiting for an
     * item on a given queue:
     *  <pre> {@code
     * class QueueTaker<E> implements ManagedBlocker {
     *   final BlockingQueue<E> queue;
     *   volatile E item = null;
     *   QueueTaker(BlockingQueue<E> q) { this.queue = q; }
     *   public boolean block() throws InterruptedException {
     *     if (item == null)
     *       item = queue.take();
     *     return true;
     *   }
     *   public boolean isReleasable() {
     *     return item != null || (item = queue.poll()) != null;
     *   }
     *   public E getItem() { // call after pool.managedBlock completes
     *     return item;
     *   }
     * }}</pre>
     */
    public static interface ManagedBlocker {
        /**
         * Possibly blocks the current thread, for example waiting for
         * a lock or condition.
         *
         * @return {@code true} if no additional blocking is necessary
         * (i.e., if isReleasable would return true)
         * @throws InterruptedException if interrupted while waiting
         * (the method is not required to do so, but is allowed to)
         */
        boolean block() throws InterruptedException;

        /**
         * Returns {@code true} if blocking is unnecessary.
         * @return {@code true} if blocking is unnecessary
         */
        boolean isReleasable();
    }

    /**
     * Blocks in accord with the given blocker.  If the current thread
     * is a {@link ForkJoinWorkerThread}, this method possibly
     * arranges for a spare thread to be activated if necessary to
     * ensure sufficient parallelism while the current thread is blocked.
     *
     * <p>If the caller is not a {@link ForkJoinTask}, this method is
     * behaviorally equivalent to
     *  <pre> {@code
     * while (!blocker.isReleasable())
     *   if (blocker.block())
     *     return;
     * }</pre>
     *
     * If the caller is a {@code ForkJoinTask}, then the pool may
     * first be expanded to ensure parallelism, and later adjusted.
     *
     * @param blocker the blocker
     * @throws InterruptedException if blocker.block did so
     */
    public static void managedBlock(ManagedBlocker blocker)
        throws InterruptedException {
        Thread t = Thread.currentThread();
        if (t instanceof ForkJoinWorkerThread) {
            ForkJoinPool p = ((ForkJoinWorkerThread)t).pool;
            while (!blocker.isReleasable()) {
                if (p.tryCompensate(p.ctl)) {
                    try {
                        do {} while (!blocker.isReleasable() &&
                                     !blocker.block());
                    } finally {
                        p.incrementActiveCount();
                    }
                    break;
                }
            }
        }
        else {
            do {} while (!blocker.isReleasable() &&
                         !blocker.block());
        }
    }

    // AbstractExecutorService overrides.  These rely on undocumented
    // fact that ForkJoinTask.adapt returns ForkJoinTasks that also
    // implement RunnableFuture.
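
    // Illustrative sketch only (nothing below depends on it): because the
    // adapted tasks returned by these overrides are ForkJoinTasks that also
    // implement RunnableFuture, a subclass could use either view of the
    // result of newTaskFor, e.g.
    //   RunnableFuture<String> rf = newTaskFor(someCallable); // hypothetical Callable<String>
    //   ForkJoinTask<String> fjt = (ForkJoinTask<String>) rf; // cast always succeeds here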

    protected <T> RunnableFuture<T> newTaskFor(Runnable runnable, T value) {
        return new ForkJoinTask.AdaptedRunnable<T>(runnable, value);
    }

    protected <T> RunnableFuture<T> newTaskFor(Callable<T> callable) {
        return new ForkJoinTask.AdaptedCallable<T>(callable);
    }

    // Unsafe mechanics
    private static final sun.misc.Unsafe U;
    private static final long CTL;
    private static final long PARKBLOCKER;
    private static final int ABASE;
    private static final int ASHIFT;
    private static final long STEALCOUNT;
    private static final long PLOCK;
    private static final long INDEXSEED;
    private static final long QBASE;
    private static final long QLOCK;

    static {
        // initialize field offsets for CAS etc
        try {
            U = sun.misc.Unsafe.getUnsafe();
            Class<?> k = ForkJoinPool.class;
            CTL = U.objectFieldOffset
                (k.getDeclaredField("ctl"));
            STEALCOUNT = U.objectFieldOffset
                (k.getDeclaredField("stealCount"));
            PLOCK = U.objectFieldOffset
                (k.getDeclaredField("plock"));
            INDEXSEED = U.objectFieldOffset
                (k.getDeclaredField("indexSeed"));
            Class<?> tk = Thread.class;
            PARKBLOCKER = U.objectFieldOffset
                (tk.getDeclaredField("parkBlocker"));
            Class<?> wk = WorkQueue.class;
            QBASE = U.objectFieldOffset
                (wk.getDeclaredField("base"));
            QLOCK = U.objectFieldOffset
                (wk.getDeclaredField("qlock"));
            Class<?> ak = ForkJoinTask[].class;
            ABASE = U.arrayBaseOffset(ak);
            int scale = U.arrayIndexScale(ak);
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }

        submitters = new ThreadLocal<Submitter>();
        defaultForkJoinWorkerThreadFactory =
            new DefaultForkJoinWorkerThreadFactory();
        modifyThreadPermission = new RuntimePermission("modifyThread");

        common = java.security.AccessController.doPrivileged
            (new java.security.PrivilegedAction<ForkJoinPool>() {
                public ForkJoinPool run() { return makeCommonPool(); }});
        int par = common.parallelism; // report 1 even if threads disabled
        commonParallelism = par > 0 ? par : 1;
    }

    /**
     * Creates and returns the common pool, respecting user settings
     * specified via system properties.
     */
    private static ForkJoinPool makeCommonPool() {
        int parallelism = -1;
        ForkJoinWorkerThreadFactory factory
            = defaultForkJoinWorkerThreadFactory;
        UncaughtExceptionHandler handler = null;
        try {  // ignore exceptions in accessing/parsing properties
            String pp = System.getProperty
                ("java.util.concurrent.ForkJoinPool.common.parallelism");
            String fp = System.getProperty
                ("java.util.concurrent.ForkJoinPool.common.threadFactory");
            String hp = System.getProperty
                ("java.util.concurrent.ForkJoinPool.common.exceptionHandler");
            if (pp != null)
                parallelism = Integer.parseInt(pp);
            if (fp != null)
                factory = ((ForkJoinWorkerThreadFactory)ClassLoader.
                           getSystemClassLoader().loadClass(fp).newInstance());
            if (hp != null)
                handler = ((UncaughtExceptionHandler)ClassLoader.
                           getSystemClassLoader().loadClass(hp).newInstance());
        } catch (Exception ignore) {
        }
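
        // Illustrative note, not used by this method: the properties read
        // above are typically supplied on the command line, for example
        // (the factory class name below is hypothetical):
        //   -Djava.util.concurrent.ForkJoinPool.common.parallelism=4
        //   -Djava.util.concurrent.ForkJoinPool.common.threadFactory=com.example.MyFactory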

        if (parallelism < 0 && // default 1 less than #cores
            (parallelism = Runtime.getRuntime().availableProcessors() - 1) < 0)
            parallelism = 0;
        if (parallelism > MAX_CAP)
            parallelism = MAX_CAP;
        return new ForkJoinPool(parallelism, factory, handler, LIFO_QUEUE,
                                "ForkJoinPool.commonPool-worker-");
    }

}