/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * This file is available under and governed by the GNU General Public
 * License version 2 only, as published by the Free Software Foundation.
 * However, the following notice accompanied the original version of this
 * file:
 *
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;

import java.io.ObjectStreamField;
import java.io.Serializable;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.Function;
import java.util.function.IntBinaryOperator;
import java.util.function.LongBinaryOperator;
import java.util.function.Predicate;
import java.util.function.ToDoubleBiFunction;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntBiFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongBiFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Stream;

// BEGIN android-note
// removed link to collections framework docs
// END android-note

/**
 * A hash table supporting full concurrency of retrievals and
 * high expected concurrency for updates. This class obeys the
 * same functional specification as {@link java.util.Hashtable}, and
 * includes versions of methods corresponding to each method of
 * {@code Hashtable}. However, even though all operations are
 * thread-safe, retrieval operations do <em>not</em> entail locking,
 * and there is <em>not</em> any support for locking the entire table
 * in a way that prevents all access.  This class is fully
 * interoperable with {@code Hashtable} in programs that rely on its
 * thread safety but not on its synchronization details.
 *
 * <p>Retrieval operations (including {@code get}) generally do not
 * block, so may overlap with update operations (including {@code put}
 * and {@code remove}). Retrievals reflect the results of the most
 * recently <em>completed</em> update operations holding upon their
 * onset. (More formally, an update operation for a given key bears a
 * <em>happens-before</em> relation with any (non-null) retrieval for
 * that key reporting the updated value.)  For aggregate operations
 * such as {@code putAll} and {@code clear}, concurrent retrievals may
 * reflect insertion or removal of only some entries.  Similarly,
 * Iterators, Spliterators and Enumerations return elements reflecting the
 * state of the hash table at some point at or since the creation of the
 * iterator/enumeration.  They do <em>not</em> throw {@link
 * java.util.ConcurrentModificationException ConcurrentModificationException}.
 * However, iterators are designed to be used by only one thread at a time.
 * Bear in mind that the results of aggregate status methods including
 * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
 * useful only when a map is not undergoing concurrent updates in other threads.
 * Otherwise the results of these methods reflect transient states
 * that may be adequate for monitoring or estimation purposes, but not
 * for program control.
 *
 * <p>The table is dynamically expanded when there are too many
 * collisions (i.e., keys that have distinct hash codes but fall into
 * the same slot modulo the table size), with the expected average
 * effect of maintaining roughly two bins per mapping (corresponding
 * to a 0.75 load factor threshold for resizing). There may be much
 * variance around this average as mappings are added and removed, but
 * overall, this maintains a commonly accepted time/space tradeoff for
 * hash tables.  However, resizing this or any other kind of hash
 * table may be a relatively slow operation. When possible, it is a
 * good idea to provide a size estimate as an optional {@code
 * initialCapacity} constructor argument. An additional optional
 * {@code loadFactor} constructor argument provides a further means of
 * customizing initial table capacity by specifying the table density
 * to be used in calculating the amount of space to allocate for the
 * given number of elements.  Also, for compatibility with previous
 * versions of this class, constructors may optionally specify an
 * expected {@code concurrencyLevel} as an additional hint for
 * internal sizing.  Note that using many keys with exactly the same
 * {@code hashCode()} is a sure way to slow down performance of any
 * hash table. To ameliorate impact, when keys are {@link Comparable},
 * this class may use comparison order among keys to help break ties.
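 *
 * <p>As an illustrative sketch of pre-sizing (the element estimate and
 * the key/value types here are assumptions for the example, not
 * recommendations):
 *
 * <pre> {@code
 * // Supplying the estimate up front avoids intermediate resizes
 * // while the map is being populated.
 * ConcurrentHashMap<String,Object> cache = new ConcurrentHashMap<>(1000);
 * }</pre>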
 *
 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 * (using {@link #keySet(Object)}) when only keys are of interest, and the
 * mapped values are (perhaps transiently) not used or all take the
 * same mapping value.
 *
 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
 * form of histogram or multiset) by using {@link
 * java.util.concurrent.atomic.LongAdder} values and initializing via
 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
 *
 * <p>This class and its views and iterators implement all of the
 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
 * interfaces.
 *
 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
 * does <em>not</em> allow {@code null} to be used as a key or value.
 *
 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
 * operations that, unlike most {@link Stream} methods, are designed
 * to be safely, and often sensibly, applied even with maps that are
 * being concurrently updated by other threads; for example, when
 * computing a snapshot summary of the values in a shared registry.
 * There are three kinds of operation, each with four forms, accepting
 * functions with keys, values, entries, and (key, value) pairs as
 * arguments and/or return values. Because the elements of a
 * ConcurrentHashMap are not ordered in any particular way, and may be
 * processed in different orders in different parallel executions, the
 * correctness of supplied functions should not depend on any
 * ordering, or on any other objects or values that may transiently
 * change while computation is in progress; and except for forEach
 * actions, should ideally be side-effect-free. Bulk operations on
 * {@link java.util.Map.Entry} objects do not support method {@code
 * setValue}.
 *
 * <ul>
 * <li>forEach: Performs a given action on each element.
 * A variant form applies a given transformation on each element
 * before performing the action.
 *
 * <li>search: Returns the first available non-null result of
 * applying a given function on each element; skipping further
 * search when a result is found.
 *
 * <li>reduce: Accumulates each element.  The supplied reduction
 * function cannot rely on ordering (more formally, it should be
 * both associative and commutative).  There are five variants:
 *
 * <ul>
 *
 * <li>Plain reductions. (There is not a form of this method for
 * (key, value) function arguments since there is no corresponding
 * return type.)
 *
 * <li>Mapped reductions that accumulate the results of a given
 * function applied to each element.
 *
 * <li>Reductions to scalar doubles, longs, and ints, using a
 * given basis value.
 *
 * </ul>
 * </ul>
 *
 * <p>These bulk operations accept a {@code parallelismThreshold}
 * argument. Methods proceed sequentially if the current map size is
 * estimated to be less than the given threshold. Using a value of
 * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
 * of {@code 1} results in maximal parallelism by partitioning into
 * enough subtasks to fully utilize the {@link
 * ForkJoinPool#commonPool()} that is used for all parallel
 * computations. Normally, you would initially choose one of these
 * extreme values, and then measure performance of using in-between
 * values that trade off overhead versus throughput.
 *
 * <p>The concurrency properties of bulk operations follow
 * from those of ConcurrentHashMap: Any non-null result returned
 * from {@code get(key)} and related access methods bears a
 * happens-before relation with the associated insertion or
 * update.  The result of any bulk operation reflects the
 * composition of these per-element relations (but is not
 * necessarily atomic with respect to the map as a whole unless it
 * is somehow known to be quiescent).  Conversely, because keys
 * and values in the map are never null, null serves as a reliable
 * atomic indicator of the current lack of any result.  To
 * maintain this property, null serves as an implicit basis for
 * all non-scalar reduction operations. For the double, long, and
 * int versions, the basis should be one that, when combined with
 * any other value, returns that other value (more formally, it
 * should be the identity element for the reduction). Most common
 * reductions have these properties; for example, computing a sum
 * with basis 0 or a minimum with basis MAX_VALUE.
 *
 * <p>Search and transformation functions provided as arguments
 * should similarly return null to indicate the lack of any result
 * (in which case it is not used). In the case of mapped
 * reductions, this also enables transformations to serve as
 * filters, returning null (or, in the case of primitive
 * specializations, the identity basis) if the element should not
 * be combined. You can create compound transformations and
 * filterings by composing them yourself under this "null means
 * there is nothing there now" rule before using them in search or
 * reduce operations.
 *
 * <p>Methods accepting and/or returning Entry arguments maintain
 * key-value associations. They may be useful for example when
 * finding the key for the greatest value. Note that "plain" Entry
 * arguments can be supplied using {@code new
 * AbstractMap.SimpleEntry(k,v)}.
 *
 * <p>Bulk operations may complete abruptly, throwing an
 * exception encountered in the application of a supplied
 * function. Bear in mind when handling such exceptions that other
 * concurrently executing functions could also have thrown
 * exceptions, or would have done so if the first exception had
 * not occurred.
 *
 * <p>Speedups for parallel compared to sequential forms are common
 * but not guaranteed.  Parallel operations involving brief functions
 * on small maps may execute more slowly than sequential forms if the
 * underlying work to parallelize the computation is more expensive
 * than the computation itself.  Similarly, parallelization may not
 * lead to much actual parallelism if all processors are busy
 * performing unrelated tasks.
 *
 * <p>All arguments to all task methods must be non-null.
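 *
 * <p>For example, a hedged sketch of a parallel reduction (the map
 * contents and the threshold of 500 are assumptions for the example,
 * not recommendations) over a {@code ConcurrentHashMap<String,Long> map}:
 *
 * <pre> {@code
 * // Sum all values, going parallel once the map is estimated to
 * // hold at least 500 elements; basis 0L is the identity for sum.
 * long total = map.reduceValuesToLong(500L, Long::longValue, 0L, Long::sum);
 * }</pre>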
 *
 * @since 1.5
 * @author Doug Lea
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 */
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {
    private static final long serialVersionUID = 7249069246763182397L;

    /*
     * Overview:
     *
     * The primary design goal of this hash table is to maintain
     * concurrent readability (typically method get(), but also
     * iterators and related methods) while minimizing update
     * contention. Secondary goals are to keep space consumption about
     * the same or better than java.util.HashMap, and to support high
     * initial insertion rates on an empty table by many threads.
     *
     * This map usually acts as a binned (bucketed) hash table.  Each
     * key-value mapping is held in a Node.  Most nodes are instances
     * of the basic Node class with hash, key, value, and next
     * fields. However, various subclasses exist: TreeNodes are
     * arranged in balanced trees, not lists.  TreeBins hold the roots
     * of sets of TreeNodes. ForwardingNodes are placed at the heads
     * of bins during resizing. ReservationNodes are used as
     * placeholders while establishing values in computeIfAbsent and
     * related methods.  The types TreeBin, ForwardingNode, and
     * ReservationNode do not hold normal user keys, values, or
     * hashes, and are readily distinguishable during search etc
     * because they have negative hash fields and null key and value
     * fields. (These special nodes are either uncommon or transient,
     * so the impact of carrying around some unused fields is
     * insignificant.)
     *
     * The table is lazily initialized to a power-of-two size upon the
     * first insertion.  Each bin in the table normally contains a
     * list of Nodes (most often, the list has only zero or one Node).
     * Table accesses require volatile/atomic reads, writes, and
     * CASes.  Because there is no other way to arrange this without
     * adding further indirections, we use intrinsics
     * (sun.misc.Unsafe) operations.
     *
     * We use the top (sign) bit of Node hash fields for control
     * purposes -- it is available anyway because of addressing
     * constraints.  Nodes with negative hash fields are specially
     * handled or ignored in map methods.
     *
     * Insertion (via put or its variants) of the first node in an
     * empty bin is performed by just CASing it to the bin.  This is
     * by far the most common case for put operations under most
     * key/hash distributions.  Other update operations (insert,
     * delete, and replace) require locks.  We do not want to waste
     * the space required to associate a distinct lock object with
     * each bin, so instead use the first node of a bin list itself as
     * a lock. Locking support for these locks relies on builtin
     * "synchronized" monitors.
     *
     * Using the first node of a list as a lock does not by itself
     * suffice though: When a node is locked, any update must first
     * validate that it is still the first node after locking it, and
     * retry if not. Because new nodes are always appended to lists,
     * once a node is first in a bin, it remains first until deleted
     * or the bin becomes invalidated (upon resizing).
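     *
     * In sketch form (a simplification of the putVal code below, not
     * a verbatim excerpt), this lock-then-validate pattern is:
     *
     *   Node<K,V> f = tabAt(tab, i);    // bin head doubles as the lock
     *   synchronized (f) {
     *       if (tabAt(tab, i) == f) {   // still the head? safe to update
     *           // ... insert, delete, or replace within this bin ...
     *       }                           // else lost a race; retry
     *   }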
323 * 324 * The main disadvantage of per-bin locks is that other update 325 * operations on other nodes in a bin list protected by the same 326 * lock can stall, for example when user equals() or mapping 327 * functions take a long time. However, statistically, under 328 * random hash codes, this is not a common problem. Ideally, the 329 * frequency of nodes in bins follows a Poisson distribution 330 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a 331 * parameter of about 0.5 on average, given the resizing threshold 332 * of 0.75, although with a large variance because of resizing 333 * granularity. Ignoring variance, the expected occurrences of 334 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The 335 * first values are: 336 * 337 * 0: 0.60653066 338 * 1: 0.30326533 339 * 2: 0.07581633 340 * 3: 0.01263606 341 * 4: 0.00157952 342 * 5: 0.00015795 343 * 6: 0.00001316 344 * 7: 0.00000094 345 * 8: 0.00000006 346 * more: less than 1 in ten million 347 * 348 * Lock contention probability for two threads accessing distinct 349 * elements is roughly 1 / (8 * #elements) under random hashes. 350 * 351 * Actual hash code distributions encountered in practice 352 * sometimes deviate significantly from uniform randomness. This 353 * includes the case when N > (1<<30), so some keys MUST collide. 354 * Similarly for dumb or hostile usages in which multiple keys are 355 * designed to have identical hash codes or ones that differs only 356 * in masked-out high bits. So we use a secondary strategy that 357 * applies when the number of nodes in a bin exceeds a 358 * threshold. These TreeBins use a balanced tree to hold nodes (a 359 * specialized form of red-black trees), bounding search time to 360 * O(log N). Each search step in a TreeBin is at least twice as 361 * slow as in a regular list, but given that N cannot exceed 362 * (1<<64) (before running out of addresses) this bounds search 363 * steps, lock hold times, etc, to reasonable constants (roughly 364 * 100 nodes inspected per operation worst case) so long as keys 365 * are Comparable (which is very common -- String, Long, etc). 366 * TreeBin nodes (TreeNodes) also maintain the same "next" 367 * traversal pointers as regular nodes, so can be traversed in 368 * iterators in the same way. 369 * 370 * The table is resized when occupancy exceeds a percentage 371 * threshold (nominally, 0.75, but see below). Any thread 372 * noticing an overfull bin may assist in resizing after the 373 * initiating thread allocates and sets up the replacement array. 374 * However, rather than stalling, these other threads may proceed 375 * with insertions etc. The use of TreeBins shields us from the 376 * worst case effects of overfilling while resizes are in 377 * progress. Resizing proceeds by transferring bins, one by one, 378 * from the table to the next table. However, threads claim small 379 * blocks of indices to transfer (via field transferIndex) before 380 * doing so, reducing contention. A generation stamp in field 381 * sizeCtl ensures that resizings do not overlap. Because we are 382 * using power-of-two expansion, the elements from each bin must 383 * either stay at same index, or move with a power of two 384 * offset. We eliminate unnecessary node creation by catching 385 * cases where old nodes can be reused because their next fields 386 * won't change. On average, only about one-sixth of them need 387 * cloning when a table doubles. 
The nodes they replace will be 388 * garbage collectable as soon as they are no longer referenced by 389 * any reader thread that may be in the midst of concurrently 390 * traversing table. Upon transfer, the old table bin contains 391 * only a special forwarding node (with hash field "MOVED") that 392 * contains the next table as its key. On encountering a 393 * forwarding node, access and update operations restart, using 394 * the new table. 395 * 396 * Each bin transfer requires its bin lock, which can stall 397 * waiting for locks while resizing. However, because other 398 * threads can join in and help resize rather than contend for 399 * locks, average aggregate waits become shorter as resizing 400 * progresses. The transfer operation must also ensure that all 401 * accessible bins in both the old and new table are usable by any 402 * traversal. This is arranged in part by proceeding from the 403 * last bin (table.length - 1) up towards the first. Upon seeing 404 * a forwarding node, traversals (see class Traverser) arrange to 405 * move to the new table without revisiting nodes. To ensure that 406 * no intervening nodes are skipped even when moved out of order, 407 * a stack (see class TableStack) is created on first encounter of 408 * a forwarding node during a traversal, to maintain its place if 409 * later processing the current table. The need for these 410 * save/restore mechanics is relatively rare, but when one 411 * forwarding node is encountered, typically many more will be. 412 * So Traversers use a simple caching scheme to avoid creating so 413 * many new TableStack nodes. (Thanks to Peter Levart for 414 * suggesting use of a stack here.) 415 * 416 * The traversal scheme also applies to partial traversals of 417 * ranges of bins (via an alternate Traverser constructor) 418 * to support partitioned aggregate operations. Also, read-only 419 * operations give up if ever forwarded to a null table, which 420 * provides support for shutdown-style clearing, which is also not 421 * currently implemented. 422 * 423 * Lazy table initialization minimizes footprint until first use, 424 * and also avoids resizings when the first operation is from a 425 * putAll, constructor with map argument, or deserialization. 426 * These cases attempt to override the initial capacity settings, 427 * but harmlessly fail to take effect in cases of races. 428 * 429 * The element count is maintained using a specialization of 430 * LongAdder. We need to incorporate a specialization rather than 431 * just use a LongAdder in order to access implicit 432 * contention-sensing that leads to creation of multiple 433 * CounterCells. The counter mechanics avoid contention on 434 * updates but can encounter cache thrashing if read too 435 * frequently during concurrent access. To avoid reading so often, 436 * resizing under contention is attempted only upon adding to a 437 * bin already holding two or more nodes. Under uniform hash 438 * distributions, the probability of this occurring at threshold 439 * is around 13%, meaning that only about 1 in 8 puts check 440 * threshold (and after resizing, many fewer do so). 441 * 442 * TreeBins use a special form of comparison for search and 443 * related operations (which is the main reason we cannot use 444 * existing collections such as TreeMaps). TreeBins contain 445 * Comparable elements, but may contain others, as well as 446 * elements that are Comparable but not necessarily Comparable for 447 * the same T, so we cannot invoke compareTo among them. 
To handle 448 * this, the tree is ordered primarily by hash value, then by 449 * Comparable.compareTo order if applicable. On lookup at a node, 450 * if elements are not comparable or compare as 0 then both left 451 * and right children may need to be searched in the case of tied 452 * hash values. (This corresponds to the full list search that 453 * would be necessary if all elements were non-Comparable and had 454 * tied hashes.) On insertion, to keep a total ordering (or as 455 * close as is required here) across rebalancings, we compare 456 * classes and identityHashCodes as tie-breakers. The red-black 457 * balancing code is updated from pre-jdk-collections 458 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) 459 * based in turn on Cormen, Leiserson, and Rivest "Introduction to 460 * Algorithms" (CLR). 461 * 462 * TreeBins also require an additional locking mechanism. While 463 * list traversal is always possible by readers even during 464 * updates, tree traversal is not, mainly because of tree-rotations 465 * that may change the root node and/or its linkages. TreeBins 466 * include a simple read-write lock mechanism parasitic on the 467 * main bin-synchronization strategy: Structural adjustments 468 * associated with an insertion or removal are already bin-locked 469 * (and so cannot conflict with other writers) but must wait for 470 * ongoing readers to finish. Since there can be only one such 471 * waiter, we use a simple scheme using a single "waiter" field to 472 * block writers. However, readers need never block. If the root 473 * lock is held, they proceed along the slow traversal path (via 474 * next-pointers) until the lock becomes available or the list is 475 * exhausted, whichever comes first. These cases are not fast, but 476 * maximize aggregate expected throughput. 477 * 478 * Maintaining API and serialization compatibility with previous 479 * versions of this class introduces several oddities. Mainly: We 480 * leave untouched but unused constructor arguments referring to 481 * concurrencyLevel. We accept a loadFactor constructor argument, 482 * but apply it only to initial table capacity (which is the only 483 * time that we can guarantee to honor it.) We also declare an 484 * unused "Segment" class that is instantiated in minimal form 485 * only when serializing. 486 * 487 * Also, solely for compatibility with previous versions of this 488 * class, it extends AbstractMap, even though all of its methods 489 * are overridden, so it is just useless baggage. 490 * 491 * This file is organized to make things a little easier to follow 492 * while reading than they might otherwise: First the main static 493 * declarations and utilities, then fields, then main public 494 * methods (with a few factorings of multiple public methods into 495 * internal ones), then sizing methods, trees, traversers, and 496 * bulk operations. 497 */ 498 499 /* ---------------- Constants -------------- */ 500 501 /** 502 * The largest possible table capacity. This value must be 503 * exactly 1<<30 to stay within Java array allocation and indexing 504 * bounds for power of two table sizes, and is further required 505 * because the top two bits of 32bit hash fields are used for 506 * control purposes. 507 */ 508 private static final int MAXIMUM_CAPACITY = 1 << 30; 509 510 /** 511 * The default initial table capacity. Must be a power of 2 512 * (i.e., at least 1) and at most MAXIMUM_CAPACITY. 
513 */ 514 private static final int DEFAULT_CAPACITY = 16; 515 516 /** 517 * The largest possible (non-power of two) array size. 518 * Needed by toArray and related methods. 519 */ 520 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; 521 522 /** 523 * The default concurrency level for this table. Unused but 524 * defined for compatibility with previous versions of this class. 525 */ 526 private static final int DEFAULT_CONCURRENCY_LEVEL = 16; 527 528 /** 529 * The load factor for this table. Overrides of this value in 530 * constructors affect only the initial table capacity. The 531 * actual floating point value isn't normally used -- it is 532 * simpler to use expressions such as {@code n - (n >>> 2)} for 533 * the associated resizing threshold. 534 */ 535 private static final float LOAD_FACTOR = 0.75f; 536 537 /** 538 * The bin count threshold for using a tree rather than list for a 539 * bin. Bins are converted to trees when adding an element to a 540 * bin with at least this many nodes. The value must be greater 541 * than 2, and should be at least 8 to mesh with assumptions in 542 * tree removal about conversion back to plain bins upon 543 * shrinkage. 544 */ 545 static final int TREEIFY_THRESHOLD = 8; 546 547 /** 548 * The bin count threshold for untreeifying a (split) bin during a 549 * resize operation. Should be less than TREEIFY_THRESHOLD, and at 550 * most 6 to mesh with shrinkage detection under removal. 551 */ 552 static final int UNTREEIFY_THRESHOLD = 6; 553 554 /** 555 * The smallest table capacity for which bins may be treeified. 556 * (Otherwise the table is resized if too many nodes in a bin.) 557 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid 558 * conflicts between resizing and treeification thresholds. 559 */ 560 static final int MIN_TREEIFY_CAPACITY = 64; 561 562 /** 563 * Minimum number of rebinnings per transfer step. Ranges are 564 * subdivided to allow multiple resizer threads. This value 565 * serves as a lower bound to avoid resizers encountering 566 * excessive memory contention. The value should be at least 567 * DEFAULT_CAPACITY. 568 */ 569 private static final int MIN_TRANSFER_STRIDE = 16; 570 571 /** 572 * The number of bits used for generation stamp in sizeCtl. 573 * Must be at least 6 for 32bit arrays. 574 */ 575 private static final int RESIZE_STAMP_BITS = 16; 576 577 /** 578 * The maximum number of threads that can help resize. 579 * Must fit in 32 - RESIZE_STAMP_BITS bits. 580 */ 581 private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; 582 583 /** 584 * The bit shift for recording size stamp in sizeCtl. 585 */ 586 private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; 587 588 /* 589 * Encodings for Node hash fields. See above for explanation. 590 */ 591 static final int MOVED = -1; // hash for forwarding nodes 592 static final int TREEBIN = -2; // hash for roots of trees 593 static final int RESERVED = -3; // hash for transient reservations 594 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash 595 596 /** Number of CPUS, to place bounds on some sizings */ 597 static final int NCPU = Runtime.getRuntime().availableProcessors(); 598 599 /** 600 * Serialized pseudo-fields, provided only for jdk7 compatibility. 601 * @serialField segments Segment[] 602 * The segments, each of which is a specialized hash table. 603 * @serialField segmentMask int 604 * Mask value for indexing into segments. The upper bits of a 605 * key's hash code are used to choose the segment. 
606 * @serialField segmentShift int 607 * Shift value for indexing within segments. 608 */ 609 private static final ObjectStreamField[] serialPersistentFields = { 610 new ObjectStreamField("segments", Segment[].class), 611 new ObjectStreamField("segmentMask", Integer.TYPE), 612 new ObjectStreamField("segmentShift", Integer.TYPE), 613 }; 614 615 /* ---------------- Nodes -------------- */ 616 617 /** 618 * Key-value entry. This class is never exported out as a 619 * user-mutable Map.Entry (i.e., one supporting setValue; see 620 * MapEntry below), but can be used for read-only traversals used 621 * in bulk tasks. Subclasses of Node with a negative hash field 622 * are special, and contain null keys and values (but are never 623 * exported). Otherwise, keys and vals are never null. 624 */ 625 static class Node<K,V> implements Map.Entry<K,V> { 626 final int hash; 627 final K key; 628 volatile V val; 629 volatile Node<K,V> next; 630 Node(int hash, K key, V val, Node<K,V> next)631 Node(int hash, K key, V val, Node<K,V> next) { 632 this.hash = hash; 633 this.key = key; 634 this.val = val; 635 this.next = next; 636 } 637 getKey()638 public final K getKey() { return key; } getValue()639 public final V getValue() { return val; } hashCode()640 public final int hashCode() { return key.hashCode() ^ val.hashCode(); } toString()641 public final String toString() { 642 return Helpers.mapEntryToString(key, val); 643 } setValue(V value)644 public final V setValue(V value) { 645 throw new UnsupportedOperationException(); 646 } 647 equals(Object o)648 public final boolean equals(Object o) { 649 Object k, v, u; Map.Entry<?,?> e; 650 return ((o instanceof Map.Entry) && 651 (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 652 (v = e.getValue()) != null && 653 (k == key || k.equals(key)) && 654 (v == (u = val) || v.equals(u))); 655 } 656 657 /** 658 * Virtualized support for map.get(); overridden in subclasses. 659 */ find(int h, Object k)660 Node<K,V> find(int h, Object k) { 661 Node<K,V> e = this; 662 if (k != null) { 663 do { 664 K ek; 665 if (e.hash == h && 666 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 667 return e; 668 } while ((e = e.next) != null); 669 } 670 return null; 671 } 672 } 673 674 /* ---------------- Static utilities -------------- */ 675 676 /** 677 * Spreads (XORs) higher bits of hash to lower and also forces top 678 * bit to 0. Because the table uses power-of-two masking, sets of 679 * hashes that vary only in bits above the current mask will 680 * always collide. (Among known examples are sets of Float keys 681 * holding consecutive whole numbers in small tables.) So we 682 * apply a transform that spreads the impact of higher bits 683 * downward. There is a tradeoff between speed, utility, and 684 * quality of bit-spreading. Because many common sets of hashes 685 * are already reasonably distributed (so don't benefit from 686 * spreading), and because we use trees to handle large sets of 687 * collisions in bins, we just XOR some shifted bits in the 688 * cheapest possible way to reduce systematic lossage, as well as 689 * to incorporate impact of the highest bits that would otherwise 690 * never be used in index calculations because of table bounds. 691 */ spread(int h)692 static final int spread(int h) { 693 return (h ^ (h >>> 16)) & HASH_BITS; 694 } 695 696 /** 697 * Returns a power of two table size for the given desired capacity. 
698 * See Hackers Delight, sec 3.2 699 */ tableSizeFor(int c)700 private static final int tableSizeFor(int c) { 701 int n = c - 1; 702 n |= n >>> 1; 703 n |= n >>> 2; 704 n |= n >>> 4; 705 n |= n >>> 8; 706 n |= n >>> 16; 707 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; 708 } 709 710 /** 711 * Returns x's Class if it is of the form "class C implements 712 * Comparable<C>", else null. 713 */ comparableClassFor(Object x)714 static Class<?> comparableClassFor(Object x) { 715 if (x instanceof Comparable) { 716 Class<?> c; Type[] ts, as; Type t; ParameterizedType p; 717 if ((c = x.getClass()) == String.class) // bypass checks 718 return c; 719 if ((ts = c.getGenericInterfaces()) != null) { 720 for (int i = 0; i < ts.length; ++i) { 721 if (((t = ts[i]) instanceof ParameterizedType) && 722 ((p = (ParameterizedType)t).getRawType() == 723 Comparable.class) && 724 (as = p.getActualTypeArguments()) != null && 725 as.length == 1 && as[0] == c) // type arg is c 726 return c; 727 } 728 } 729 } 730 return null; 731 } 732 733 /** 734 * Returns k.compareTo(x) if x matches kc (k's screened comparable 735 * class), else 0. 736 */ 737 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable compareComparables(Class<?> kc, Object k, Object x)738 static int compareComparables(Class<?> kc, Object k, Object x) { 739 return (x == null || x.getClass() != kc ? 0 : 740 ((Comparable)k).compareTo(x)); 741 } 742 743 /* ---------------- Table element access -------------- */ 744 745 /* 746 * Volatile access methods are used for table elements as well as 747 * elements of in-progress next table while resizing. All uses of 748 * the tab arguments must be null checked by callers. All callers 749 * also paranoically precheck that tab's length is not zero (or an 750 * equivalent check), thus ensuring that any index argument taking 751 * the form of a hash value anded with (length - 1) is a valid 752 * index. Note that, to be correct wrt arbitrary concurrency 753 * errors by users, these checks must operate on local variables, 754 * which accounts for some odd-looking inline assignments below. 755 * Note that calls to setTabAt always occur within locked regions, 756 * and so in principle require only release ordering, not 757 * full volatile semantics, but are currently coded as volatile 758 * writes to be conservative. 759 */ 760 761 @SuppressWarnings("unchecked") tabAt(Node<K,V>[] tab, int i)762 static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) { 763 return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); 764 } 765 casTabAt(Node<K,V>[] tab, int i, Node<K,V> c, Node<K,V> v)766 static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i, 767 Node<K,V> c, Node<K,V> v) { 768 return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); 769 } 770 setTabAt(Node<K,V>[] tab, int i, Node<K,V> v)771 static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { 772 U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); 773 } 774 775 /* ---------------- Fields -------------- */ 776 777 /** 778 * The array of bins. Lazily initialized upon first insertion. 779 * Size is always a power of two. Accessed directly by iterators. 780 */ 781 transient volatile Node<K,V>[] table; 782 783 /** 784 * The next table to use; non-null only while resizing. 785 */ 786 private transient volatile Node<K,V>[] nextTable; 787 788 /** 789 * Base counter value, used mainly when there is no contention, 790 * but also as a fallback during table initialization 791 * races. 
Updated via CAS. 792 */ 793 private transient volatile long baseCount; 794 795 /** 796 * Table initialization and resizing control. When negative, the 797 * table is being initialized or resized: -1 for initialization, 798 * else -(1 + the number of active resizing threads). Otherwise, 799 * when table is null, holds the initial table size to use upon 800 * creation, or 0 for default. After initialization, holds the 801 * next element count value upon which to resize the table. 802 */ 803 private transient volatile int sizeCtl; 804 805 /** 806 * The next table index (plus one) to split while resizing. 807 */ 808 private transient volatile int transferIndex; 809 810 /** 811 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. 812 */ 813 private transient volatile int cellsBusy; 814 815 /** 816 * Table of counter cells. When non-null, size is a power of 2. 817 */ 818 private transient volatile CounterCell[] counterCells; 819 820 // views 821 private transient KeySetView<K,V> keySet; 822 private transient ValuesView<K,V> values; 823 private transient EntrySetView<K,V> entrySet; 824 825 826 /* ---------------- Public operations -------------- */ 827 828 /** 829 * Creates a new, empty map with the default initial table size (16). 830 */ ConcurrentHashMap()831 public ConcurrentHashMap() { 832 } 833 834 /** 835 * Creates a new, empty map with an initial table size 836 * accommodating the specified number of elements without the need 837 * to dynamically resize. 838 * 839 * @param initialCapacity The implementation performs internal 840 * sizing to accommodate this many elements. 841 * @throws IllegalArgumentException if the initial capacity of 842 * elements is negative 843 */ ConcurrentHashMap(int initialCapacity)844 public ConcurrentHashMap(int initialCapacity) { 845 if (initialCapacity < 0) 846 throw new IllegalArgumentException(); 847 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? 848 MAXIMUM_CAPACITY : 849 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); 850 this.sizeCtl = cap; 851 } 852 853 /** 854 * Creates a new map with the same mappings as the given map. 855 * 856 * @param m the map 857 */ ConcurrentHashMap(Map<? extends K, ? extends V> m)858 public ConcurrentHashMap(Map<? extends K, ? extends V> m) { 859 this.sizeCtl = DEFAULT_CAPACITY; 860 putAll(m); 861 } 862 863 /** 864 * Creates a new, empty map with an initial table size based on 865 * the given number of elements ({@code initialCapacity}) and 866 * initial table density ({@code loadFactor}). 867 * 868 * @param initialCapacity the initial capacity. The implementation 869 * performs internal sizing to accommodate this many elements, 870 * given the specified load factor. 871 * @param loadFactor the load factor (table density) for 872 * establishing the initial table size 873 * @throws IllegalArgumentException if the initial capacity of 874 * elements is negative or the load factor is nonpositive 875 * 876 * @since 1.6 877 */ ConcurrentHashMap(int initialCapacity, float loadFactor)878 public ConcurrentHashMap(int initialCapacity, float loadFactor) { 879 this(initialCapacity, loadFactor, 1); 880 } 881 882 /** 883 * Creates a new, empty map with an initial table size based on 884 * the given number of elements ({@code initialCapacity}), table 885 * density ({@code loadFactor}), and number of concurrently 886 * updating threads ({@code concurrencyLevel}). 887 * 888 * @param initialCapacity the initial capacity. 
The implementation 889 * performs internal sizing to accommodate this many elements, 890 * given the specified load factor. 891 * @param loadFactor the load factor (table density) for 892 * establishing the initial table size 893 * @param concurrencyLevel the estimated number of concurrently 894 * updating threads. The implementation may use this value as 895 * a sizing hint. 896 * @throws IllegalArgumentException if the initial capacity is 897 * negative or the load factor or concurrencyLevel are 898 * nonpositive 899 */ ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel)900 public ConcurrentHashMap(int initialCapacity, 901 float loadFactor, int concurrencyLevel) { 902 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) 903 throw new IllegalArgumentException(); 904 if (initialCapacity < concurrencyLevel) // Use at least as many bins 905 initialCapacity = concurrencyLevel; // as estimated threads 906 long size = (long)(1.0 + (long)initialCapacity / loadFactor); 907 int cap = (size >= (long)MAXIMUM_CAPACITY) ? 908 MAXIMUM_CAPACITY : tableSizeFor((int)size); 909 this.sizeCtl = cap; 910 } 911 912 // Original (since JDK1.2) Map methods 913 914 /** 915 * {@inheritDoc} 916 */ size()917 public int size() { 918 long n = sumCount(); 919 return ((n < 0L) ? 0 : 920 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : 921 (int)n); 922 } 923 924 /** 925 * {@inheritDoc} 926 */ isEmpty()927 public boolean isEmpty() { 928 return sumCount() <= 0L; // ignore transient negative values 929 } 930 931 /** 932 * Returns the value to which the specified key is mapped, 933 * or {@code null} if this map contains no mapping for the key. 934 * 935 * <p>More formally, if this map contains a mapping from a key 936 * {@code k} to a value {@code v} such that {@code key.equals(k)}, 937 * then this method returns {@code v}; otherwise it returns 938 * {@code null}. (There can be at most one such mapping.) 939 * 940 * @throws NullPointerException if the specified key is null 941 */ get(Object key)942 public V get(Object key) { 943 Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek; 944 int h = spread(key.hashCode()); 945 if ((tab = table) != null && (n = tab.length) > 0 && 946 (e = tabAt(tab, (n - 1) & h)) != null) { 947 if ((eh = e.hash) == h) { 948 if ((ek = e.key) == key || (ek != null && key.equals(ek))) 949 return e.val; 950 } 951 else if (eh < 0) 952 return (p = e.find(h, key)) != null ? p.val : null; 953 while ((e = e.next) != null) { 954 if (e.hash == h && 955 ((ek = e.key) == key || (ek != null && key.equals(ek)))) 956 return e.val; 957 } 958 } 959 return null; 960 } 961 962 /** 963 * Tests if the specified object is a key in this table. 964 * 965 * @param key possible key 966 * @return {@code true} if and only if the specified object 967 * is a key in this table, as determined by the 968 * {@code equals} method; {@code false} otherwise 969 * @throws NullPointerException if the specified key is null 970 */ containsKey(Object key)971 public boolean containsKey(Object key) { 972 return get(key) != null; 973 } 974 975 /** 976 * Returns {@code true} if this map maps one or more keys to the 977 * specified value. Note: This method may require a full traversal 978 * of the map, and is much slower than method {@code containsKey}. 
979 * 980 * @param value value whose presence in this map is to be tested 981 * @return {@code true} if this map maps one or more keys to the 982 * specified value 983 * @throws NullPointerException if the specified value is null 984 */ containsValue(Object value)985 public boolean containsValue(Object value) { 986 if (value == null) 987 throw new NullPointerException(); 988 Node<K,V>[] t; 989 if ((t = table) != null) { 990 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 991 for (Node<K,V> p; (p = it.advance()) != null; ) { 992 V v; 993 if ((v = p.val) == value || (v != null && value.equals(v))) 994 return true; 995 } 996 } 997 return false; 998 } 999 1000 /** 1001 * Maps the specified key to the specified value in this table. 1002 * Neither the key nor the value can be null. 1003 * 1004 * <p>The value can be retrieved by calling the {@code get} method 1005 * with a key that is equal to the original key. 1006 * 1007 * @param key key with which the specified value is to be associated 1008 * @param value value to be associated with the specified key 1009 * @return the previous value associated with {@code key}, or 1010 * {@code null} if there was no mapping for {@code key} 1011 * @throws NullPointerException if the specified key or value is null 1012 */ put(K key, V value)1013 public V put(K key, V value) { 1014 return putVal(key, value, false); 1015 } 1016 1017 /** Implementation for put and putIfAbsent */ putVal(K key, V value, boolean onlyIfAbsent)1018 final V putVal(K key, V value, boolean onlyIfAbsent) { 1019 if (key == null || value == null) throw new NullPointerException(); 1020 int hash = spread(key.hashCode()); 1021 int binCount = 0; 1022 for (Node<K,V>[] tab = table;;) { 1023 Node<K,V> f; int n, i, fh; 1024 if (tab == null || (n = tab.length) == 0) 1025 tab = initTable(); 1026 else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { 1027 if (casTabAt(tab, i, null, 1028 new Node<K,V>(hash, key, value, null))) 1029 break; // no lock when adding to empty bin 1030 } 1031 else if ((fh = f.hash) == MOVED) 1032 tab = helpTransfer(tab, f); 1033 else { 1034 V oldVal = null; 1035 synchronized (f) { 1036 if (tabAt(tab, i) == f) { 1037 if (fh >= 0) { 1038 binCount = 1; 1039 for (Node<K,V> e = f;; ++binCount) { 1040 K ek; 1041 if (e.hash == hash && 1042 ((ek = e.key) == key || 1043 (ek != null && key.equals(ek)))) { 1044 oldVal = e.val; 1045 if (!onlyIfAbsent) 1046 e.val = value; 1047 break; 1048 } 1049 Node<K,V> pred = e; 1050 if ((e = e.next) == null) { 1051 pred.next = new Node<K,V>(hash, key, 1052 value, null); 1053 break; 1054 } 1055 } 1056 } 1057 else if (f instanceof TreeBin) { 1058 Node<K,V> p; 1059 binCount = 2; 1060 if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key, 1061 value)) != null) { 1062 oldVal = p.val; 1063 if (!onlyIfAbsent) 1064 p.val = value; 1065 } 1066 } 1067 else if (f instanceof ReservationNode) 1068 throw new IllegalStateException("Recursive update"); 1069 } 1070 } 1071 if (binCount != 0) { 1072 if (binCount >= TREEIFY_THRESHOLD) 1073 treeifyBin(tab, i); 1074 if (oldVal != null) 1075 return oldVal; 1076 break; 1077 } 1078 } 1079 } 1080 addCount(1L, binCount); 1081 return null; 1082 } 1083 1084 /** 1085 * Copies all of the mappings from the specified map to this one. 1086 * These mappings replace any mappings that this map had for any of the 1087 * keys currently in the specified map. 1088 * 1089 * @param m mappings to be stored in this map 1090 */ putAll(Map<? extends K, ? extends V> m)1091 public void putAll(Map<? extends K, ? 
extends V> m) { 1092 tryPresize(m.size()); 1093 for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) 1094 putVal(e.getKey(), e.getValue(), false); 1095 } 1096 1097 /** 1098 * Removes the key (and its corresponding value) from this map. 1099 * This method does nothing if the key is not in the map. 1100 * 1101 * @param key the key that needs to be removed 1102 * @return the previous value associated with {@code key}, or 1103 * {@code null} if there was no mapping for {@code key} 1104 * @throws NullPointerException if the specified key is null 1105 */ remove(Object key)1106 public V remove(Object key) { 1107 return replaceNode(key, null, null); 1108 } 1109 1110 /** 1111 * Implementation for the four public remove/replace methods: 1112 * Replaces node value with v, conditional upon match of cv if 1113 * non-null. If resulting value is null, delete. 1114 */ replaceNode(Object key, V value, Object cv)1115 final V replaceNode(Object key, V value, Object cv) { 1116 int hash = spread(key.hashCode()); 1117 for (Node<K,V>[] tab = table;;) { 1118 Node<K,V> f; int n, i, fh; 1119 if (tab == null || (n = tab.length) == 0 || 1120 (f = tabAt(tab, i = (n - 1) & hash)) == null) 1121 break; 1122 else if ((fh = f.hash) == MOVED) 1123 tab = helpTransfer(tab, f); 1124 else { 1125 V oldVal = null; 1126 boolean validated = false; 1127 synchronized (f) { 1128 if (tabAt(tab, i) == f) { 1129 if (fh >= 0) { 1130 validated = true; 1131 for (Node<K,V> e = f, pred = null;;) { 1132 K ek; 1133 if (e.hash == hash && 1134 ((ek = e.key) == key || 1135 (ek != null && key.equals(ek)))) { 1136 V ev = e.val; 1137 if (cv == null || cv == ev || 1138 (ev != null && cv.equals(ev))) { 1139 oldVal = ev; 1140 if (value != null) 1141 e.val = value; 1142 else if (pred != null) 1143 pred.next = e.next; 1144 else 1145 setTabAt(tab, i, e.next); 1146 } 1147 break; 1148 } 1149 pred = e; 1150 if ((e = e.next) == null) 1151 break; 1152 } 1153 } 1154 else if (f instanceof TreeBin) { 1155 validated = true; 1156 TreeBin<K,V> t = (TreeBin<K,V>)f; 1157 TreeNode<K,V> r, p; 1158 if ((r = t.root) != null && 1159 (p = r.findTreeNode(hash, key, null)) != null) { 1160 V pv = p.val; 1161 if (cv == null || cv == pv || 1162 (pv != null && cv.equals(pv))) { 1163 oldVal = pv; 1164 if (value != null) 1165 p.val = value; 1166 else if (t.removeTreeNode(p)) 1167 setTabAt(tab, i, untreeify(t.first)); 1168 } 1169 } 1170 } 1171 else if (f instanceof ReservationNode) 1172 throw new IllegalStateException("Recursive update"); 1173 } 1174 } 1175 if (validated) { 1176 if (oldVal != null) { 1177 if (value == null) 1178 addCount(-1L, -1); 1179 return oldVal; 1180 } 1181 break; 1182 } 1183 } 1184 } 1185 return null; 1186 } 1187 1188 /** 1189 * Removes all of the mappings from this map. 1190 */ clear()1191 public void clear() { 1192 long delta = 0L; // negative number of deletions 1193 int i = 0; 1194 Node<K,V>[] tab = table; 1195 while (tab != null && i < tab.length) { 1196 int fh; 1197 Node<K,V> f = tabAt(tab, i); 1198 if (f == null) 1199 ++i; 1200 else if ((fh = f.hash) == MOVED) { 1201 tab = helpTransfer(tab, f); 1202 i = 0; // restart 1203 } 1204 else { 1205 synchronized (f) { 1206 if (tabAt(tab, i) == f) { 1207 Node<K,V> p = (fh >= 0 ? f : 1208 (f instanceof TreeBin) ? 1209 ((TreeBin<K,V>)f).first : null); 1210 while (p != null) { 1211 --delta; 1212 p = p.next; 1213 } 1214 setTabAt(tab, i++, null); 1215 } 1216 } 1217 } 1218 } 1219 if (delta != 0L) 1220 addCount(delta, -1); 1221 } 1222 1223 /** 1224 * Returns a {@link Set} view of the keys contained in this map. 
1225 * The set is backed by the map, so changes to the map are 1226 * reflected in the set, and vice-versa. The set supports element 1227 * removal, which removes the corresponding mapping from this map, 1228 * via the {@code Iterator.remove}, {@code Set.remove}, 1229 * {@code removeAll}, {@code retainAll}, and {@code clear} 1230 * operations. It does not support the {@code add} or 1231 * {@code addAll} operations. 1232 * 1233 * <p> The set returned by this method is guaranteed to an instance of 1234 * {@link KeySetView}. 1235 * 1236 * <p>The view's iterators and spliterators are 1237 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 1238 * 1239 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, 1240 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. 1241 * 1242 * @return the set view 1243 */ 1244 // Android-changed: Return type for backwards compat. Was KeySetView<K,V>. http://b/28099367 1245 @dalvik.annotation.codegen.CovariantReturnType(returnType = KeySetView.class, presentAfter = 28) keySet()1246 public Set<K> keySet() { 1247 KeySetView<K,V> ks; 1248 return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null)); 1249 } 1250 1251 /** 1252 * Returns a {@link Collection} view of the values contained in this map. 1253 * The collection is backed by the map, so changes to the map are 1254 * reflected in the collection, and vice-versa. The collection 1255 * supports element removal, which removes the corresponding 1256 * mapping from this map, via the {@code Iterator.remove}, 1257 * {@code Collection.remove}, {@code removeAll}, 1258 * {@code retainAll}, and {@code clear} operations. It does not 1259 * support the {@code add} or {@code addAll} operations. 1260 * 1261 * <p>The view's iterators and spliterators are 1262 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 1263 * 1264 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT} 1265 * and {@link Spliterator#NONNULL}. 1266 * 1267 * @return the collection view 1268 */ values()1269 public Collection<V> values() { 1270 ValuesView<K,V> vs; 1271 return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this)); 1272 } 1273 1274 /** 1275 * Returns a {@link Set} view of the mappings contained in this map. 1276 * The set is backed by the map, so changes to the map are 1277 * reflected in the set, and vice-versa. The set supports element 1278 * removal, which removes the corresponding mapping from the map, 1279 * via the {@code Iterator.remove}, {@code Set.remove}, 1280 * {@code removeAll}, {@code retainAll}, and {@code clear} 1281 * operations. 1282 * 1283 * <p>The view's iterators and spliterators are 1284 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 1285 * 1286 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, 1287 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. 1288 * 1289 * @return the set view 1290 */ entrySet()1291 public Set<Map.Entry<K,V>> entrySet() { 1292 EntrySetView<K,V> es; 1293 return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this)); 1294 } 1295 1296 /** 1297 * Returns the hash code value for this {@link Map}, i.e., 1298 * the sum of, for each key-value pair in the map, 1299 * {@code key.hashCode() ^ value.hashCode()}. 
1300 * 1301 * @return the hash code value for this map 1302 */ hashCode()1303 public int hashCode() { 1304 int h = 0; 1305 Node<K,V>[] t; 1306 if ((t = table) != null) { 1307 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1308 for (Node<K,V> p; (p = it.advance()) != null; ) 1309 h += p.key.hashCode() ^ p.val.hashCode(); 1310 } 1311 return h; 1312 } 1313 1314 /** 1315 * Returns a string representation of this map. The string 1316 * representation consists of a list of key-value mappings (in no 1317 * particular order) enclosed in braces ("{@code {}}"). Adjacent 1318 * mappings are separated by the characters {@code ", "} (comma 1319 * and space). Each key-value mapping is rendered as the key 1320 * followed by an equals sign ("{@code =}") followed by the 1321 * associated value. 1322 * 1323 * @return a string representation of this map 1324 */ toString()1325 public String toString() { 1326 Node<K,V>[] t; 1327 int f = (t = table) == null ? 0 : t.length; 1328 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); 1329 StringBuilder sb = new StringBuilder(); 1330 sb.append('{'); 1331 Node<K,V> p; 1332 if ((p = it.advance()) != null) { 1333 for (;;) { 1334 K k = p.key; 1335 V v = p.val; 1336 sb.append(k == this ? "(this Map)" : k); 1337 sb.append('='); 1338 sb.append(v == this ? "(this Map)" : v); 1339 if ((p = it.advance()) == null) 1340 break; 1341 sb.append(',').append(' '); 1342 } 1343 } 1344 return sb.append('}').toString(); 1345 } 1346 1347 /** 1348 * Compares the specified object with this map for equality. 1349 * Returns {@code true} if the given object is a map with the same 1350 * mappings as this map. This operation may return misleading 1351 * results if either map is concurrently modified during execution 1352 * of this method. 1353 * 1354 * @param o object to be compared for equality with this map 1355 * @return {@code true} if the specified object is equal to this map 1356 */ equals(Object o)1357 public boolean equals(Object o) { 1358 if (o != this) { 1359 if (!(o instanceof Map)) 1360 return false; 1361 Map<?,?> m = (Map<?,?>) o; 1362 Node<K,V>[] t; 1363 int f = (t = table) == null ? 0 : t.length; 1364 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); 1365 for (Node<K,V> p; (p = it.advance()) != null; ) { 1366 V val = p.val; 1367 Object v = m.get(p.key); 1368 if (v == null || (v != val && !v.equals(val))) 1369 return false; 1370 } 1371 for (Map.Entry<?,?> e : m.entrySet()) { 1372 Object mk, mv, v; 1373 if ((mk = e.getKey()) == null || 1374 (mv = e.getValue()) == null || 1375 (v = get(mk)) == null || 1376 (mv != v && !mv.equals(v))) 1377 return false; 1378 } 1379 } 1380 return true; 1381 } 1382 1383 /** 1384 * Stripped-down version of helper class used in previous version, 1385 * declared for the sake of serialization compatibility. 1386 */ 1387 static class Segment<K,V> extends ReentrantLock implements Serializable { 1388 private static final long serialVersionUID = 2249069246763182397L; 1389 final float loadFactor; Segment(float lf)1390 Segment(float lf) { this.loadFactor = lf; } 1391 } 1392 1393 /** 1394 * Saves the state of the {@code ConcurrentHashMap} instance to a 1395 * stream (i.e., serializes it). 1396 * @param s the stream 1397 * @throws java.io.IOException if an I/O error occurs 1398 * @serialData 1399 * the serialized fields, followed by the key (Object) and value 1400 * (Object) for each key-value mapping, followed by a null pair. 1401 * The key-value mappings are emitted in no particular order. 
1402 */ writeObject(java.io.ObjectOutputStream s)1403 private void writeObject(java.io.ObjectOutputStream s) 1404 throws java.io.IOException { 1405 // For serialization compatibility 1406 // Emulate segment calculation from previous version of this class 1407 int sshift = 0; 1408 int ssize = 1; 1409 while (ssize < DEFAULT_CONCURRENCY_LEVEL) { 1410 ++sshift; 1411 ssize <<= 1; 1412 } 1413 int segmentShift = 32 - sshift; 1414 int segmentMask = ssize - 1; 1415 @SuppressWarnings("unchecked") 1416 Segment<K,V>[] segments = (Segment<K,V>[]) 1417 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL]; 1418 for (int i = 0; i < segments.length; ++i) 1419 segments[i] = new Segment<K,V>(LOAD_FACTOR); 1420 java.io.ObjectOutputStream.PutField streamFields = s.putFields(); 1421 streamFields.put("segments", segments); 1422 streamFields.put("segmentShift", segmentShift); 1423 streamFields.put("segmentMask", segmentMask); 1424 s.writeFields(); 1425 1426 Node<K,V>[] t; 1427 if ((t = table) != null) { 1428 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1429 for (Node<K,V> p; (p = it.advance()) != null; ) { 1430 s.writeObject(p.key); 1431 s.writeObject(p.val); 1432 } 1433 } 1434 s.writeObject(null); 1435 s.writeObject(null); 1436 } 1437 1438 /** 1439 * Reconstitutes the instance from a stream (that is, deserializes it). 1440 * @param s the stream 1441 * @throws ClassNotFoundException if the class of a serialized object 1442 * could not be found 1443 * @throws java.io.IOException if an I/O error occurs 1444 */ readObject(java.io.ObjectInputStream s)1445 private void readObject(java.io.ObjectInputStream s) 1446 throws java.io.IOException, ClassNotFoundException { 1447 /* 1448 * To improve performance in typical cases, we create nodes 1449 * while reading, then place in table once size is known. 1450 * However, we must also validate uniqueness and deal with 1451 * overpopulated bins while doing so, which requires 1452 * specialized versions of putVal mechanics. 
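 *
 * Nodes read from the stream are first pushed onto a stack linked
 * through their "next" fields; once the size is known, the loop
 * below unlinks each node into its bin of the freshly sized table,
 * converting a bin into a TreeBin when it reaches TREEIFY_THRESHOLD.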
1453 */ 1454 sizeCtl = -1; // force exclusion for table construction 1455 s.defaultReadObject(); 1456 long size = 0L; 1457 Node<K,V> p = null; 1458 for (;;) { 1459 @SuppressWarnings("unchecked") 1460 K k = (K) s.readObject(); 1461 @SuppressWarnings("unchecked") 1462 V v = (V) s.readObject(); 1463 if (k != null && v != null) { 1464 p = new Node<K,V>(spread(k.hashCode()), k, v, p); 1465 ++size; 1466 } 1467 else 1468 break; 1469 } 1470 if (size == 0L) 1471 sizeCtl = 0; 1472 else { 1473 int n; 1474 if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) 1475 n = MAXIMUM_CAPACITY; 1476 else { 1477 int sz = (int)size; 1478 n = tableSizeFor(sz + (sz >>> 1) + 1); 1479 } 1480 @SuppressWarnings("unchecked") 1481 Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n]; 1482 int mask = n - 1; 1483 long added = 0L; 1484 while (p != null) { 1485 boolean insertAtFront; 1486 Node<K,V> next = p.next, first; 1487 int h = p.hash, j = h & mask; 1488 if ((first = tabAt(tab, j)) == null) 1489 insertAtFront = true; 1490 else { 1491 K k = p.key; 1492 if (first.hash < 0) { 1493 TreeBin<K,V> t = (TreeBin<K,V>)first; 1494 if (t.putTreeVal(h, k, p.val) == null) 1495 ++added; 1496 insertAtFront = false; 1497 } 1498 else { 1499 int binCount = 0; 1500 insertAtFront = true; 1501 Node<K,V> q; K qk; 1502 for (q = first; q != null; q = q.next) { 1503 if (q.hash == h && 1504 ((qk = q.key) == k || 1505 (qk != null && k.equals(qk)))) { 1506 insertAtFront = false; 1507 break; 1508 } 1509 ++binCount; 1510 } 1511 if (insertAtFront && binCount >= TREEIFY_THRESHOLD) { 1512 insertAtFront = false; 1513 ++added; 1514 p.next = first; 1515 TreeNode<K,V> hd = null, tl = null; 1516 for (q = p; q != null; q = q.next) { 1517 TreeNode<K,V> t = new TreeNode<K,V> 1518 (q.hash, q.key, q.val, null, null); 1519 if ((t.prev = tl) == null) 1520 hd = t; 1521 else 1522 tl.next = t; 1523 tl = t; 1524 } 1525 setTabAt(tab, j, new TreeBin<K,V>(hd)); 1526 } 1527 } 1528 } 1529 if (insertAtFront) { 1530 ++added; 1531 p.next = first; 1532 setTabAt(tab, j, p); 1533 } 1534 p = next; 1535 } 1536 table = tab; 1537 sizeCtl = n - (n >>> 2); 1538 baseCount = added; 1539 } 1540 } 1541 1542 // ConcurrentMap methods 1543 1544 /** 1545 * {@inheritDoc} 1546 * 1547 * @return the previous value associated with the specified key, 1548 * or {@code null} if there was no mapping for the key 1549 * @throws NullPointerException if the specified key or value is null 1550 */ putIfAbsent(K key, V value)1551 public V putIfAbsent(K key, V value) { 1552 return putVal(key, value, true); 1553 } 1554 1555 /** 1556 * {@inheritDoc} 1557 * 1558 * @throws NullPointerException if the specified key is null 1559 */ remove(Object key, Object value)1560 public boolean remove(Object key, Object value) { 1561 if (key == null) 1562 throw new NullPointerException(); 1563 return value != null && replaceNode(key, null, value) != null; 1564 } 1565 1566 /** 1567 * {@inheritDoc} 1568 * 1569 * @throws NullPointerException if any of the arguments are null 1570 */ replace(K key, V oldValue, V newValue)1571 public boolean replace(K key, V oldValue, V newValue) { 1572 if (key == null || oldValue == null || newValue == null) 1573 throw new NullPointerException(); 1574 return replaceNode(key, newValue, oldValue) != null; 1575 } 1576 1577 /** 1578 * {@inheritDoc} 1579 * 1580 * @return the previous value associated with the specified key, 1581 * or {@code null} if there was no mapping for the key 1582 * @throws NullPointerException if the specified key or value is null 1583 */ replace(K key, V value)1584 public V replace(K key, V 
value) { 1585 if (key == null || value == null) 1586 throw new NullPointerException(); 1587 return replaceNode(key, value, null); 1588 } 1589 1590 // Overrides of JDK8+ Map extension method defaults 1591 1592 /** 1593 * Returns the value to which the specified key is mapped, or the 1594 * given default value if this map contains no mapping for the 1595 * key. 1596 * 1597 * @param key the key whose associated value is to be returned 1598 * @param defaultValue the value to return if this map contains 1599 * no mapping for the given key 1600 * @return the mapping for the key, if present; else the default value 1601 * @throws NullPointerException if the specified key is null 1602 */ getOrDefault(Object key, V defaultValue)1603 public V getOrDefault(Object key, V defaultValue) { 1604 V v; 1605 return (v = get(key)) == null ? defaultValue : v; 1606 } 1607 forEach(BiConsumer<? super K, ? super V> action)1608 public void forEach(BiConsumer<? super K, ? super V> action) { 1609 if (action == null) throw new NullPointerException(); 1610 Node<K,V>[] t; 1611 if ((t = table) != null) { 1612 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1613 for (Node<K,V> p; (p = it.advance()) != null; ) { 1614 action.accept(p.key, p.val); 1615 } 1616 } 1617 } 1618 replaceAll(BiFunction<? super K, ? super V, ? extends V> function)1619 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { 1620 if (function == null) throw new NullPointerException(); 1621 Node<K,V>[] t; 1622 if ((t = table) != null) { 1623 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1624 for (Node<K,V> p; (p = it.advance()) != null; ) { 1625 V oldValue = p.val; 1626 for (K key = p.key;;) { 1627 V newValue = function.apply(key, oldValue); 1628 if (newValue == null) 1629 throw new NullPointerException(); 1630 if (replaceNode(key, newValue, oldValue) != null || 1631 (oldValue = get(key)) == null) 1632 break; 1633 } 1634 } 1635 } 1636 } 1637 1638 /** 1639 * Helper method for EntrySetView.removeIf. 1640 */ removeEntryIf(Predicate<? super Entry<K,V>> function)1641 boolean removeEntryIf(Predicate<? super Entry<K,V>> function) { 1642 if (function == null) throw new NullPointerException(); 1643 Node<K,V>[] t; 1644 boolean removed = false; 1645 if ((t = table) != null) { 1646 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1647 for (Node<K,V> p; (p = it.advance()) != null; ) { 1648 K k = p.key; 1649 V v = p.val; 1650 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v); 1651 if (function.test(e) && replaceNode(k, null, v) != null) 1652 removed = true; 1653 } 1654 } 1655 return removed; 1656 } 1657 1658 /** 1659 * Helper method for ValuesView.removeIf. 1660 */ removeValueIf(Predicate<? super V> function)1661 boolean removeValueIf(Predicate<? super V> function) { 1662 if (function == null) throw new NullPointerException(); 1663 Node<K,V>[] t; 1664 boolean removed = false; 1665 if ((t = table) != null) { 1666 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1667 for (Node<K,V> p; (p = it.advance()) != null; ) { 1668 K k = p.key; 1669 V v = p.val; 1670 if (function.test(v) && replaceNode(k, null, v) != null) 1671 removed = true; 1672 } 1673 } 1674 return removed; 1675 } 1676 1677 /** 1678 * If the specified key is not already associated with a value, 1679 * attempts to compute its value using the given mapping function 1680 * and enters it into this map unless {@code null}. 
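 * For example (an illustrative use, with {@code loadValue} standing
 * for any user-supplied function), a concurrent memoizing cache can
 * be populated via {@code map.computeIfAbsent(key, k -> loadValue(k))}.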
The entire 1681 * method invocation is performed atomically, so the function is 1682 * applied at most once per key. Some attempted update operations 1683 * on this map by other threads may be blocked while computation 1684 * is in progress, so the computation should be short and simple, 1685 * and must not attempt to update any other mappings of this map. 1686 * 1687 * @param key key with which the specified value is to be associated 1688 * @param mappingFunction the function to compute a value 1689 * @return the current (existing or computed) value associated with 1690 * the specified key, or null if the computed value is null 1691 * @throws NullPointerException if the specified key or mappingFunction 1692 * is null 1693 * @throws IllegalStateException if the computation detectably 1694 * attempts a recursive update to this map that would 1695 * otherwise never complete 1696 * @throws RuntimeException or Error if the mappingFunction does so, 1697 * in which case the mapping is left unestablished 1698 */ computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)1699 public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { 1700 if (key == null || mappingFunction == null) 1701 throw new NullPointerException(); 1702 int h = spread(key.hashCode()); 1703 V val = null; 1704 int binCount = 0; 1705 for (Node<K,V>[] tab = table;;) { 1706 Node<K,V> f; int n, i, fh; 1707 if (tab == null || (n = tab.length) == 0) 1708 tab = initTable(); 1709 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { 1710 Node<K,V> r = new ReservationNode<K,V>(); 1711 synchronized (r) { 1712 if (casTabAt(tab, i, null, r)) { 1713 binCount = 1; 1714 Node<K,V> node = null; 1715 try { 1716 if ((val = mappingFunction.apply(key)) != null) 1717 node = new Node<K,V>(h, key, val, null); 1718 } finally { 1719 setTabAt(tab, i, node); 1720 } 1721 } 1722 } 1723 if (binCount != 0) 1724 break; 1725 } 1726 else if ((fh = f.hash) == MOVED) 1727 tab = helpTransfer(tab, f); 1728 else { 1729 boolean added = false; 1730 synchronized (f) { 1731 if (tabAt(tab, i) == f) { 1732 if (fh >= 0) { 1733 binCount = 1; 1734 for (Node<K,V> e = f;; ++binCount) { 1735 K ek; 1736 if (e.hash == h && 1737 ((ek = e.key) == key || 1738 (ek != null && key.equals(ek)))) { 1739 val = e.val; 1740 break; 1741 } 1742 Node<K,V> pred = e; 1743 if ((e = e.next) == null) { 1744 if ((val = mappingFunction.apply(key)) != null) { 1745 if (pred.next != null) 1746 throw new IllegalStateException("Recursive update"); 1747 added = true; 1748 pred.next = new Node<K,V>(h, key, val, null); 1749 } 1750 break; 1751 } 1752 } 1753 } 1754 else if (f instanceof TreeBin) { 1755 binCount = 2; 1756 TreeBin<K,V> t = (TreeBin<K,V>)f; 1757 TreeNode<K,V> r, p; 1758 if ((r = t.root) != null && 1759 (p = r.findTreeNode(h, key, null)) != null) 1760 val = p.val; 1761 else if ((val = mappingFunction.apply(key)) != null) { 1762 added = true; 1763 t.putTreeVal(h, key, val); 1764 } 1765 } 1766 else if (f instanceof ReservationNode) 1767 throw new IllegalStateException("Recursive update"); 1768 } 1769 } 1770 if (binCount != 0) { 1771 if (binCount >= TREEIFY_THRESHOLD) 1772 treeifyBin(tab, i); 1773 if (!added) 1774 return val; 1775 break; 1776 } 1777 } 1778 } 1779 if (val != null) 1780 addCount(1L, binCount); 1781 return val; 1782 } 1783 1784 /** 1785 * If the value for the specified key is present, attempts to 1786 * compute a new mapping given the key and its current mapped 1787 * value. The entire method invocation is performed atomically. 
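 * For example (illustrative only, assuming {@code Integer} values), a
 * reference count can be decremented, removing the entry when the
 * count reaches zero, via
 * {@code map.computeIfPresent(key, (k, v) -> (v == 1) ? null : v - 1)}.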
1788 * Some attempted update operations on this map by other threads 1789 * may be blocked while computation is in progress, so the 1790 * computation should be short and simple, and must not attempt to 1791 * update any other mappings of this map. 1792 * 1793 * @param key key with which a value may be associated 1794 * @param remappingFunction the function to compute a value 1795 * @return the new value associated with the specified key, or null if none 1796 * @throws NullPointerException if the specified key or remappingFunction 1797 * is null 1798 * @throws IllegalStateException if the computation detectably 1799 * attempts a recursive update to this map that would 1800 * otherwise never complete 1801 * @throws RuntimeException or Error if the remappingFunction does so, 1802 * in which case the mapping is unchanged 1803 */ computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1804 public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1805 if (key == null || remappingFunction == null) 1806 throw new NullPointerException(); 1807 int h = spread(key.hashCode()); 1808 V val = null; 1809 int delta = 0; 1810 int binCount = 0; 1811 for (Node<K,V>[] tab = table;;) { 1812 Node<K,V> f; int n, i, fh; 1813 if (tab == null || (n = tab.length) == 0) 1814 tab = initTable(); 1815 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) 1816 break; 1817 else if ((fh = f.hash) == MOVED) 1818 tab = helpTransfer(tab, f); 1819 else { 1820 synchronized (f) { 1821 if (tabAt(tab, i) == f) { 1822 if (fh >= 0) { 1823 binCount = 1; 1824 for (Node<K,V> e = f, pred = null;; ++binCount) { 1825 K ek; 1826 if (e.hash == h && 1827 ((ek = e.key) == key || 1828 (ek != null && key.equals(ek)))) { 1829 val = remappingFunction.apply(key, e.val); 1830 if (val != null) 1831 e.val = val; 1832 else { 1833 delta = -1; 1834 Node<K,V> en = e.next; 1835 if (pred != null) 1836 pred.next = en; 1837 else 1838 setTabAt(tab, i, en); 1839 } 1840 break; 1841 } 1842 pred = e; 1843 if ((e = e.next) == null) 1844 break; 1845 } 1846 } 1847 else if (f instanceof TreeBin) { 1848 binCount = 2; 1849 TreeBin<K,V> t = (TreeBin<K,V>)f; 1850 TreeNode<K,V> r, p; 1851 if ((r = t.root) != null && 1852 (p = r.findTreeNode(h, key, null)) != null) { 1853 val = remappingFunction.apply(key, p.val); 1854 if (val != null) 1855 p.val = val; 1856 else { 1857 delta = -1; 1858 if (t.removeTreeNode(p)) 1859 setTabAt(tab, i, untreeify(t.first)); 1860 } 1861 } 1862 } 1863 else if (f instanceof ReservationNode) 1864 throw new IllegalStateException("Recursive update"); 1865 } 1866 } 1867 if (binCount != 0) 1868 break; 1869 } 1870 } 1871 if (delta != 0) 1872 addCount((long)delta, binCount); 1873 return val; 1874 } 1875 1876 /** 1877 * Attempts to compute a mapping for the specified key and its 1878 * current mapped value (or {@code null} if there is no current 1879 * mapping). The entire method invocation is performed atomically. 1880 * Some attempted update operations on this map by other threads 1881 * may be blocked while computation is in progress, so the 1882 * computation should be short and simple, and must not attempt to 1883 * update any other mappings of this Map. 
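 *
 * <p>For example (an illustrative use, assuming {@code String} values
 * and an in-scope {@code msg}), to create or append to a per-key
 * message:
 * <pre> {@code map.compute(key, (k, v) -> (v == null) ? msg : v + msg);}</pre>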
1884 * 1885 * @param key key with which the specified value is to be associated 1886 * @param remappingFunction the function to compute a value 1887 * @return the new value associated with the specified key, or null if none 1888 * @throws NullPointerException if the specified key or remappingFunction 1889 * is null 1890 * @throws IllegalStateException if the computation detectably 1891 * attempts a recursive update to this map that would 1892 * otherwise never complete 1893 * @throws RuntimeException or Error if the remappingFunction does so, 1894 * in which case the mapping is unchanged 1895 */ compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1896 public V compute(K key, 1897 BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1898 if (key == null || remappingFunction == null) 1899 throw new NullPointerException(); 1900 int h = spread(key.hashCode()); 1901 V val = null; 1902 int delta = 0; 1903 int binCount = 0; 1904 for (Node<K,V>[] tab = table;;) { 1905 Node<K,V> f; int n, i, fh; 1906 if (tab == null || (n = tab.length) == 0) 1907 tab = initTable(); 1908 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { 1909 Node<K,V> r = new ReservationNode<K,V>(); 1910 synchronized (r) { 1911 if (casTabAt(tab, i, null, r)) { 1912 binCount = 1; 1913 Node<K,V> node = null; 1914 try { 1915 if ((val = remappingFunction.apply(key, null)) != null) { 1916 delta = 1; 1917 node = new Node<K,V>(h, key, val, null); 1918 } 1919 } finally { 1920 setTabAt(tab, i, node); 1921 } 1922 } 1923 } 1924 if (binCount != 0) 1925 break; 1926 } 1927 else if ((fh = f.hash) == MOVED) 1928 tab = helpTransfer(tab, f); 1929 else { 1930 synchronized (f) { 1931 if (tabAt(tab, i) == f) { 1932 if (fh >= 0) { 1933 binCount = 1; 1934 for (Node<K,V> e = f, pred = null;; ++binCount) { 1935 K ek; 1936 if (e.hash == h && 1937 ((ek = e.key) == key || 1938 (ek != null && key.equals(ek)))) { 1939 val = remappingFunction.apply(key, e.val); 1940 if (val != null) 1941 e.val = val; 1942 else { 1943 delta = -1; 1944 Node<K,V> en = e.next; 1945 if (pred != null) 1946 pred.next = en; 1947 else 1948 setTabAt(tab, i, en); 1949 } 1950 break; 1951 } 1952 pred = e; 1953 if ((e = e.next) == null) { 1954 val = remappingFunction.apply(key, null); 1955 if (val != null) { 1956 if (pred.next != null) 1957 throw new IllegalStateException("Recursive update"); 1958 delta = 1; 1959 pred.next = 1960 new Node<K,V>(h, key, val, null); 1961 } 1962 break; 1963 } 1964 } 1965 } 1966 else if (f instanceof TreeBin) { 1967 binCount = 1; 1968 TreeBin<K,V> t = (TreeBin<K,V>)f; 1969 TreeNode<K,V> r, p; 1970 if ((r = t.root) != null) 1971 p = r.findTreeNode(h, key, null); 1972 else 1973 p = null; 1974 V pv = (p == null) ? null : p.val; 1975 val = remappingFunction.apply(key, pv); 1976 if (val != null) { 1977 if (p != null) 1978 p.val = val; 1979 else { 1980 delta = 1; 1981 t.putTreeVal(h, key, val); 1982 } 1983 } 1984 else if (p != null) { 1985 delta = -1; 1986 if (t.removeTreeNode(p)) 1987 setTabAt(tab, i, untreeify(t.first)); 1988 } 1989 } 1990 else if (f instanceof ReservationNode) 1991 throw new IllegalStateException("Recursive update"); 1992 } 1993 } 1994 if (binCount != 0) { 1995 if (binCount >= TREEIFY_THRESHOLD) 1996 treeifyBin(tab, i); 1997 break; 1998 } 1999 } 2000 } 2001 if (delta != 0) 2002 addCount((long)delta, binCount); 2003 return val; 2004 } 2005 2006 /** 2007 * If the specified key is not already associated with a 2008 * (non-null) value, associates it with the given value. 
2009 * Otherwise, replaces the value with the results of the given 2010 * remapping function, or removes if {@code null}. The entire 2011 * method invocation is performed atomically. Some attempted 2012 * update operations on this map by other threads may be blocked 2013 * while computation is in progress, so the computation should be 2014 * short and simple, and must not attempt to update any other 2015 * mappings of this Map. 2016 * 2017 * @param key key with which the specified value is to be associated 2018 * @param value the value to use if absent 2019 * @param remappingFunction the function to recompute a value if present 2020 * @return the new value associated with the specified key, or null if none 2021 * @throws NullPointerException if the specified key or the 2022 * remappingFunction is null 2023 * @throws RuntimeException or Error if the remappingFunction does so, 2024 * in which case the mapping is unchanged 2025 */ merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction)2026 public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { 2027 if (key == null || value == null || remappingFunction == null) 2028 throw new NullPointerException(); 2029 int h = spread(key.hashCode()); 2030 V val = null; 2031 int delta = 0; 2032 int binCount = 0; 2033 for (Node<K,V>[] tab = table;;) { 2034 Node<K,V> f; int n, i, fh; 2035 if (tab == null || (n = tab.length) == 0) 2036 tab = initTable(); 2037 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { 2038 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) { 2039 delta = 1; 2040 val = value; 2041 break; 2042 } 2043 } 2044 else if ((fh = f.hash) == MOVED) 2045 tab = helpTransfer(tab, f); 2046 else { 2047 synchronized (f) { 2048 if (tabAt(tab, i) == f) { 2049 if (fh >= 0) { 2050 binCount = 1; 2051 for (Node<K,V> e = f, pred = null;; ++binCount) { 2052 K ek; 2053 if (e.hash == h && 2054 ((ek = e.key) == key || 2055 (ek != null && key.equals(ek)))) { 2056 val = remappingFunction.apply(e.val, value); 2057 if (val != null) 2058 e.val = val; 2059 else { 2060 delta = -1; 2061 Node<K,V> en = e.next; 2062 if (pred != null) 2063 pred.next = en; 2064 else 2065 setTabAt(tab, i, en); 2066 } 2067 break; 2068 } 2069 pred = e; 2070 if ((e = e.next) == null) { 2071 delta = 1; 2072 val = value; 2073 pred.next = 2074 new Node<K,V>(h, key, val, null); 2075 break; 2076 } 2077 } 2078 } 2079 else if (f instanceof TreeBin) { 2080 binCount = 2; 2081 TreeBin<K,V> t = (TreeBin<K,V>)f; 2082 TreeNode<K,V> r = t.root; 2083 TreeNode<K,V> p = (r == null) ? null : 2084 r.findTreeNode(h, key, null); 2085 val = (p == null) ? value : 2086 remappingFunction.apply(p.val, value); 2087 if (val != null) { 2088 if (p != null) 2089 p.val = val; 2090 else { 2091 delta = 1; 2092 t.putTreeVal(h, key, val); 2093 } 2094 } 2095 else if (p != null) { 2096 delta = -1; 2097 if (t.removeTreeNode(p)) 2098 setTabAt(tab, i, untreeify(t.first)); 2099 } 2100 } 2101 else if (f instanceof ReservationNode) 2102 throw new IllegalStateException("Recursive update"); 2103 } 2104 } 2105 if (binCount != 0) { 2106 if (binCount >= TREEIFY_THRESHOLD) 2107 treeifyBin(tab, i); 2108 break; 2109 } 2110 } 2111 } 2112 if (delta != 0) 2113 addCount((long)delta, binCount); 2114 return val; 2115 } 2116 2117 // Hashtable legacy methods 2118 2119 /** 2120 * Tests if some key maps into the specified value in this table. 
2121 * 2122 * <p>Note that this method is identical in functionality to 2123 * {@link #containsValue(Object)}, and exists solely to ensure 2124 * full compatibility with class {@link java.util.Hashtable}, 2125 * which supported this method prior to introduction of the 2126 * Java Collections Framework. 2127 * 2128 * @param value a value to search for 2129 * @return {@code true} if and only if some key maps to the 2130 * {@code value} argument in this table as 2131 * determined by the {@code equals} method; 2132 * {@code false} otherwise 2133 * @throws NullPointerException if the specified value is null 2134 */ contains(Object value)2135 public boolean contains(Object value) { 2136 return containsValue(value); 2137 } 2138 2139 /** 2140 * Returns an enumeration of the keys in this table. 2141 * 2142 * @return an enumeration of the keys in this table 2143 * @see #keySet() 2144 */ keys()2145 public Enumeration<K> keys() { 2146 Node<K,V>[] t; 2147 int f = (t = table) == null ? 0 : t.length; 2148 return new KeyIterator<K,V>(t, f, 0, f, this); 2149 } 2150 2151 /** 2152 * Returns an enumeration of the values in this table. 2153 * 2154 * @return an enumeration of the values in this table 2155 * @see #values() 2156 */ elements()2157 public Enumeration<V> elements() { 2158 Node<K,V>[] t; 2159 int f = (t = table) == null ? 0 : t.length; 2160 return new ValueIterator<K,V>(t, f, 0, f, this); 2161 } 2162 2163 // ConcurrentHashMap-only methods 2164 2165 /** 2166 * Returns the number of mappings. This method should be used 2167 * instead of {@link #size} because a ConcurrentHashMap may 2168 * contain more mappings than can be represented as an int. The 2169 * value returned is an estimate; the actual count may differ if 2170 * there are concurrent insertions or removals. 2171 * 2172 * @return the number of mappings 2173 * @since 1.8 2174 */ mappingCount()2175 public long mappingCount() { 2176 long n = sumCount(); 2177 return (n < 0L) ? 0L : n; // ignore transient negative values 2178 } 2179 2180 /** 2181 * Creates a new {@link Set} backed by a ConcurrentHashMap 2182 * from the given type to {@code Boolean.TRUE}. 2183 * 2184 * @param <K> the element type of the returned set 2185 * @return the new set 2186 * @since 1.8 2187 */ newKeySet()2188 public static <K> KeySetView<K,Boolean> newKeySet() { 2189 return new KeySetView<K,Boolean> 2190 (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE); 2191 } 2192 2193 /** 2194 * Creates a new {@link Set} backed by a ConcurrentHashMap 2195 * from the given type to {@code Boolean.TRUE}. 2196 * 2197 * @param initialCapacity The implementation performs internal 2198 * sizing to accommodate this many elements. 2199 * @param <K> the element type of the returned set 2200 * @return the new set 2201 * @throws IllegalArgumentException if the initial capacity of 2202 * elements is negative 2203 * @since 1.8 2204 */ newKeySet(int initialCapacity)2205 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { 2206 return new KeySetView<K,Boolean> 2207 (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE); 2208 } 2209 2210 /** 2211 * Returns a {@link Set} view of the keys in this map, using the 2212 * given common mapped value for any additions (i.e., {@link 2213 * Collection#add} and {@link Collection#addAll(Collection)}). 2214 * This is of course only appropriate if it is acceptable to use 2215 * the same value for all additions from this view. 
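 *
 * <p>For example (illustrative only), given a
 * {@code ConcurrentHashMap<String,Boolean> map}, the view returned by
 * {@code map.keySet(Boolean.TRUE)} acts as a concurrent set in which
 * {@code add("x")} has the same effect as
 * {@code map.put("x", Boolean.TRUE)}.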
2216 * 2217 * @param mappedValue the mapped value to use for any additions 2218 * @return the set view 2219 * @throws NullPointerException if the mappedValue is null 2220 */ keySet(V mappedValue)2221 public KeySetView<K,V> keySet(V mappedValue) { 2222 if (mappedValue == null) 2223 throw new NullPointerException(); 2224 return new KeySetView<K,V>(this, mappedValue); 2225 } 2226 2227 /* ---------------- Special Nodes -------------- */ 2228 2229 /** 2230 * A node inserted at head of bins during transfer operations. 2231 */ 2232 static final class ForwardingNode<K,V> extends Node<K,V> { 2233 final Node<K,V>[] nextTable; ForwardingNode(Node<K,V>[] tab)2234 ForwardingNode(Node<K,V>[] tab) { 2235 super(MOVED, null, null, null); 2236 this.nextTable = tab; 2237 } 2238 find(int h, Object k)2239 Node<K,V> find(int h, Object k) { 2240 // loop to avoid arbitrarily deep recursion on forwarding nodes 2241 outer: for (Node<K,V>[] tab = nextTable;;) { 2242 Node<K,V> e; int n; 2243 if (k == null || tab == null || (n = tab.length) == 0 || 2244 (e = tabAt(tab, (n - 1) & h)) == null) 2245 return null; 2246 for (;;) { 2247 int eh; K ek; 2248 if ((eh = e.hash) == h && 2249 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 2250 return e; 2251 if (eh < 0) { 2252 if (e instanceof ForwardingNode) { 2253 tab = ((ForwardingNode<K,V>)e).nextTable; 2254 continue outer; 2255 } 2256 else 2257 return e.find(h, k); 2258 } 2259 if ((e = e.next) == null) 2260 return null; 2261 } 2262 } 2263 } 2264 } 2265 2266 /** 2267 * A place-holder node used in computeIfAbsent and compute. 2268 */ 2269 static final class ReservationNode<K,V> extends Node<K,V> { ReservationNode()2270 ReservationNode() { 2271 super(RESERVED, null, null, null); 2272 } 2273 find(int h, Object k)2274 Node<K,V> find(int h, Object k) { 2275 return null; 2276 } 2277 } 2278 2279 /* ---------------- Table Initialization and Resizing -------------- */ 2280 2281 /** 2282 * Returns the stamp bits for resizing a table of size n. 2283 * Must be negative when shifted left by RESIZE_STAMP_SHIFT. 2284 */ resizeStamp(int n)2285 static final int resizeStamp(int n) { 2286 return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1)); 2287 } 2288 2289 /** 2290 * Initializes table, using the size recorded in sizeCtl. 2291 */ initTable()2292 private final Node<K,V>[] initTable() { 2293 Node<K,V>[] tab; int sc; 2294 while ((tab = table) == null || tab.length == 0) { 2295 if ((sc = sizeCtl) < 0) 2296 Thread.yield(); // lost initialization race; just spin 2297 else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { 2298 try { 2299 if ((tab = table) == null || tab.length == 0) { 2300 int n = (sc > 0) ? sc : DEFAULT_CAPACITY; 2301 @SuppressWarnings("unchecked") 2302 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; 2303 table = tab = nt; 2304 sc = n - (n >>> 2); 2305 } 2306 } finally { 2307 sizeCtl = sc; 2308 } 2309 break; 2310 } 2311 } 2312 return tab; 2313 } 2314 2315 /** 2316 * Adds to count, and if table is too small and not already 2317 * resizing, initiates transfer. If already resizing, helps 2318 * perform transfer if work is available. Rechecks occupancy 2319 * after a transfer to see if another resize is already needed 2320 * because resizings are lagging additions. 
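 *
 * <p>Counting is striped in the style of LongAdder (a descriptive
 * summary, not additional contract): the common case CASes
 * {@code baseCount}; contended updates instead apply the addend to a
 * {@code CounterCell} indexed by the thread's probe, and
 * {@code sumCount} totals the base plus all cells.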
2321 * 2322 * @param x the count to add 2323 * @param check if <0, don't check resize, if <= 1 only check if uncontended 2324 */ addCount(long x, int check)2325 private final void addCount(long x, int check) { 2326 CounterCell[] as; long b, s; 2327 if ((as = counterCells) != null || 2328 !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { 2329 CounterCell a; long v; int m; 2330 boolean uncontended = true; 2331 if (as == null || (m = as.length - 1) < 0 || 2332 (a = as[ThreadLocalRandom.getProbe() & m]) == null || 2333 !(uncontended = 2334 U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { 2335 fullAddCount(x, uncontended); 2336 return; 2337 } 2338 if (check <= 1) 2339 return; 2340 s = sumCount(); 2341 } 2342 if (check >= 0) { 2343 Node<K,V>[] tab, nt; int n, sc; 2344 while (s >= (long)(sc = sizeCtl) && (tab = table) != null && 2345 (n = tab.length) < MAXIMUM_CAPACITY) { 2346 int rs = resizeStamp(n); 2347 if (sc < 0) { 2348 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || 2349 sc == rs + MAX_RESIZERS || (nt = nextTable) == null || 2350 transferIndex <= 0) 2351 break; 2352 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) 2353 transfer(tab, nt); 2354 } 2355 else if (U.compareAndSwapInt(this, SIZECTL, sc, 2356 (rs << RESIZE_STAMP_SHIFT) + 2)) 2357 transfer(tab, null); 2358 s = sumCount(); 2359 } 2360 } 2361 } 2362 2363 /** 2364 * Helps transfer if a resize is in progress. 2365 */ helpTransfer(Node<K,V>[] tab, Node<K,V> f)2366 final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) { 2367 Node<K,V>[] nextTab; int sc; 2368 if (tab != null && (f instanceof ForwardingNode) && 2369 (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) { 2370 int rs = resizeStamp(tab.length); 2371 while (nextTab == nextTable && table == tab && 2372 (sc = sizeCtl) < 0) { 2373 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || 2374 sc == rs + MAX_RESIZERS || transferIndex <= 0) 2375 break; 2376 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { 2377 transfer(tab, nextTab); 2378 break; 2379 } 2380 } 2381 return nextTab; 2382 } 2383 return table; 2384 } 2385 2386 /** 2387 * Tries to presize table to accommodate the given number of elements. 2388 * 2389 * @param size number of elements (doesn't need to be perfectly accurate) 2390 */ tryPresize(int size)2391 private final void tryPresize(int size) { 2392 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : 2393 tableSizeFor(size + (size >>> 1) + 1); 2394 int sc; 2395 while ((sc = sizeCtl) >= 0) { 2396 Node<K,V>[] tab = table; int n; 2397 if (tab == null || (n = tab.length) == 0) { 2398 n = (sc > c) ? sc : c; 2399 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { 2400 try { 2401 if (table == tab) { 2402 @SuppressWarnings("unchecked") 2403 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; 2404 table = nt; 2405 sc = n - (n >>> 2); 2406 } 2407 } finally { 2408 sizeCtl = sc; 2409 } 2410 } 2411 } 2412 else if (c <= sc || n >= MAXIMUM_CAPACITY) 2413 break; 2414 else if (tab == table) { 2415 int rs = resizeStamp(n); 2416 if (U.compareAndSwapInt(this, SIZECTL, sc, 2417 (rs << RESIZE_STAMP_SHIFT) + 2)) 2418 transfer(tab, null); 2419 } 2420 } 2421 } 2422 2423 /** 2424 * Moves and/or copies the nodes in each bin to new table. See 2425 * above for explanation. 2426 */ transfer(Node<K,V>[] tab, Node<K,V>[] nextTab)2427 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) { 2428 int n = tab.length, stride; 2429 if ((stride = (NCPU > 1) ? 
(n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) 2430 stride = MIN_TRANSFER_STRIDE; // subdivide range 2431 if (nextTab == null) { // initiating 2432 try { 2433 @SuppressWarnings("unchecked") 2434 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; 2435 nextTab = nt; 2436 } catch (Throwable ex) { // try to cope with OOME 2437 sizeCtl = Integer.MAX_VALUE; 2438 return; 2439 } 2440 nextTable = nextTab; 2441 transferIndex = n; 2442 } 2443 int nextn = nextTab.length; 2444 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); 2445 boolean advance = true; 2446 boolean finishing = false; // to ensure sweep before committing nextTab 2447 for (int i = 0, bound = 0;;) { 2448 Node<K,V> f; int fh; 2449 while (advance) { 2450 int nextIndex, nextBound; 2451 if (--i >= bound || finishing) 2452 advance = false; 2453 else if ((nextIndex = transferIndex) <= 0) { 2454 i = -1; 2455 advance = false; 2456 } 2457 else if (U.compareAndSwapInt 2458 (this, TRANSFERINDEX, nextIndex, 2459 nextBound = (nextIndex > stride ? 2460 nextIndex - stride : 0))) { 2461 bound = nextBound; 2462 i = nextIndex - 1; 2463 advance = false; 2464 } 2465 } 2466 if (i < 0 || i >= n || i + n >= nextn) { 2467 int sc; 2468 if (finishing) { 2469 nextTable = null; 2470 table = nextTab; 2471 sizeCtl = (n << 1) - (n >>> 1); 2472 return; 2473 } 2474 if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) { 2475 if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) 2476 return; 2477 finishing = advance = true; 2478 i = n; // recheck before commit 2479 } 2480 } 2481 else if ((f = tabAt(tab, i)) == null) 2482 advance = casTabAt(tab, i, null, fwd); 2483 else if ((fh = f.hash) == MOVED) 2484 advance = true; // already processed 2485 else { 2486 synchronized (f) { 2487 if (tabAt(tab, i) == f) { 2488 Node<K,V> ln, hn; 2489 if (fh >= 0) { 2490 int runBit = fh & n; 2491 Node<K,V> lastRun = f; 2492 for (Node<K,V> p = f.next; p != null; p = p.next) { 2493 int b = p.hash & n; 2494 if (b != runBit) { 2495 runBit = b; 2496 lastRun = p; 2497 } 2498 } 2499 if (runBit == 0) { 2500 ln = lastRun; 2501 hn = null; 2502 } 2503 else { 2504 hn = lastRun; 2505 ln = null; 2506 } 2507 for (Node<K,V> p = f; p != lastRun; p = p.next) { 2508 int ph = p.hash; K pk = p.key; V pv = p.val; 2509 if ((ph & n) == 0) 2510 ln = new Node<K,V>(ph, pk, pv, ln); 2511 else 2512 hn = new Node<K,V>(ph, pk, pv, hn); 2513 } 2514 setTabAt(nextTab, i, ln); 2515 setTabAt(nextTab, i + n, hn); 2516 setTabAt(tab, i, fwd); 2517 advance = true; 2518 } 2519 else if (f instanceof TreeBin) { 2520 TreeBin<K,V> t = (TreeBin<K,V>)f; 2521 TreeNode<K,V> lo = null, loTail = null; 2522 TreeNode<K,V> hi = null, hiTail = null; 2523 int lc = 0, hc = 0; 2524 for (Node<K,V> e = t.first; e != null; e = e.next) { 2525 int h = e.hash; 2526 TreeNode<K,V> p = new TreeNode<K,V> 2527 (h, e.key, e.val, null, null); 2528 if ((h & n) == 0) { 2529 if ((p.prev = loTail) == null) 2530 lo = p; 2531 else 2532 loTail.next = p; 2533 loTail = p; 2534 ++lc; 2535 } 2536 else { 2537 if ((p.prev = hiTail) == null) 2538 hi = p; 2539 else 2540 hiTail.next = p; 2541 hiTail = p; 2542 ++hc; 2543 } 2544 } 2545 ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : 2546 (hc != 0) ? new TreeBin<K,V>(lo) : t; 2547 hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : 2548 (lc != 0) ? 
new TreeBin<K,V>(hi) : t; 2549 setTabAt(nextTab, i, ln); 2550 setTabAt(nextTab, i + n, hn); 2551 setTabAt(tab, i, fwd); 2552 advance = true; 2553 } 2554 } 2555 } 2556 } 2557 } 2558 } 2559 2560 /* ---------------- Counter support -------------- */ 2561 2562 /** 2563 * A padded cell for distributing counts. Adapted from LongAdder 2564 * and Striped64. See their internal docs for explanation. 2565 */ 2566 // Android-removed: @Contended, this hint is not used by the Android runtime. 2567 //@jdk.internal.vm.annotation.Contended 2568 static final class CounterCell { 2569 volatile long value; CounterCell(long x)2570 CounterCell(long x) { value = x; } 2571 } 2572 sumCount()2573 final long sumCount() { 2574 CounterCell[] as = counterCells; CounterCell a; 2575 long sum = baseCount; 2576 if (as != null) { 2577 for (int i = 0; i < as.length; ++i) { 2578 if ((a = as[i]) != null) 2579 sum += a.value; 2580 } 2581 } 2582 return sum; 2583 } 2584 2585 // See LongAdder version for explanation fullAddCount(long x, boolean wasUncontended)2586 private final void fullAddCount(long x, boolean wasUncontended) { 2587 int h; 2588 if ((h = ThreadLocalRandom.getProbe()) == 0) { 2589 ThreadLocalRandom.localInit(); // force initialization 2590 h = ThreadLocalRandom.getProbe(); 2591 wasUncontended = true; 2592 } 2593 boolean collide = false; // True if last slot nonempty 2594 for (;;) { 2595 CounterCell[] as; CounterCell a; int n; long v; 2596 if ((as = counterCells) != null && (n = as.length) > 0) { 2597 if ((a = as[(n - 1) & h]) == null) { 2598 if (cellsBusy == 0) { // Try to attach new Cell 2599 CounterCell r = new CounterCell(x); // Optimistic create 2600 if (cellsBusy == 0 && 2601 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { 2602 boolean created = false; 2603 try { // Recheck under lock 2604 CounterCell[] rs; int m, j; 2605 if ((rs = counterCells) != null && 2606 (m = rs.length) > 0 && 2607 rs[j = (m - 1) & h] == null) { 2608 rs[j] = r; 2609 created = true; 2610 } 2611 } finally { 2612 cellsBusy = 0; 2613 } 2614 if (created) 2615 break; 2616 continue; // Slot is now non-empty 2617 } 2618 } 2619 collide = false; 2620 } 2621 else if (!wasUncontended) // CAS already known to fail 2622 wasUncontended = true; // Continue after rehash 2623 else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) 2624 break; 2625 else if (counterCells != as || n >= NCPU) 2626 collide = false; // At max size or stale 2627 else if (!collide) 2628 collide = true; 2629 else if (cellsBusy == 0 && 2630 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { 2631 try { 2632 if (counterCells == as) {// Expand table unless stale 2633 CounterCell[] rs = new CounterCell[n << 1]; 2634 for (int i = 0; i < n; ++i) 2635 rs[i] = as[i]; 2636 counterCells = rs; 2637 } 2638 } finally { 2639 cellsBusy = 0; 2640 } 2641 collide = false; 2642 continue; // Retry with expanded table 2643 } 2644 h = ThreadLocalRandom.advanceProbe(h); 2645 } 2646 else if (cellsBusy == 0 && counterCells == as && 2647 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { 2648 boolean init = false; 2649 try { // Initialize table 2650 if (counterCells == as) { 2651 CounterCell[] rs = new CounterCell[2]; 2652 rs[h & 1] = new CounterCell(x); 2653 counterCells = rs; 2654 init = true; 2655 } 2656 } finally { 2657 cellsBusy = 0; 2658 } 2659 if (init) 2660 break; 2661 } 2662 else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) 2663 break; // Fall back on using base 2664 } 2665 } 2666 2667 /* ---------------- Conversion from/to TreeBins -------------- */ 2668 2669 /** 2670 * Replaces 
all linked nodes in bin at given index unless table is
2671 * too small, in which case resizes instead.
2672 */
2673 private final void treeifyBin(Node<K,V>[] tab, int index) {
2674 Node<K,V> b; int n;
2675 if (tab != null) {
2676 if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2677 tryPresize(n << 1);
2678 else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2679 synchronized (b) {
2680 if (tabAt(tab, index) == b) {
2681 TreeNode<K,V> hd = null, tl = null;
2682 for (Node<K,V> e = b; e != null; e = e.next) {
2683 TreeNode<K,V> p =
2684 new TreeNode<K,V>(e.hash, e.key, e.val,
2685 null, null);
2686 if ((p.prev = tl) == null)
2687 hd = p;
2688 else
2689 tl.next = p;
2690 tl = p;
2691 }
2692 setTabAt(tab, index, new TreeBin<K,V>(hd));
2693 }
2694 }
2695 }
2696 }
2697 }
2698
2699 /**
2700 * Returns a list of non-TreeNodes replacing those in the given list.
2701 */
2702 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2703 Node<K,V> hd = null, tl = null;
2704 for (Node<K,V> q = b; q != null; q = q.next) {
2705 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null);
2706 if (tl == null)
2707 hd = p;
2708 else
2709 tl.next = p;
2710 tl = p;
2711 }
2712 return hd;
2713 }
2714
2715 /* ---------------- TreeNodes -------------- */
2716
2717 /**
2718 * Nodes for use in TreeBins.
2719 */
2720 static final class TreeNode<K,V> extends Node<K,V> {
2721 TreeNode<K,V> parent; // red-black tree links
2722 TreeNode<K,V> left;
2723 TreeNode<K,V> right;
2724 TreeNode<K,V> prev; // needed to unlink next upon deletion
2725 boolean red;
2726
2727 TreeNode(int hash, K key, V val, Node<K,V> next,
2728 TreeNode<K,V> parent) {
2729 super(hash, key, val, next);
2730 this.parent = parent;
2731 }
2732
2733 Node<K,V> find(int h, Object k) {
2734 return findTreeNode(h, k, null);
2735 }
2736
2737 /**
2738 * Returns the TreeNode (or null if not found) for the given key
2739 * starting at given root.
2740 */
2741 final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2742 if (k != null) {
2743 TreeNode<K,V> p = this;
2744 do {
2745 int ph, dir; K pk; TreeNode<K,V> q;
2746 TreeNode<K,V> pl = p.left, pr = p.right;
2747 if ((ph = p.hash) > h)
2748 p = pl;
2749 else if (ph < h)
2750 p = pr;
2751 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2752 return p;
2753 else if (pl == null)
2754 p = pr;
2755 else if (pr == null)
2756 p = pl;
2757 else if ((kc != null ||
2758 (kc = comparableClassFor(k)) != null) &&
2759 (dir = compareComparables(kc, k, pk)) != 0)
2760 p = (dir < 0) ? pl : pr;
2761 else if ((q = pr.findTreeNode(h, k, kc)) != null)
2762 return q;
2763 else
2764 p = pl;
2765 } while (p != null);
2766 }
2767 return null;
2768 }
2769 }
2770
2771 /* ---------------- TreeBins -------------- */
2772
2773 /**
2774 * TreeNodes used at the heads of bins. TreeBins do not hold user
2775 * keys or values, but instead point to a list of TreeNodes and
2776 * their root. They also maintain a parasitic read-write lock
2777 * forcing writers (who hold bin lock) to wait for readers (who do
2778 * not) to complete before tree restructuring operations.
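 *
 * <p>As a summary of the encoding used below: {@code lockState} holds
 * {@code WRITER} (1) while the write lock is held, {@code WAITER} (2)
 * while a writer is parked waiting, and a multiple of {@code READER}
 * (4) counting in-progress readers.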
2779 */ 2780 static final class TreeBin<K,V> extends Node<K,V> { 2781 TreeNode<K,V> root; 2782 volatile TreeNode<K,V> first; 2783 volatile Thread waiter; 2784 volatile int lockState; 2785 // values for lockState 2786 static final int WRITER = 1; // set while holding write lock 2787 static final int WAITER = 2; // set when waiting for write lock 2788 static final int READER = 4; // increment value for setting read lock 2789 2790 /** 2791 * Tie-breaking utility for ordering insertions when equal 2792 * hashCodes and non-comparable. We don't require a total 2793 * order, just a consistent insertion rule to maintain 2794 * equivalence across rebalancings. Tie-breaking further than 2795 * necessary simplifies testing a bit. 2796 */ tieBreakOrder(Object a, Object b)2797 static int tieBreakOrder(Object a, Object b) { 2798 int d; 2799 if (a == null || b == null || 2800 (d = a.getClass().getName(). 2801 compareTo(b.getClass().getName())) == 0) 2802 d = (System.identityHashCode(a) <= System.identityHashCode(b) ? 2803 -1 : 1); 2804 return d; 2805 } 2806 2807 /** 2808 * Creates bin with initial set of nodes headed by b. 2809 */ TreeBin(TreeNode<K,V> b)2810 TreeBin(TreeNode<K,V> b) { 2811 super(TREEBIN, null, null, null); 2812 this.first = b; 2813 TreeNode<K,V> r = null; 2814 for (TreeNode<K,V> x = b, next; x != null; x = next) { 2815 next = (TreeNode<K,V>)x.next; 2816 x.left = x.right = null; 2817 if (r == null) { 2818 x.parent = null; 2819 x.red = false; 2820 r = x; 2821 } 2822 else { 2823 K k = x.key; 2824 int h = x.hash; 2825 Class<?> kc = null; 2826 for (TreeNode<K,V> p = r;;) { 2827 int dir, ph; 2828 K pk = p.key; 2829 if ((ph = p.hash) > h) 2830 dir = -1; 2831 else if (ph < h) 2832 dir = 1; 2833 else if ((kc == null && 2834 (kc = comparableClassFor(k)) == null) || 2835 (dir = compareComparables(kc, k, pk)) == 0) 2836 dir = tieBreakOrder(k, pk); 2837 TreeNode<K,V> xp = p; 2838 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2839 x.parent = xp; 2840 if (dir <= 0) 2841 xp.left = x; 2842 else 2843 xp.right = x; 2844 r = balanceInsertion(r, x); 2845 break; 2846 } 2847 } 2848 } 2849 } 2850 this.root = r; 2851 assert checkInvariants(root); 2852 } 2853 2854 /** 2855 * Acquires write lock for tree restructuring. 2856 */ lockRoot()2857 private final void lockRoot() { 2858 if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) 2859 contendedLock(); // offload to separate method 2860 } 2861 2862 /** 2863 * Releases write lock for tree restructuring. 2864 */ unlockRoot()2865 private final void unlockRoot() { 2866 lockState = 0; 2867 } 2868 2869 /** 2870 * Possibly blocks awaiting root lock. 2871 */ contendedLock()2872 private final void contendedLock() { 2873 boolean waiting = false; 2874 for (int s;;) { 2875 if (((s = lockState) & ~WAITER) == 0) { 2876 if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { 2877 if (waiting) 2878 waiter = null; 2879 return; 2880 } 2881 } 2882 else if ((s & WAITER) == 0) { 2883 if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { 2884 waiting = true; 2885 waiter = Thread.currentThread(); 2886 } 2887 } 2888 else if (waiting) 2889 LockSupport.park(this); 2890 } 2891 } 2892 2893 /** 2894 * Returns matching node or null if none. Tries to search 2895 * using tree comparisons from root, but continues linear 2896 * search when lock not available. 
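 * (List traversal through the "next" fields stays safe during
 * restructuring because, as noted in removeTreeNode, those links are
 * maintained independently of the tree linkage.)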
2897 */ find(int h, Object k)2898 final Node<K,V> find(int h, Object k) { 2899 if (k != null) { 2900 for (Node<K,V> e = first; e != null; ) { 2901 int s; K ek; 2902 if (((s = lockState) & (WAITER|WRITER)) != 0) { 2903 if (e.hash == h && 2904 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 2905 return e; 2906 e = e.next; 2907 } 2908 else if (U.compareAndSwapInt(this, LOCKSTATE, s, 2909 s + READER)) { 2910 TreeNode<K,V> r, p; 2911 try { 2912 p = ((r = root) == null ? null : 2913 r.findTreeNode(h, k, null)); 2914 } finally { 2915 Thread w; 2916 if (U.getAndAddInt(this, LOCKSTATE, -READER) == 2917 (READER|WAITER) && (w = waiter) != null) 2918 LockSupport.unpark(w); 2919 } 2920 return p; 2921 } 2922 } 2923 } 2924 return null; 2925 } 2926 2927 /** 2928 * Finds or adds a node. 2929 * @return null if added 2930 */ putTreeVal(int h, K k, V v)2931 final TreeNode<K,V> putTreeVal(int h, K k, V v) { 2932 Class<?> kc = null; 2933 boolean searched = false; 2934 for (TreeNode<K,V> p = root;;) { 2935 int dir, ph; K pk; 2936 if (p == null) { 2937 first = root = new TreeNode<K,V>(h, k, v, null, null); 2938 break; 2939 } 2940 else if ((ph = p.hash) > h) 2941 dir = -1; 2942 else if (ph < h) 2943 dir = 1; 2944 else if ((pk = p.key) == k || (pk != null && k.equals(pk))) 2945 return p; 2946 else if ((kc == null && 2947 (kc = comparableClassFor(k)) == null) || 2948 (dir = compareComparables(kc, k, pk)) == 0) { 2949 if (!searched) { 2950 TreeNode<K,V> q, ch; 2951 searched = true; 2952 if (((ch = p.left) != null && 2953 (q = ch.findTreeNode(h, k, kc)) != null) || 2954 ((ch = p.right) != null && 2955 (q = ch.findTreeNode(h, k, kc)) != null)) 2956 return q; 2957 } 2958 dir = tieBreakOrder(k, pk); 2959 } 2960 2961 TreeNode<K,V> xp = p; 2962 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2963 TreeNode<K,V> x, f = first; 2964 first = x = new TreeNode<K,V>(h, k, v, f, xp); 2965 if (f != null) 2966 f.prev = x; 2967 if (dir <= 0) 2968 xp.left = x; 2969 else 2970 xp.right = x; 2971 if (!xp.red) 2972 x.red = true; 2973 else { 2974 lockRoot(); 2975 try { 2976 root = balanceInsertion(root, x); 2977 } finally { 2978 unlockRoot(); 2979 } 2980 } 2981 break; 2982 } 2983 } 2984 assert checkInvariants(root); 2985 return null; 2986 } 2987 2988 /** 2989 * Removes the given node, that must be present before this 2990 * call. This is messier than typical red-black deletion code 2991 * because we cannot swap the contents of an interior node 2992 * with a leaf successor that is pinned by "next" pointers 2993 * that are accessible independently of lock. So instead we 2994 * swap the tree linkages. 
2995 * 2996 * @return true if now too small, so should be untreeified 2997 */ removeTreeNode(TreeNode<K,V> p)2998 final boolean removeTreeNode(TreeNode<K,V> p) { 2999 TreeNode<K,V> next = (TreeNode<K,V>)p.next; 3000 TreeNode<K,V> pred = p.prev; // unlink traversal pointers 3001 TreeNode<K,V> r, rl; 3002 if (pred == null) 3003 first = next; 3004 else 3005 pred.next = next; 3006 if (next != null) 3007 next.prev = pred; 3008 if (first == null) { 3009 root = null; 3010 return true; 3011 } 3012 if ((r = root) == null || r.right == null || // too small 3013 (rl = r.left) == null || rl.left == null) 3014 return true; 3015 lockRoot(); 3016 try { 3017 TreeNode<K,V> replacement; 3018 TreeNode<K,V> pl = p.left; 3019 TreeNode<K,V> pr = p.right; 3020 if (pl != null && pr != null) { 3021 TreeNode<K,V> s = pr, sl; 3022 while ((sl = s.left) != null) // find successor 3023 s = sl; 3024 boolean c = s.red; s.red = p.red; p.red = c; // swap colors 3025 TreeNode<K,V> sr = s.right; 3026 TreeNode<K,V> pp = p.parent; 3027 if (s == pr) { // p was s's direct parent 3028 p.parent = s; 3029 s.right = p; 3030 } 3031 else { 3032 TreeNode<K,V> sp = s.parent; 3033 if ((p.parent = sp) != null) { 3034 if (s == sp.left) 3035 sp.left = p; 3036 else 3037 sp.right = p; 3038 } 3039 if ((s.right = pr) != null) 3040 pr.parent = s; 3041 } 3042 p.left = null; 3043 if ((p.right = sr) != null) 3044 sr.parent = p; 3045 if ((s.left = pl) != null) 3046 pl.parent = s; 3047 if ((s.parent = pp) == null) 3048 r = s; 3049 else if (p == pp.left) 3050 pp.left = s; 3051 else 3052 pp.right = s; 3053 if (sr != null) 3054 replacement = sr; 3055 else 3056 replacement = p; 3057 } 3058 else if (pl != null) 3059 replacement = pl; 3060 else if (pr != null) 3061 replacement = pr; 3062 else 3063 replacement = p; 3064 if (replacement != p) { 3065 TreeNode<K,V> pp = replacement.parent = p.parent; 3066 if (pp == null) 3067 r = replacement; 3068 else if (p == pp.left) 3069 pp.left = replacement; 3070 else 3071 pp.right = replacement; 3072 p.left = p.right = p.parent = null; 3073 } 3074 3075 root = (p.red) ? 
r : balanceDeletion(r, replacement); 3076 3077 if (p == replacement) { // detach pointers 3078 TreeNode<K,V> pp; 3079 if ((pp = p.parent) != null) { 3080 if (p == pp.left) 3081 pp.left = null; 3082 else if (p == pp.right) 3083 pp.right = null; 3084 p.parent = null; 3085 } 3086 } 3087 } finally { 3088 unlockRoot(); 3089 } 3090 assert checkInvariants(root); 3091 return false; 3092 } 3093 3094 /* ------------------------------------------------------------ */ 3095 // Red-black tree methods, all adapted from CLR 3096 rotateLeft(TreeNode<K,V> root, TreeNode<K,V> p)3097 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, 3098 TreeNode<K,V> p) { 3099 TreeNode<K,V> r, pp, rl; 3100 if (p != null && (r = p.right) != null) { 3101 if ((rl = p.right = r.left) != null) 3102 rl.parent = p; 3103 if ((pp = r.parent = p.parent) == null) 3104 (root = r).red = false; 3105 else if (pp.left == p) 3106 pp.left = r; 3107 else 3108 pp.right = r; 3109 r.left = p; 3110 p.parent = r; 3111 } 3112 return root; 3113 } 3114 rotateRight(TreeNode<K,V> root, TreeNode<K,V> p)3115 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, 3116 TreeNode<K,V> p) { 3117 TreeNode<K,V> l, pp, lr; 3118 if (p != null && (l = p.left) != null) { 3119 if ((lr = p.left = l.right) != null) 3120 lr.parent = p; 3121 if ((pp = l.parent = p.parent) == null) 3122 (root = l).red = false; 3123 else if (pp.right == p) 3124 pp.right = l; 3125 else 3126 pp.left = l; 3127 l.right = p; 3128 p.parent = l; 3129 } 3130 return root; 3131 } 3132 balanceInsertion(TreeNode<K,V> root, TreeNode<K,V> x)3133 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, 3134 TreeNode<K,V> x) { 3135 x.red = true; 3136 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { 3137 if ((xp = x.parent) == null) { 3138 x.red = false; 3139 return x; 3140 } 3141 else if (!xp.red || (xpp = xp.parent) == null) 3142 return root; 3143 if (xp == (xppl = xpp.left)) { 3144 if ((xppr = xpp.right) != null && xppr.red) { 3145 xppr.red = false; 3146 xp.red = false; 3147 xpp.red = true; 3148 x = xpp; 3149 } 3150 else { 3151 if (x == xp.right) { 3152 root = rotateLeft(root, x = xp); 3153 xpp = (xp = x.parent) == null ? null : xp.parent; 3154 } 3155 if (xp != null) { 3156 xp.red = false; 3157 if (xpp != null) { 3158 xpp.red = true; 3159 root = rotateRight(root, xpp); 3160 } 3161 } 3162 } 3163 } 3164 else { 3165 if (xppl != null && xppl.red) { 3166 xppl.red = false; 3167 xp.red = false; 3168 xpp.red = true; 3169 x = xpp; 3170 } 3171 else { 3172 if (x == xp.left) { 3173 root = rotateRight(root, x = xp); 3174 xpp = (xp = x.parent) == null ? null : xp.parent; 3175 } 3176 if (xp != null) { 3177 xp.red = false; 3178 if (xpp != null) { 3179 xpp.red = true; 3180 root = rotateLeft(root, xpp); 3181 } 3182 } 3183 } 3184 } 3185 } 3186 } 3187 balanceDeletion(TreeNode<K,V> root, TreeNode<K,V> x)3188 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, 3189 TreeNode<K,V> x) { 3190 for (TreeNode<K,V> xp, xpl, xpr;;) { 3191 if (x == null || x == root) 3192 return root; 3193 else if ((xp = x.parent) == null) { 3194 x.red = false; 3195 return x; 3196 } 3197 else if (x.red) { 3198 x.red = false; 3199 return root; 3200 } 3201 else if ((xpl = xp.left) == x) { 3202 if ((xpr = xp.right) != null && xpr.red) { 3203 xpr.red = false; 3204 xp.red = true; 3205 root = rotateLeft(root, xp); 3206 xpr = (xp = x.parent) == null ? 
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true;
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root;
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else {
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false;
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root;
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root;
                        }
                    }
                }
            }
        }

        /**
         * Checks invariants recursively for the tree of Nodes rooted at t.
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }

        private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
        private static final long LOCKSTATE;
        static {
            try {
                LOCKSTATE = U.objectFieldOffset
                    (TreeBin.class.getDeclaredField("lockState"));
            } catch (ReflectiveOperationException e) {
                throw new Error(e);
            }
        }
    }

    /* ----------------Table Traversal -------------- */

    /**
     * Records the table, its length, and current traversal index for a
     * traverser that must process a region of a forwarded table before
     * proceeding with the current table.
     */
    static final class TableStack<K,V> {
        int length;
        int index;
        Node<K,V>[] tab;
        TableStack<K,V> next;
    }

    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses. Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds check fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;  // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    if (e instanceof ForwardingNode) {
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
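    // Usage sketch (added comment): this is the pattern by which
    // containsValue-style scans and the view forEach methods below drive
    // a Traverser over a snapshot-ish view of the table:
    //
    //   Traverser<K,V> it = new Traverser<K,V>(tab, tab.length, 0, tab.length);
    //   for (Node<K,V> p; (p = it.advance()) != null; )
    //       ... examine p.key / p.val ...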
    /**
     * Base of key, value, and entry Iterators. Adds fields to
     * Traverser to support iterator.remove.
     */
    static class BaseIterator<K,V> extends Traverser<K,V> {
        final ConcurrentHashMap<K,V> map;
        Node<K,V> lastReturned;
        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                     ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            advance();
        }

        public final boolean hasNext() { return next != null; }
        public final boolean hasMoreElements() { return next != null; }

        public final void remove() {
            Node<K,V> p;
            if ((p = lastReturned) == null)
                throw new IllegalStateException();
            lastReturned = null;
            map.replaceNode(p.key, null, null);
        }
    }

    static final class KeyIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<K>, Enumeration<K> {
        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
                    ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final K next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            lastReturned = p;
            advance();
            return k;
        }

        public final K nextElement() { return next(); }
    }

    static final class ValueIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<V>, Enumeration<V> {
        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final V next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            V v = p.val;
            lastReturned = p;
            advance();
            return v;
        }

        public final V nextElement() { return next(); }
    }

    static final class EntryIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<Map.Entry<K,V>> {
        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final Map.Entry<K,V> next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = p.key;
            V v = p.val;
            lastReturned = p;
            advance();
            return new MapEntry<K,V>(k, v, map);
        }
    }
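    // Usage sketch (added; assumes a hypothetical ConcurrentHashMap<String,
    // Integer> named "map"): iterators are weakly consistent and support
    // remove(), so a concurrent cleanup pass needs no extra locking:
    //
    //   for (Iterator<Map.Entry<String,Integer>> it =
    //            map.entrySet().iterator(); it.hasNext(); ) {
    //       if (it.next().getValue() == 0)
    //           it.remove();   // delegates to map.replaceNode
    //   }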
    /**
     * Exported Entry for EntryIterator.
     */
    static final class MapEntry<K,V> implements Map.Entry<K,V> {
        final K key; // non-null
        V val;       // non-null
        final ConcurrentHashMap<K,V> map;
        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
            this.key = key;
            this.val = val;
            this.map = map;
        }
        public K getKey()        { return key; }
        public V getValue()      { return val; }
        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
        public String toString() {
            return Helpers.mapEntryToString(key, val);
        }

        public boolean equals(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == val || v.equals(val)));
        }

        /**
         * Sets our entry's value and writes through to the map. The
         * value to return is somewhat arbitrary here. Since we do not
         * necessarily track asynchronous changes, the most recent
         * "previous" value could be different from what we return (or
         * could even have been removed, in which case the put will
         * re-establish). We do not and cannot guarantee more.
         */
        public V setValue(V value) {
            if (value == null) throw new NullPointerException();
            V v = val;
            val = value;
            map.put(key, value);
            return v;
        }
    }
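    // Behavior sketch (added comment): MapEntry.setValue writes through to
    // the backing map, but the returned "previous" value is only the value
    // this entry last observed, not necessarily the map's latest:
    //
    //   for (Map.Entry<String,Integer> e : map.entrySet())
    //       e.setValue(e.getValue() + 1);   // also updates the map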
    static final class KeySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<K> {
        long est; // size estimate
        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                       long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public KeySpliterator<K,V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                        f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.key);
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(p.key);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }

    static final class ValueSpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<V> {
        long est; // size estimate
        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public ValueSpliterator<K,V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.val);
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(p.val);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.CONCURRENT | Spliterator.NONNULL;
        }
    }

    static final class EntrySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        final ConcurrentHashMap<K,V> map; // To export MapEntry
        long est; // size estimate
        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est, ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            this.est = est;
        }

        public EntrySpliterator<K,V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1, map);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(new MapEntry<K,V>(p.key, p.val, map));
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(new MapEntry<K,V>(p.key, p.val, map));
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }

    // Parallel bulk operations

    /**
     * Computes initial batch value for bulk tasks. The returned value
     * is approximately exp2 of the number of times (minus one) to
     * split task by two before executing leaf action. This value is
     * faster to compute and more convenient to use as a guide to
     * splitting than is the depth, since it is used while dividing by
     * two anyway.
     */
    final int batchFor(long b) {
        long n;
        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
            return 0;
        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
    }
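    // Worked example (added comment): with common-pool parallelism 8 the
    // slack is sp = 32. For a map with n = 1_000_000 mappings and
    // parallelismThreshold b = 10_000, batchFor returns min(n / b, sp) = 32,
    // i.e. a bulk task may split about 32 ways; b = Long.MAX_VALUE forces
    // fully sequential execution (returns 0).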
    /**
     * Performs the given action for each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEach(long parallelismThreshold,
                        BiConsumer<? super K,? super V> action) {
        if (action == null) throw new NullPointerException();
        new ForEachMappingTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEach(long parallelismThreshold,
                            BiFunction<? super K, ? super V, ? extends U> transformer,
                            Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedMappingTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each (key, value), or null if none. Upon
     * success, further element processing is suppressed and the
     * results of any other parallel invocations of the search
     * function are ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each (key, value), or null if none
     * @since 1.8
     */
    public <U> U search(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }
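    // Usage sketch (added; same hypothetical ConcurrentHashMap<String,
    // Integer> "map" as above):
    //
    //   map.forEach(1_000L, (k, v) -> System.out.println(k + "=" + v));
    //   String hit = map.search(1_000L,
    //       (k, v) -> v > 100 ? k : null);   // returning null keeps searching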
    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public <U> U reduce(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> transformer,
                        BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public double reduceToDouble(long parallelismThreshold,
                                 ToDoubleBiFunction<? super K, ? super V> transformer,
                                 double basis,
                                 DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public long reduceToLong(long parallelismThreshold,
                             ToLongBiFunction<? super K, ? super V> transformer,
                             long basis,
                             LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }
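    // Usage sketch (added; same hypothetical map): combine a per-entry
    // transformation with a commutative, associative reducer; the result
    // is null when the map is empty:
    //
    //   Integer max = map.reduce(1_000L, (k, v) -> v,
    //                            (a, b) -> a >= b ? a : b);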
    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     * @since 1.8
     */
    public int reduceToInt(long parallelismThreshold,
                           ToIntBiFunction<? super K, ? super V> transformer,
                           int basis,
                           IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachKey(long parallelismThreshold,
                           Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        new ForEachKeyTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachKey(long parallelismThreshold,
                               Function<? super K, ? extends U> transformer,
                               Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedKeyTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each key, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each key, or null if none
     * @since 1.8
     */
    public <U> U searchKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all keys using the given
     * reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all keys using the given
     * reducer to combine values, or null if none
     * @since 1.8
     */
    public K reduceKeys(long parallelismThreshold,
                        BiFunction<? super K, ? super K, ? extends K> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceKeysTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public <U> U reduceKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> transformer,
                            BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public double reduceKeysToDouble(long parallelismThreshold,
                                     ToDoubleFunction<? super K> transformer,
                                     double basis,
                                     DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }
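    // Usage sketch (added; hypothetical ConcurrentHashMap<String,Integer>):
    //
    //   String anyLongest = map.reduceKeys(1_000L,
    //       (a, b) -> a.length() >= b.length() ? a : b);
    //   double totalLen = map.reduceKeysToDouble(1_000L,
    //       String::length, 0.0, Double::sum);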
    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public long reduceKeysToLong(long parallelismThreshold,
                                 ToLongFunction<? super K> transformer,
                                 long basis,
                                 LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     * @since 1.8
     */
    public int reduceKeysToInt(long parallelismThreshold,
                               ToIntFunction<? super K> transformer,
                               int basis,
                               IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachValue(long parallelismThreshold,
                             Consumer<? super V> action) {
        if (action == null)
            throw new NullPointerException();
        new ForEachValueTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachValue(long parallelismThreshold,
                                 Function<? super V, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedValueTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each value, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each value, or null if none
     * @since 1.8
     */
    public <U> U searchValues(long parallelismThreshold,
                              Function<? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }
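    // Usage sketch (added): searchValues stops as soon as any parallel
    // task finds a match; results of other concurrent invocations of the
    // search function are discarded:
    //
    //   Integer firstBig = map.searchValues(1_000L,
    //       v -> v > 1_000_000 ? v : null);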
    /**
     * Returns the result of accumulating all values using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all values
     * @since 1.8
     */
    public V reduceValues(long parallelismThreshold,
                          BiFunction<? super V, ? super V, ? extends V> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceValuesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public <U> U reduceValues(long parallelismThreshold,
                              Function<? super V, ? extends U> transformer,
                              BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public double reduceValuesToDouble(long parallelismThreshold,
                                       ToDoubleFunction<? super V> transformer,
                                       double basis,
                                       DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public long reduceValuesToLong(long parallelismThreshold,
                                   ToLongFunction<? super V> transformer,
                                   long basis,
                                   LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }
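    // Usage sketch (added): the primitive-valued reductions avoid boxing
    // by threading a basis (identity) value through the reduction:
    //
    //   long total = map.reduceValuesToLong(1_000L,
    //       Integer::longValue, 0L, Long::sum);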
    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     * @since 1.8
     */
    public int reduceValuesToInt(long parallelismThreshold,
                                 ToIntFunction<? super V> transformer,
                                 int basis,
                                 IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     * @since 1.8
     */
    public void forEachEntry(long parallelismThreshold,
                             Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
                                  action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     * @param <U> the return type of the transformer
     * @since 1.8
     */
    public <U> void forEachEntry(long parallelismThreshold,
                                 Function<Map.Entry<K,V>, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedEntryTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each entry, or null if none. Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @param <U> the return type of the search function
     * @return a non-null result from applying the given search
     * function on each entry, or null if none
     * @since 1.8
     */
    public <U> U searchEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all entries using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all entries
     * @since 1.8
     */
    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceEntriesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @param <U> the return type of the transformer
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public <U> U reduceEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ? extends U> transformer,
                               BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public double reduceEntriesToDouble(long parallelismThreshold,
                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
                                        double basis,
                                        DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }
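    // Usage sketch (added): entry-based bulk operations receive the whole
    // (key, value) pair as a Map.Entry:
    //
    //   map.forEachEntry(1_000L,
    //       e -> System.out.println(e.getKey() + " -> " + e.getValue()));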
    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public long reduceEntriesToLong(long parallelismThreshold,
                                    ToLongFunction<Map.Entry<K,V>> transformer,
                                    long basis,
                                    LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     * @since 1.8
     */
    public int reduceEntriesToInt(long parallelismThreshold,
                                  ToIntFunction<Map.Entry<K,V>> transformer,
                                  int basis,
                                  IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }


    /* ----------------Views -------------- */

    /**
     * Base class for views.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMap<K,V> map;
        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns an iterator over the elements in this collection.
         *
         * <p>The returned iterator is
         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
         *
         * @return an iterator over the elements in this collection
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String OOME_MSG = "Required array size too large";

        public final Object[] toArray() {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space). Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }

    /**
     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value. This class cannot be directly instantiated.
     * See {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @since 1.8
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        private final V value;
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map. This method does
         * nothing if the key is not in the map.
         *
         * @param o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }
        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.key);
            }
        }
    }
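    // Usage sketch (added): a key-set view with a mapped default value acts
    // as a concurrent Set whose add() inserts into the backing map:
    //
    //   ConcurrentHashMap.KeySetView<String,Boolean> set =
    //       ConcurrentHashMap.newKeySet();
    //   set.add("a");            // maps "a" -> Boolean.TRUE
    //
    // By contrast, map.keySet() (no default value) is read/remove only;
    // its add() throws UnsupportedOperationException.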
    /**
     * A view of a ConcurrentHashMap as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        public final boolean remove(Object o) {
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        public boolean removeIf(Predicate<? super V> filter) {
            return map.removeValueIf(filter);
        }

        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }
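    /*
     * Illustrative only (not part of the original source): the values()
     * view supports removal but not addition; remove(v) deletes one
     * mapping whose value equals v (the first one the iterator meets).
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1); map.put("b", 1);
     *   map.values().remove(1);   // removes exactly one of the two mappings
     *   map.values().add(2);      // throws UnsupportedOperationException
     */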
    /**
     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
     * entries.  This class cannot be directly instantiated. See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        public boolean add(Entry<K,V> e) {
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
            return map.removeEntryIf(filter);
        }

        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }
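    /*
     * Illustrative only (not part of the original source): the entry
     * view rejects structurally null entries and maps remove(entry)
     * onto the atomic two-argument map.remove(key, value).
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.put("a", 1);
     *   Set<Map.Entry<String,Integer>> es = map.entrySet();
     *   es.remove(new AbstractMap.SimpleEntry<>("a", 2)); // false: value differs
     *   es.remove(new AbstractMap.SimpleEntry<>("a", 1)); // true: removed atomically
     */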
4970 */ 4971 @SuppressWarnings("serial") 4972 static final class ForEachKeyTask<K,V> 4973 extends BulkTask<K,V,Void> { 4974 final Consumer<? super K> action; ForEachKeyTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super K> action)4975 ForEachKeyTask 4976 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 4977 Consumer<? super K> action) { 4978 super(p, b, i, f, t); 4979 this.action = action; 4980 } compute()4981 public final void compute() { 4982 final Consumer<? super K> action; 4983 if ((action = this.action) != null) { 4984 for (int i = baseIndex, f, h; batch > 0 && 4985 (h = ((f = baseLimit) + i) >>> 1) > i;) { 4986 addToPendingCount(1); 4987 new ForEachKeyTask<K,V> 4988 (this, batch >>>= 1, baseLimit = h, f, tab, 4989 action).fork(); 4990 } 4991 for (Node<K,V> p; (p = advance()) != null;) 4992 action.accept(p.key); 4993 propagateCompletion(); 4994 } 4995 } 4996 } 4997 4998 @SuppressWarnings("serial") 4999 static final class ForEachValueTask<K,V> 5000 extends BulkTask<K,V,Void> { 5001 final Consumer<? super V> action; ForEachValueTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super V> action)5002 ForEachValueTask 5003 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5004 Consumer<? super V> action) { 5005 super(p, b, i, f, t); 5006 this.action = action; 5007 } compute()5008 public final void compute() { 5009 final Consumer<? super V> action; 5010 if ((action = this.action) != null) { 5011 for (int i = baseIndex, f, h; batch > 0 && 5012 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5013 addToPendingCount(1); 5014 new ForEachValueTask<K,V> 5015 (this, batch >>>= 1, baseLimit = h, f, tab, 5016 action).fork(); 5017 } 5018 for (Node<K,V> p; (p = advance()) != null;) 5019 action.accept(p.val); 5020 propagateCompletion(); 5021 } 5022 } 5023 } 5024 5025 @SuppressWarnings("serial") 5026 static final class ForEachEntryTask<K,V> 5027 extends BulkTask<K,V,Void> { 5028 final Consumer<? super Entry<K,V>> action; ForEachEntryTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super Entry<K,V>> action)5029 ForEachEntryTask 5030 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5031 Consumer<? super Entry<K,V>> action) { 5032 super(p, b, i, f, t); 5033 this.action = action; 5034 } compute()5035 public final void compute() { 5036 final Consumer<? super Entry<K,V>> action; 5037 if ((action = this.action) != null) { 5038 for (int i = baseIndex, f, h; batch > 0 && 5039 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5040 addToPendingCount(1); 5041 new ForEachEntryTask<K,V> 5042 (this, batch >>>= 1, baseLimit = h, f, tab, 5043 action).fork(); 5044 } 5045 for (Node<K,V> p; (p = advance()) != null; ) 5046 action.accept(p); 5047 propagateCompletion(); 5048 } 5049 } 5050 } 5051 5052 @SuppressWarnings("serial") 5053 static final class ForEachMappingTask<K,V> 5054 extends BulkTask<K,V,Void> { 5055 final BiConsumer<? super K, ? super V> action; ForEachMappingTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiConsumer<? super K,? super V> action)5056 ForEachMappingTask 5057 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5058 BiConsumer<? super K,? super V> action) { 5059 super(p, b, i, f, t); 5060 this.action = action; 5061 } compute()5062 public final void compute() { 5063 final BiConsumer<? super K, ? 
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
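    /*
     * Illustrative only (not part of the original source): the
     * transformed variants screen out null transformer results, so a
     * transformer can double as a filter.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   map.forEach(1L,
     *               (k, v) -> v > 0 ? k : null,   // null: skip this mapping
     *               s -> System.out.println(s));
     */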
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
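    /*
     * Illustrative only (not part of the original source): search
     * returns the first non-null function result and uses the shared
     * AtomicReference to let other participating threads stop early.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   String firstBig = map.search(1L, (k, v) -> v > 100 ? k : null);
     *   // null if no mapping satisfies the function
     */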
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
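    /*
     * Illustrative only (not part of the original source): plain
     * reductions combine elements pairwise in an unspecified order and
     * return null for an empty map, so reducers should be commutative
     * and associative.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   String joined = map.reduceKeys(1L, (k1, k2) -> k1 + "," + k2);
     */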
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceKeysTask<K,V,U> rights, nextRight;
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceValuesTask<K,V,U> rights, nextRight;
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceEntriesTask<K,V,U> rights, nextRight;
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceMappingsTask<K,V,U> rights, nextRight;
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
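    /*
     * Illustrative only (not part of the original source): map-reduce
     * variants apply the transformer first and skip null results, so
     * filtering and mapping happen in one pass before reduction.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   Integer maxPositive =
     *       map.reduce(1L,
     *                  (k, v) -> v > 0 ? v : null,   // transform/filter
     *                  (a, b) -> a >= b ? a : b);    // reduce
     */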
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;
        double result;
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;
        double result;
        MapReduceValuesToDoubleTask<K,V> rights, nextRight;
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;
        double result;
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;
        double result;
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
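    /*
     * Illustrative only (not part of the original source): the
     * primitive specializations avoid boxing and take a basis because
     * null cannot mark "no element seen". The basis seeds every
     * subtask, so it should be an identity for the reducer.
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   double sum = map.reduceValuesToDouble(1L, v -> v * 0.5,
     *                                         0.0, Double::sum);
     */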
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceKeysToLongTask<K,V> rights, nextRight;
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceValuesToLongTask<K,V> rights, nextRight;
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceEntriesToLongTask<K,V> rights, nextRight;
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceMappingsToLongTask<K,V> rights, nextRight;
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
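    /*
     * Illustrative only (not part of the original source):
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   long total = map.reduceToLong(1L, (k, v) -> (long) v, 0L, Long::sum);
     */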
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
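    /*
     * Illustrative only (not part of the original source):
     *
     *   ConcurrentHashMap<String,Integer> map = new ConcurrentHashMap<>();
     *   int count = map.reduceKeysToInt(1L, k -> 1, 0, Integer::sum);
     */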
    // Unsafe mechanics
    private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe();
    private static final long SIZECTL;
    private static final long TRANSFERINDEX;
    private static final long BASECOUNT;
    private static final long CELLSBUSY;
    private static final long CELLVALUE;
    private static final int ABASE;
    private static final int ASHIFT;

    static {
        try {
            SIZECTL = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("transferIndex"));
            BASECOUNT = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (ConcurrentHashMap.class.getDeclaredField("cellsBusy"));

            CELLVALUE = U.objectFieldOffset
                (CounterCell.class.getDeclaredField("value"));

            ABASE = U.arrayBaseOffset(Node[].class);
            int scale = U.arrayIndexScale(Node[].class);
            if ((scale & (scale - 1)) != 0)
                throw new Error("array index scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (ReflectiveOperationException e) {
            throw new Error(e);
        }

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;
    }
}