1 /* 2 * Written by Doug Lea with assistance from members of JCP JSR-166 3 * Expert Group and released to the public domain, as explained at 4 * http://creativecommons.org/publicdomain/zero/1.0/ 5 */ 6 7 package java.util.concurrent; 8 9 import java.io.ObjectStreamField; 10 import java.io.Serializable; 11 import java.lang.reflect.ParameterizedType; 12 import java.lang.reflect.Type; 13 import java.util.AbstractMap; 14 import java.util.Arrays; 15 import java.util.Collection; 16 import java.util.Enumeration; 17 import java.util.HashMap; 18 import java.util.Hashtable; 19 import java.util.Iterator; 20 import java.util.Map; 21 import java.util.NoSuchElementException; 22 import java.util.Set; 23 import java.util.Spliterator; 24 import java.util.concurrent.atomic.AtomicReference; 25 import java.util.concurrent.locks.LockSupport; 26 import java.util.concurrent.locks.ReentrantLock; 27 import java.util.function.BiConsumer; 28 import java.util.function.BiFunction; 29 import java.util.function.Consumer; 30 import java.util.function.DoubleBinaryOperator; 31 import java.util.function.Function; 32 import java.util.function.IntBinaryOperator; 33 import java.util.function.LongBinaryOperator; 34 import java.util.function.Predicate; 35 import java.util.function.ToDoubleBiFunction; 36 import java.util.function.ToDoubleFunction; 37 import java.util.function.ToIntBiFunction; 38 import java.util.function.ToIntFunction; 39 import java.util.function.ToLongBiFunction; 40 import java.util.function.ToLongFunction; 41 import java.util.stream.Stream; 42 43 // BEGIN android-note 44 // removed link to collections framework docs 45 // END android-note 46 47 /** 48 * A hash table supporting full concurrency of retrievals and 49 * high expected concurrency for updates. This class obeys the 50 * same functional specification as {@link java.util.Hashtable}, and 51 * includes versions of methods corresponding to each method of 52 * {@code Hashtable}. However, even though all operations are 53 * thread-safe, retrieval operations do <em>not</em> entail locking, 54 * and there is <em>not</em> any support for locking the entire table 55 * in a way that prevents all access. This class is fully 56 * interoperable with {@code Hashtable} in programs that rely on its 57 * thread safety but not on its synchronization details. 58 * 59 * <p>Retrieval operations (including {@code get}) generally do not 60 * block, so may overlap with update operations (including {@code put} 61 * and {@code remove}). Retrievals reflect the results of the most 62 * recently <em>completed</em> update operations holding upon their 63 * onset. (More formally, an update operation for a given key bears a 64 * <em>happens-before</em> relation with any (non-null) retrieval for 65 * that key reporting the updated value.) For aggregate operations 66 * such as {@code putAll} and {@code clear}, concurrent retrievals may 67 * reflect insertion or removal of only some entries. Similarly, 68 * Iterators, Spliterators and Enumerations return elements reflecting the 69 * state of the hash table at some point at or since the creation of the 70 * iterator/enumeration. They do <em>not</em> throw {@link 71 * java.util.ConcurrentModificationException ConcurrentModificationException}. 72 * However, iterators are designed to be used by only one thread at a time. 73 * Bear in mind that the results of aggregate status methods including 74 * {@code size}, {@code isEmpty}, and {@code containsValue} are typically 75 * useful only when a map is not undergoing concurrent updates in other threads. 76 * Otherwise the results of these methods reflect transient states 77 * that may be adequate for monitoring or estimation purposes, but not 78 * for program control. 79 * 80 * <p>The table is dynamically expanded when there are too many 81 * collisions (i.e., keys that have distinct hash codes but fall into 82 * the same slot modulo the table size), with the expected average 83 * effect of maintaining roughly two bins per mapping (corresponding 84 * to a 0.75 load factor threshold for resizing). There may be much 85 * variance around this average as mappings are added and removed, but 86 * overall, this maintains a commonly accepted time/space tradeoff for 87 * hash tables. However, resizing this or any other kind of hash 88 * table may be a relatively slow operation. When possible, it is a 89 * good idea to provide a size estimate as an optional {@code 90 * initialCapacity} constructor argument. An additional optional 91 * {@code loadFactor} constructor argument provides a further means of 92 * customizing initial table capacity by specifying the table density 93 * to be used in calculating the amount of space to allocate for the 94 * given number of elements. Also, for compatibility with previous 95 * versions of this class, constructors may optionally specify an 96 * expected {@code concurrencyLevel} as an additional hint for 97 * internal sizing. Note that using many keys with exactly the same 98 * {@code hashCode()} is a sure way to slow down performance of any 99 * hash table. To ameliorate impact, when keys are {@link Comparable}, 100 * this class may use comparison order among keys to help break ties. 101 * 102 * <p>A {@link Set} projection of a ConcurrentHashMap may be created 103 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed 104 * (using {@link #keySet(Object)} when only keys are of interest, and the 105 * mapped values are (perhaps transiently) not used or all take the 106 * same mapping value. 107 * 108 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a 109 * form of histogram or multiset) by using {@link 110 * java.util.concurrent.atomic.LongAdder} values and initializing via 111 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count 112 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use 113 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();} 114 * 115 * <p>This class and its views and iterators implement all of the 116 * <em>optional</em> methods of the {@link Map} and {@link Iterator} 117 * interfaces. 118 * 119 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class 120 * does <em>not</em> allow {@code null} to be used as a key or value. 121 * 122 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk 123 * operations that, unlike most {@link Stream} methods, are designed 124 * to be safely, and often sensibly, applied even with maps that are 125 * being concurrently updated by other threads; for example, when 126 * computing a snapshot summary of the values in a shared registry. 127 * There are three kinds of operation, each with four forms, accepting 128 * functions with keys, values, entries, and (key, value) pairs as 129 * arguments and/or return values. Because the elements of a 130 * ConcurrentHashMap are not ordered in any particular way, and may be 131 * processed in different orders in different parallel executions, the 132 * correctness of supplied functions should not depend on any 133 * ordering, or on any other objects or values that may transiently 134 * change while computation is in progress; and except for forEach 135 * actions, should ideally be side-effect-free. Bulk operations on 136 * {@link java.util.Map.Entry} objects do not support method {@code 137 * setValue}. 138 * 139 * <ul> 140 * <li>forEach: Performs a given action on each element. 141 * A variant form applies a given transformation on each element 142 * before performing the action. 143 * 144 * <li>search: Returns the first available non-null result of 145 * applying a given function on each element; skipping further 146 * search when a result is found. 147 * 148 * <li>reduce: Accumulates each element. The supplied reduction 149 * function cannot rely on ordering (more formally, it should be 150 * both associative and commutative). There are five variants: 151 * 152 * <ul> 153 * 154 * <li>Plain reductions. (There is not a form of this method for 155 * (key, value) function arguments since there is no corresponding 156 * return type.) 157 * 158 * <li>Mapped reductions that accumulate the results of a given 159 * function applied to each element. 160 * 161 * <li>Reductions to scalar doubles, longs, and ints, using a 162 * given basis value. 163 * 164 * </ul> 165 * </ul> 166 * 167 * <p>These bulk operations accept a {@code parallelismThreshold} 168 * argument. Methods proceed sequentially if the current map size is 169 * estimated to be less than the given threshold. Using a value of 170 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value 171 * of {@code 1} results in maximal parallelism by partitioning into 172 * enough subtasks to fully utilize the {@link 173 * ForkJoinPool#commonPool()} that is used for all parallel 174 * computations. Normally, you would initially choose one of these 175 * extreme values, and then measure performance of using in-between 176 * values that trade off overhead versus throughput. 177 * 178 * <p>The concurrency properties of bulk operations follow 179 * from those of ConcurrentHashMap: Any non-null result returned 180 * from {@code get(key)} and related access methods bears a 181 * happens-before relation with the associated insertion or 182 * update. The result of any bulk operation reflects the 183 * composition of these per-element relations (but is not 184 * necessarily atomic with respect to the map as a whole unless it 185 * is somehow known to be quiescent). Conversely, because keys 186 * and values in the map are never null, null serves as a reliable 187 * atomic indicator of the current lack of any result. To 188 * maintain this property, null serves as an implicit basis for 189 * all non-scalar reduction operations. For the double, long, and 190 * int versions, the basis should be one that, when combined with 191 * any other value, returns that other value (more formally, it 192 * should be the identity element for the reduction). Most common 193 * reductions have these properties; for example, computing a sum 194 * with basis 0 or a minimum with basis MAX_VALUE. 195 * 196 * <p>Search and transformation functions provided as arguments 197 * should similarly return null to indicate the lack of any result 198 * (in which case it is not used). In the case of mapped 199 * reductions, this also enables transformations to serve as 200 * filters, returning null (or, in the case of primitive 201 * specializations, the identity basis) if the element should not 202 * be combined. You can create compound transformations and 203 * filterings by composing them yourself under this "null means 204 * there is nothing there now" rule before using them in search or 205 * reduce operations. 206 * 207 * <p>Methods accepting and/or returning Entry arguments maintain 208 * key-value associations. They may be useful for example when 209 * finding the key for the greatest value. Note that "plain" Entry 210 * arguments can be supplied using {@code new 211 * AbstractMap.SimpleEntry(k,v)}. 212 * 213 * <p>Bulk operations may complete abruptly, throwing an 214 * exception encountered in the application of a supplied 215 * function. Bear in mind when handling such exceptions that other 216 * concurrently executing functions could also have thrown 217 * exceptions, or would have done so if the first exception had 218 * not occurred. 219 * 220 * <p>Speedups for parallel compared to sequential forms are common 221 * but not guaranteed. Parallel operations involving brief functions 222 * on small maps may execute more slowly than sequential forms if the 223 * underlying work to parallelize the computation is more expensive 224 * than the computation itself. Similarly, parallelization may not 225 * lead to much actual parallelism if all processors are busy 226 * performing unrelated tasks. 227 * 228 * <p>All arguments to all task methods must be non-null. 229 * 230 * @since 1.5 231 * @author Doug Lea 232 * @param <K> the type of keys maintained by this map 233 * @param <V> the type of mapped values 234 */ 235 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V> 236 implements ConcurrentMap<K,V>, Serializable { 237 private static final long serialVersionUID = 7249069246763182397L; 238 239 /* 240 * Overview: 241 * 242 * The primary design goal of this hash table is to maintain 243 * concurrent readability (typically method get(), but also 244 * iterators and related methods) while minimizing update 245 * contention. Secondary goals are to keep space consumption about 246 * the same or better than java.util.HashMap, and to support high 247 * initial insertion rates on an empty table by many threads. 248 * 249 * This map usually acts as a binned (bucketed) hash table. Each 250 * key-value mapping is held in a Node. Most nodes are instances 251 * of the basic Node class with hash, key, value, and next 252 * fields. However, various subclasses exist: TreeNodes are 253 * arranged in balanced trees, not lists. TreeBins hold the roots 254 * of sets of TreeNodes. ForwardingNodes are placed at the heads 255 * of bins during resizing. ReservationNodes are used as 256 * placeholders while establishing values in computeIfAbsent and 257 * related methods. The types TreeBin, ForwardingNode, and 258 * ReservationNode do not hold normal user keys, values, or 259 * hashes, and are readily distinguishable during search etc 260 * because they have negative hash fields and null key and value 261 * fields. (These special nodes are either uncommon or transient, 262 * so the impact of carrying around some unused fields is 263 * insignificant.) 264 * 265 * The table is lazily initialized to a power-of-two size upon the 266 * first insertion. Each bin in the table normally contains a 267 * list of Nodes (most often, the list has only zero or one Node). 268 * Table accesses require volatile/atomic reads, writes, and 269 * CASes. Because there is no other way to arrange this without 270 * adding further indirections, we use intrinsics 271 * (sun.misc.Unsafe) operations. 272 * 273 * We use the top (sign) bit of Node hash fields for control 274 * purposes -- it is available anyway because of addressing 275 * constraints. Nodes with negative hash fields are specially 276 * handled or ignored in map methods. 277 * 278 * Insertion (via put or its variants) of the first node in an 279 * empty bin is performed by just CASing it to the bin. This is 280 * by far the most common case for put operations under most 281 * key/hash distributions. Other update operations (insert, 282 * delete, and replace) require locks. We do not want to waste 283 * the space required to associate a distinct lock object with 284 * each bin, so instead use the first node of a bin list itself as 285 * a lock. Locking support for these locks relies on builtin 286 * "synchronized" monitors. 287 * 288 * Using the first node of a list as a lock does not by itself 289 * suffice though: When a node is locked, any update must first 290 * validate that it is still the first node after locking it, and 291 * retry if not. Because new nodes are always appended to lists, 292 * once a node is first in a bin, it remains first until deleted 293 * or the bin becomes invalidated (upon resizing). 294 * 295 * The main disadvantage of per-bin locks is that other update 296 * operations on other nodes in a bin list protected by the same 297 * lock can stall, for example when user equals() or mapping 298 * functions take a long time. However, statistically, under 299 * random hash codes, this is not a common problem. Ideally, the 300 * frequency of nodes in bins follows a Poisson distribution 301 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a 302 * parameter of about 0.5 on average, given the resizing threshold 303 * of 0.75, although with a large variance because of resizing 304 * granularity. Ignoring variance, the expected occurrences of 305 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The 306 * first values are: 307 * 308 * 0: 0.60653066 309 * 1: 0.30326533 310 * 2: 0.07581633 311 * 3: 0.01263606 312 * 4: 0.00157952 313 * 5: 0.00015795 314 * 6: 0.00001316 315 * 7: 0.00000094 316 * 8: 0.00000006 317 * more: less than 1 in ten million 318 * 319 * Lock contention probability for two threads accessing distinct 320 * elements is roughly 1 / (8 * #elements) under random hashes. 321 * 322 * Actual hash code distributions encountered in practice 323 * sometimes deviate significantly from uniform randomness. This 324 * includes the case when N > (1<<30), so some keys MUST collide. 325 * Similarly for dumb or hostile usages in which multiple keys are 326 * designed to have identical hash codes or ones that differs only 327 * in masked-out high bits. So we use a secondary strategy that 328 * applies when the number of nodes in a bin exceeds a 329 * threshold. These TreeBins use a balanced tree to hold nodes (a 330 * specialized form of red-black trees), bounding search time to 331 * O(log N). Each search step in a TreeBin is at least twice as 332 * slow as in a regular list, but given that N cannot exceed 333 * (1<<64) (before running out of addresses) this bounds search 334 * steps, lock hold times, etc, to reasonable constants (roughly 335 * 100 nodes inspected per operation worst case) so long as keys 336 * are Comparable (which is very common -- String, Long, etc). 337 * TreeBin nodes (TreeNodes) also maintain the same "next" 338 * traversal pointers as regular nodes, so can be traversed in 339 * iterators in the same way. 340 * 341 * The table is resized when occupancy exceeds a percentage 342 * threshold (nominally, 0.75, but see below). Any thread 343 * noticing an overfull bin may assist in resizing after the 344 * initiating thread allocates and sets up the replacement array. 345 * However, rather than stalling, these other threads may proceed 346 * with insertions etc. The use of TreeBins shields us from the 347 * worst case effects of overfilling while resizes are in 348 * progress. Resizing proceeds by transferring bins, one by one, 349 * from the table to the next table. However, threads claim small 350 * blocks of indices to transfer (via field transferIndex) before 351 * doing so, reducing contention. A generation stamp in field 352 * sizeCtl ensures that resizings do not overlap. Because we are 353 * using power-of-two expansion, the elements from each bin must 354 * either stay at same index, or move with a power of two 355 * offset. We eliminate unnecessary node creation by catching 356 * cases where old nodes can be reused because their next fields 357 * won't change. On average, only about one-sixth of them need 358 * cloning when a table doubles. The nodes they replace will be 359 * garbage collectable as soon as they are no longer referenced by 360 * any reader thread that may be in the midst of concurrently 361 * traversing table. Upon transfer, the old table bin contains 362 * only a special forwarding node (with hash field "MOVED") that 363 * contains the next table as its key. On encountering a 364 * forwarding node, access and update operations restart, using 365 * the new table. 366 * 367 * Each bin transfer requires its bin lock, which can stall 368 * waiting for locks while resizing. However, because other 369 * threads can join in and help resize rather than contend for 370 * locks, average aggregate waits become shorter as resizing 371 * progresses. The transfer operation must also ensure that all 372 * accessible bins in both the old and new table are usable by any 373 * traversal. This is arranged in part by proceeding from the 374 * last bin (table.length - 1) up towards the first. Upon seeing 375 * a forwarding node, traversals (see class Traverser) arrange to 376 * move to the new table without revisiting nodes. To ensure that 377 * no intervening nodes are skipped even when moved out of order, 378 * a stack (see class TableStack) is created on first encounter of 379 * a forwarding node during a traversal, to maintain its place if 380 * later processing the current table. The need for these 381 * save/restore mechanics is relatively rare, but when one 382 * forwarding node is encountered, typically many more will be. 383 * So Traversers use a simple caching scheme to avoid creating so 384 * many new TableStack nodes. (Thanks to Peter Levart for 385 * suggesting use of a stack here.) 386 * 387 * The traversal scheme also applies to partial traversals of 388 * ranges of bins (via an alternate Traverser constructor) 389 * to support partitioned aggregate operations. Also, read-only 390 * operations give up if ever forwarded to a null table, which 391 * provides support for shutdown-style clearing, which is also not 392 * currently implemented. 393 * 394 * Lazy table initialization minimizes footprint until first use, 395 * and also avoids resizings when the first operation is from a 396 * putAll, constructor with map argument, or deserialization. 397 * These cases attempt to override the initial capacity settings, 398 * but harmlessly fail to take effect in cases of races. 399 * 400 * The element count is maintained using a specialization of 401 * LongAdder. We need to incorporate a specialization rather than 402 * just use a LongAdder in order to access implicit 403 * contention-sensing that leads to creation of multiple 404 * CounterCells. The counter mechanics avoid contention on 405 * updates but can encounter cache thrashing if read too 406 * frequently during concurrent access. To avoid reading so often, 407 * resizing under contention is attempted only upon adding to a 408 * bin already holding two or more nodes. Under uniform hash 409 * distributions, the probability of this occurring at threshold 410 * is around 13%, meaning that only about 1 in 8 puts check 411 * threshold (and after resizing, many fewer do so). 412 * 413 * TreeBins use a special form of comparison for search and 414 * related operations (which is the main reason we cannot use 415 * existing collections such as TreeMaps). TreeBins contain 416 * Comparable elements, but may contain others, as well as 417 * elements that are Comparable but not necessarily Comparable for 418 * the same T, so we cannot invoke compareTo among them. To handle 419 * this, the tree is ordered primarily by hash value, then by 420 * Comparable.compareTo order if applicable. On lookup at a node, 421 * if elements are not comparable or compare as 0 then both left 422 * and right children may need to be searched in the case of tied 423 * hash values. (This corresponds to the full list search that 424 * would be necessary if all elements were non-Comparable and had 425 * tied hashes.) On insertion, to keep a total ordering (or as 426 * close as is required here) across rebalancings, we compare 427 * classes and identityHashCodes as tie-breakers. The red-black 428 * balancing code is updated from pre-jdk-collections 429 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) 430 * based in turn on Cormen, Leiserson, and Rivest "Introduction to 431 * Algorithms" (CLR). 432 * 433 * TreeBins also require an additional locking mechanism. While 434 * list traversal is always possible by readers even during 435 * updates, tree traversal is not, mainly because of tree-rotations 436 * that may change the root node and/or its linkages. TreeBins 437 * include a simple read-write lock mechanism parasitic on the 438 * main bin-synchronization strategy: Structural adjustments 439 * associated with an insertion or removal are already bin-locked 440 * (and so cannot conflict with other writers) but must wait for 441 * ongoing readers to finish. Since there can be only one such 442 * waiter, we use a simple scheme using a single "waiter" field to 443 * block writers. However, readers need never block. If the root 444 * lock is held, they proceed along the slow traversal path (via 445 * next-pointers) until the lock becomes available or the list is 446 * exhausted, whichever comes first. These cases are not fast, but 447 * maximize aggregate expected throughput. 448 * 449 * Maintaining API and serialization compatibility with previous 450 * versions of this class introduces several oddities. Mainly: We 451 * leave untouched but unused constructor arguments referring to 452 * concurrencyLevel. We accept a loadFactor constructor argument, 453 * but apply it only to initial table capacity (which is the only 454 * time that we can guarantee to honor it.) We also declare an 455 * unused "Segment" class that is instantiated in minimal form 456 * only when serializing. 457 * 458 * Also, solely for compatibility with previous versions of this 459 * class, it extends AbstractMap, even though all of its methods 460 * are overridden, so it is just useless baggage. 461 * 462 * This file is organized to make things a little easier to follow 463 * while reading than they might otherwise: First the main static 464 * declarations and utilities, then fields, then main public 465 * methods (with a few factorings of multiple public methods into 466 * internal ones), then sizing methods, trees, traversers, and 467 * bulk operations. 468 */ 469 470 /* ---------------- Constants -------------- */ 471 472 /** 473 * The largest possible table capacity. This value must be 474 * exactly 1<<30 to stay within Java array allocation and indexing 475 * bounds for power of two table sizes, and is further required 476 * because the top two bits of 32bit hash fields are used for 477 * control purposes. 478 */ 479 private static final int MAXIMUM_CAPACITY = 1 << 30; 480 481 /** 482 * The default initial table capacity. Must be a power of 2 483 * (i.e., at least 1) and at most MAXIMUM_CAPACITY. 484 */ 485 private static final int DEFAULT_CAPACITY = 16; 486 487 /** 488 * The largest possible (non-power of two) array size. 489 * Needed by toArray and related methods. 490 */ 491 static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; 492 493 /** 494 * The default concurrency level for this table. Unused but 495 * defined for compatibility with previous versions of this class. 496 */ 497 private static final int DEFAULT_CONCURRENCY_LEVEL = 16; 498 499 /** 500 * The load factor for this table. Overrides of this value in 501 * constructors affect only the initial table capacity. The 502 * actual floating point value isn't normally used -- it is 503 * simpler to use expressions such as {@code n - (n >>> 2)} for 504 * the associated resizing threshold. 505 */ 506 private static final float LOAD_FACTOR = 0.75f; 507 508 /** 509 * The bin count threshold for using a tree rather than list for a 510 * bin. Bins are converted to trees when adding an element to a 511 * bin with at least this many nodes. The value must be greater 512 * than 2, and should be at least 8 to mesh with assumptions in 513 * tree removal about conversion back to plain bins upon 514 * shrinkage. 515 */ 516 static final int TREEIFY_THRESHOLD = 8; 517 518 /** 519 * The bin count threshold for untreeifying a (split) bin during a 520 * resize operation. Should be less than TREEIFY_THRESHOLD, and at 521 * most 6 to mesh with shrinkage detection under removal. 522 */ 523 static final int UNTREEIFY_THRESHOLD = 6; 524 525 /** 526 * The smallest table capacity for which bins may be treeified. 527 * (Otherwise the table is resized if too many nodes in a bin.) 528 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid 529 * conflicts between resizing and treeification thresholds. 530 */ 531 static final int MIN_TREEIFY_CAPACITY = 64; 532 533 /** 534 * Minimum number of rebinnings per transfer step. Ranges are 535 * subdivided to allow multiple resizer threads. This value 536 * serves as a lower bound to avoid resizers encountering 537 * excessive memory contention. The value should be at least 538 * DEFAULT_CAPACITY. 539 */ 540 private static final int MIN_TRANSFER_STRIDE = 16; 541 542 /** 543 * The number of bits used for generation stamp in sizeCtl. 544 * Must be at least 6 for 32bit arrays. 545 */ 546 private static final int RESIZE_STAMP_BITS = 16; 547 548 /** 549 * The maximum number of threads that can help resize. 550 * Must fit in 32 - RESIZE_STAMP_BITS bits. 551 */ 552 private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1; 553 554 /** 555 * The bit shift for recording size stamp in sizeCtl. 556 */ 557 private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS; 558 559 /* 560 * Encodings for Node hash fields. See above for explanation. 561 */ 562 static final int MOVED = -1; // hash for forwarding nodes 563 static final int TREEBIN = -2; // hash for roots of trees 564 static final int RESERVED = -3; // hash for transient reservations 565 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash 566 567 /** Number of CPUS, to place bounds on some sizings */ 568 static final int NCPU = Runtime.getRuntime().availableProcessors(); 569 570 /** 571 * Serialized pseudo-fields, provided only for jdk7 compatibility. 572 * @serialField segments Segment[] 573 * The segments, each of which is a specialized hash table. 574 * @serialField segmentMask int 575 * Mask value for indexing into segments. The upper bits of a 576 * key's hash code are used to choose the segment. 577 * @serialField segmentShift int 578 * Shift value for indexing within segments. 579 */ 580 private static final ObjectStreamField[] serialPersistentFields = { 581 new ObjectStreamField("segments", Segment[].class), 582 new ObjectStreamField("segmentMask", Integer.TYPE), 583 new ObjectStreamField("segmentShift", Integer.TYPE), 584 }; 585 586 /* ---------------- Nodes -------------- */ 587 588 /** 589 * Key-value entry. This class is never exported out as a 590 * user-mutable Map.Entry (i.e., one supporting setValue; see 591 * MapEntry below), but can be used for read-only traversals used 592 * in bulk tasks. Subclasses of Node with a negative hash field 593 * are special, and contain null keys and values (but are never 594 * exported). Otherwise, keys and vals are never null. 595 */ 596 static class Node<K,V> implements Map.Entry<K,V> { 597 final int hash; 598 final K key; 599 volatile V val; 600 volatile Node<K,V> next; 601 Node(int hash, K key, V val, Node<K,V> next)602 Node(int hash, K key, V val, Node<K,V> next) { 603 this.hash = hash; 604 this.key = key; 605 this.val = val; 606 this.next = next; 607 } 608 getKey()609 public final K getKey() { return key; } getValue()610 public final V getValue() { return val; } hashCode()611 public final int hashCode() { return key.hashCode() ^ val.hashCode(); } toString()612 public final String toString() { 613 return Helpers.mapEntryToString(key, val); 614 } setValue(V value)615 public final V setValue(V value) { 616 throw new UnsupportedOperationException(); 617 } 618 equals(Object o)619 public final boolean equals(Object o) { 620 Object k, v, u; Map.Entry<?,?> e; 621 return ((o instanceof Map.Entry) && 622 (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 623 (v = e.getValue()) != null && 624 (k == key || k.equals(key)) && 625 (v == (u = val) || v.equals(u))); 626 } 627 628 /** 629 * Virtualized support for map.get(); overridden in subclasses. 630 */ find(int h, Object k)631 Node<K,V> find(int h, Object k) { 632 Node<K,V> e = this; 633 if (k != null) { 634 do { 635 K ek; 636 if (e.hash == h && 637 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 638 return e; 639 } while ((e = e.next) != null); 640 } 641 return null; 642 } 643 } 644 645 /* ---------------- Static utilities -------------- */ 646 647 /** 648 * Spreads (XORs) higher bits of hash to lower and also forces top 649 * bit to 0. Because the table uses power-of-two masking, sets of 650 * hashes that vary only in bits above the current mask will 651 * always collide. (Among known examples are sets of Float keys 652 * holding consecutive whole numbers in small tables.) So we 653 * apply a transform that spreads the impact of higher bits 654 * downward. There is a tradeoff between speed, utility, and 655 * quality of bit-spreading. Because many common sets of hashes 656 * are already reasonably distributed (so don't benefit from 657 * spreading), and because we use trees to handle large sets of 658 * collisions in bins, we just XOR some shifted bits in the 659 * cheapest possible way to reduce systematic lossage, as well as 660 * to incorporate impact of the highest bits that would otherwise 661 * never be used in index calculations because of table bounds. 662 */ spread(int h)663 static final int spread(int h) { 664 return (h ^ (h >>> 16)) & HASH_BITS; 665 } 666 667 /** 668 * Returns a power of two table size for the given desired capacity. 669 * See Hackers Delight, sec 3.2 670 */ tableSizeFor(int c)671 private static final int tableSizeFor(int c) { 672 int n = c - 1; 673 n |= n >>> 1; 674 n |= n >>> 2; 675 n |= n >>> 4; 676 n |= n >>> 8; 677 n |= n >>> 16; 678 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; 679 } 680 681 /** 682 * Returns x's Class if it is of the form "class C implements 683 * Comparable<C>", else null. 684 */ comparableClassFor(Object x)685 static Class<?> comparableClassFor(Object x) { 686 if (x instanceof Comparable) { 687 Class<?> c; Type[] ts, as; Type t; ParameterizedType p; 688 if ((c = x.getClass()) == String.class) // bypass checks 689 return c; 690 if ((ts = c.getGenericInterfaces()) != null) { 691 for (int i = 0; i < ts.length; ++i) { 692 if (((t = ts[i]) instanceof ParameterizedType) && 693 ((p = (ParameterizedType)t).getRawType() == 694 Comparable.class) && 695 (as = p.getActualTypeArguments()) != null && 696 as.length == 1 && as[0] == c) // type arg is c 697 return c; 698 } 699 } 700 } 701 return null; 702 } 703 704 /** 705 * Returns k.compareTo(x) if x matches kc (k's screened comparable 706 * class), else 0. 707 */ 708 @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable compareComparables(Class<?> kc, Object k, Object x)709 static int compareComparables(Class<?> kc, Object k, Object x) { 710 return (x == null || x.getClass() != kc ? 0 : 711 ((Comparable)k).compareTo(x)); 712 } 713 714 /* ---------------- Table element access -------------- */ 715 716 /* 717 * Volatile access methods are used for table elements as well as 718 * elements of in-progress next table while resizing. All uses of 719 * the tab arguments must be null checked by callers. All callers 720 * also paranoically precheck that tab's length is not zero (or an 721 * equivalent check), thus ensuring that any index argument taking 722 * the form of a hash value anded with (length - 1) is a valid 723 * index. Note that, to be correct wrt arbitrary concurrency 724 * errors by users, these checks must operate on local variables, 725 * which accounts for some odd-looking inline assignments below. 726 * Note that calls to setTabAt always occur within locked regions, 727 * and so in principle require only release ordering, not 728 * full volatile semantics, but are currently coded as volatile 729 * writes to be conservative. 730 */ 731 732 @SuppressWarnings("unchecked") tabAt(Node<K,V>[] tab, int i)733 static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) { 734 return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE); 735 } 736 casTabAt(Node<K,V>[] tab, int i, Node<K,V> c, Node<K,V> v)737 static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i, 738 Node<K,V> c, Node<K,V> v) { 739 return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v); 740 } 741 setTabAt(Node<K,V>[] tab, int i, Node<K,V> v)742 static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) { 743 U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v); 744 } 745 746 /* ---------------- Fields -------------- */ 747 748 /** 749 * The array of bins. Lazily initialized upon first insertion. 750 * Size is always a power of two. Accessed directly by iterators. 751 */ 752 transient volatile Node<K,V>[] table; 753 754 /** 755 * The next table to use; non-null only while resizing. 756 */ 757 private transient volatile Node<K,V>[] nextTable; 758 759 /** 760 * Base counter value, used mainly when there is no contention, 761 * but also as a fallback during table initialization 762 * races. Updated via CAS. 763 */ 764 private transient volatile long baseCount; 765 766 /** 767 * Table initialization and resizing control. When negative, the 768 * table is being initialized or resized: -1 for initialization, 769 * else -(1 + the number of active resizing threads). Otherwise, 770 * when table is null, holds the initial table size to use upon 771 * creation, or 0 for default. After initialization, holds the 772 * next element count value upon which to resize the table. 773 */ 774 private transient volatile int sizeCtl; 775 776 /** 777 * The next table index (plus one) to split while resizing. 778 */ 779 private transient volatile int transferIndex; 780 781 /** 782 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells. 783 */ 784 private transient volatile int cellsBusy; 785 786 /** 787 * Table of counter cells. When non-null, size is a power of 2. 788 */ 789 private transient volatile CounterCell[] counterCells; 790 791 // views 792 private transient KeySetView<K,V> keySet; 793 private transient ValuesView<K,V> values; 794 private transient EntrySetView<K,V> entrySet; 795 796 797 /* ---------------- Public operations -------------- */ 798 799 /** 800 * Creates a new, empty map with the default initial table size (16). 801 */ ConcurrentHashMap()802 public ConcurrentHashMap() { 803 } 804 805 /** 806 * Creates a new, empty map with an initial table size 807 * accommodating the specified number of elements without the need 808 * to dynamically resize. 809 * 810 * @param initialCapacity The implementation performs internal 811 * sizing to accommodate this many elements. 812 * @throws IllegalArgumentException if the initial capacity of 813 * elements is negative 814 */ ConcurrentHashMap(int initialCapacity)815 public ConcurrentHashMap(int initialCapacity) { 816 if (initialCapacity < 0) 817 throw new IllegalArgumentException(); 818 int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ? 819 MAXIMUM_CAPACITY : 820 tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1)); 821 this.sizeCtl = cap; 822 } 823 824 /** 825 * Creates a new map with the same mappings as the given map. 826 * 827 * @param m the map 828 */ ConcurrentHashMap(Map<? extends K, ? extends V> m)829 public ConcurrentHashMap(Map<? extends K, ? extends V> m) { 830 this.sizeCtl = DEFAULT_CAPACITY; 831 putAll(m); 832 } 833 834 /** 835 * Creates a new, empty map with an initial table size based on 836 * the given number of elements ({@code initialCapacity}) and 837 * initial table density ({@code loadFactor}). 838 * 839 * @param initialCapacity the initial capacity. The implementation 840 * performs internal sizing to accommodate this many elements, 841 * given the specified load factor. 842 * @param loadFactor the load factor (table density) for 843 * establishing the initial table size 844 * @throws IllegalArgumentException if the initial capacity of 845 * elements is negative or the load factor is nonpositive 846 * 847 * @since 1.6 848 */ ConcurrentHashMap(int initialCapacity, float loadFactor)849 public ConcurrentHashMap(int initialCapacity, float loadFactor) { 850 this(initialCapacity, loadFactor, 1); 851 } 852 853 /** 854 * Creates a new, empty map with an initial table size based on 855 * the given number of elements ({@code initialCapacity}), table 856 * density ({@code loadFactor}), and number of concurrently 857 * updating threads ({@code concurrencyLevel}). 858 * 859 * @param initialCapacity the initial capacity. The implementation 860 * performs internal sizing to accommodate this many elements, 861 * given the specified load factor. 862 * @param loadFactor the load factor (table density) for 863 * establishing the initial table size 864 * @param concurrencyLevel the estimated number of concurrently 865 * updating threads. The implementation may use this value as 866 * a sizing hint. 867 * @throws IllegalArgumentException if the initial capacity is 868 * negative or the load factor or concurrencyLevel are 869 * nonpositive 870 */ ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel)871 public ConcurrentHashMap(int initialCapacity, 872 float loadFactor, int concurrencyLevel) { 873 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0) 874 throw new IllegalArgumentException(); 875 if (initialCapacity < concurrencyLevel) // Use at least as many bins 876 initialCapacity = concurrencyLevel; // as estimated threads 877 long size = (long)(1.0 + (long)initialCapacity / loadFactor); 878 int cap = (size >= (long)MAXIMUM_CAPACITY) ? 879 MAXIMUM_CAPACITY : tableSizeFor((int)size); 880 this.sizeCtl = cap; 881 } 882 883 // Original (since JDK1.2) Map methods 884 885 /** 886 * {@inheritDoc} 887 */ size()888 public int size() { 889 long n = sumCount(); 890 return ((n < 0L) ? 0 : 891 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE : 892 (int)n); 893 } 894 895 /** 896 * {@inheritDoc} 897 */ isEmpty()898 public boolean isEmpty() { 899 return sumCount() <= 0L; // ignore transient negative values 900 } 901 902 /** 903 * Returns the value to which the specified key is mapped, 904 * or {@code null} if this map contains no mapping for the key. 905 * 906 * <p>More formally, if this map contains a mapping from a key 907 * {@code k} to a value {@code v} such that {@code key.equals(k)}, 908 * then this method returns {@code v}; otherwise it returns 909 * {@code null}. (There can be at most one such mapping.) 910 * 911 * @throws NullPointerException if the specified key is null 912 */ get(Object key)913 public V get(Object key) { 914 Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek; 915 int h = spread(key.hashCode()); 916 if ((tab = table) != null && (n = tab.length) > 0 && 917 (e = tabAt(tab, (n - 1) & h)) != null) { 918 if ((eh = e.hash) == h) { 919 if ((ek = e.key) == key || (ek != null && key.equals(ek))) 920 return e.val; 921 } 922 else if (eh < 0) 923 return (p = e.find(h, key)) != null ? p.val : null; 924 while ((e = e.next) != null) { 925 if (e.hash == h && 926 ((ek = e.key) == key || (ek != null && key.equals(ek)))) 927 return e.val; 928 } 929 } 930 return null; 931 } 932 933 /** 934 * Tests if the specified object is a key in this table. 935 * 936 * @param key possible key 937 * @return {@code true} if and only if the specified object 938 * is a key in this table, as determined by the 939 * {@code equals} method; {@code false} otherwise 940 * @throws NullPointerException if the specified key is null 941 */ containsKey(Object key)942 public boolean containsKey(Object key) { 943 return get(key) != null; 944 } 945 946 /** 947 * Returns {@code true} if this map maps one or more keys to the 948 * specified value. Note: This method may require a full traversal 949 * of the map, and is much slower than method {@code containsKey}. 950 * 951 * @param value value whose presence in this map is to be tested 952 * @return {@code true} if this map maps one or more keys to the 953 * specified value 954 * @throws NullPointerException if the specified value is null 955 */ containsValue(Object value)956 public boolean containsValue(Object value) { 957 if (value == null) 958 throw new NullPointerException(); 959 Node<K,V>[] t; 960 if ((t = table) != null) { 961 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 962 for (Node<K,V> p; (p = it.advance()) != null; ) { 963 V v; 964 if ((v = p.val) == value || (v != null && value.equals(v))) 965 return true; 966 } 967 } 968 return false; 969 } 970 971 /** 972 * Maps the specified key to the specified value in this table. 973 * Neither the key nor the value can be null. 974 * 975 * <p>The value can be retrieved by calling the {@code get} method 976 * with a key that is equal to the original key. 977 * 978 * @param key key with which the specified value is to be associated 979 * @param value value to be associated with the specified key 980 * @return the previous value associated with {@code key}, or 981 * {@code null} if there was no mapping for {@code key} 982 * @throws NullPointerException if the specified key or value is null 983 */ put(K key, V value)984 public V put(K key, V value) { 985 return putVal(key, value, false); 986 } 987 988 /** Implementation for put and putIfAbsent */ putVal(K key, V value, boolean onlyIfAbsent)989 final V putVal(K key, V value, boolean onlyIfAbsent) { 990 if (key == null || value == null) throw new NullPointerException(); 991 int hash = spread(key.hashCode()); 992 int binCount = 0; 993 for (Node<K,V>[] tab = table;;) { 994 Node<K,V> f; int n, i, fh; 995 if (tab == null || (n = tab.length) == 0) 996 tab = initTable(); 997 else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) { 998 if (casTabAt(tab, i, null, 999 new Node<K,V>(hash, key, value, null))) 1000 break; // no lock when adding to empty bin 1001 } 1002 else if ((fh = f.hash) == MOVED) 1003 tab = helpTransfer(tab, f); 1004 else { 1005 V oldVal = null; 1006 synchronized (f) { 1007 if (tabAt(tab, i) == f) { 1008 if (fh >= 0) { 1009 binCount = 1; 1010 for (Node<K,V> e = f;; ++binCount) { 1011 K ek; 1012 if (e.hash == hash && 1013 ((ek = e.key) == key || 1014 (ek != null && key.equals(ek)))) { 1015 oldVal = e.val; 1016 if (!onlyIfAbsent) 1017 e.val = value; 1018 break; 1019 } 1020 Node<K,V> pred = e; 1021 if ((e = e.next) == null) { 1022 pred.next = new Node<K,V>(hash, key, 1023 value, null); 1024 break; 1025 } 1026 } 1027 } 1028 else if (f instanceof TreeBin) { 1029 Node<K,V> p; 1030 binCount = 2; 1031 if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key, 1032 value)) != null) { 1033 oldVal = p.val; 1034 if (!onlyIfAbsent) 1035 p.val = value; 1036 } 1037 } 1038 else if (f instanceof ReservationNode) 1039 throw new IllegalStateException("Recursive update"); 1040 } 1041 } 1042 if (binCount != 0) { 1043 if (binCount >= TREEIFY_THRESHOLD) 1044 treeifyBin(tab, i); 1045 if (oldVal != null) 1046 return oldVal; 1047 break; 1048 } 1049 } 1050 } 1051 addCount(1L, binCount); 1052 return null; 1053 } 1054 1055 /** 1056 * Copies all of the mappings from the specified map to this one. 1057 * These mappings replace any mappings that this map had for any of the 1058 * keys currently in the specified map. 1059 * 1060 * @param m mappings to be stored in this map 1061 */ putAll(Map<? extends K, ? extends V> m)1062 public void putAll(Map<? extends K, ? extends V> m) { 1063 tryPresize(m.size()); 1064 for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) 1065 putVal(e.getKey(), e.getValue(), false); 1066 } 1067 1068 /** 1069 * Removes the key (and its corresponding value) from this map. 1070 * This method does nothing if the key is not in the map. 1071 * 1072 * @param key the key that needs to be removed 1073 * @return the previous value associated with {@code key}, or 1074 * {@code null} if there was no mapping for {@code key} 1075 * @throws NullPointerException if the specified key is null 1076 */ remove(Object key)1077 public V remove(Object key) { 1078 return replaceNode(key, null, null); 1079 } 1080 1081 /** 1082 * Implementation for the four public remove/replace methods: 1083 * Replaces node value with v, conditional upon match of cv if 1084 * non-null. If resulting value is null, delete. 1085 */ replaceNode(Object key, V value, Object cv)1086 final V replaceNode(Object key, V value, Object cv) { 1087 int hash = spread(key.hashCode()); 1088 for (Node<K,V>[] tab = table;;) { 1089 Node<K,V> f; int n, i, fh; 1090 if (tab == null || (n = tab.length) == 0 || 1091 (f = tabAt(tab, i = (n - 1) & hash)) == null) 1092 break; 1093 else if ((fh = f.hash) == MOVED) 1094 tab = helpTransfer(tab, f); 1095 else { 1096 V oldVal = null; 1097 boolean validated = false; 1098 synchronized (f) { 1099 if (tabAt(tab, i) == f) { 1100 if (fh >= 0) { 1101 validated = true; 1102 for (Node<K,V> e = f, pred = null;;) { 1103 K ek; 1104 if (e.hash == hash && 1105 ((ek = e.key) == key || 1106 (ek != null && key.equals(ek)))) { 1107 V ev = e.val; 1108 if (cv == null || cv == ev || 1109 (ev != null && cv.equals(ev))) { 1110 oldVal = ev; 1111 if (value != null) 1112 e.val = value; 1113 else if (pred != null) 1114 pred.next = e.next; 1115 else 1116 setTabAt(tab, i, e.next); 1117 } 1118 break; 1119 } 1120 pred = e; 1121 if ((e = e.next) == null) 1122 break; 1123 } 1124 } 1125 else if (f instanceof TreeBin) { 1126 validated = true; 1127 TreeBin<K,V> t = (TreeBin<K,V>)f; 1128 TreeNode<K,V> r, p; 1129 if ((r = t.root) != null && 1130 (p = r.findTreeNode(hash, key, null)) != null) { 1131 V pv = p.val; 1132 if (cv == null || cv == pv || 1133 (pv != null && cv.equals(pv))) { 1134 oldVal = pv; 1135 if (value != null) 1136 p.val = value; 1137 else if (t.removeTreeNode(p)) 1138 setTabAt(tab, i, untreeify(t.first)); 1139 } 1140 } 1141 } 1142 else if (f instanceof ReservationNode) 1143 throw new IllegalStateException("Recursive update"); 1144 } 1145 } 1146 if (validated) { 1147 if (oldVal != null) { 1148 if (value == null) 1149 addCount(-1L, -1); 1150 return oldVal; 1151 } 1152 break; 1153 } 1154 } 1155 } 1156 return null; 1157 } 1158 1159 /** 1160 * Removes all of the mappings from this map. 1161 */ clear()1162 public void clear() { 1163 long delta = 0L; // negative number of deletions 1164 int i = 0; 1165 Node<K,V>[] tab = table; 1166 while (tab != null && i < tab.length) { 1167 int fh; 1168 Node<K,V> f = tabAt(tab, i); 1169 if (f == null) 1170 ++i; 1171 else if ((fh = f.hash) == MOVED) { 1172 tab = helpTransfer(tab, f); 1173 i = 0; // restart 1174 } 1175 else { 1176 synchronized (f) { 1177 if (tabAt(tab, i) == f) { 1178 Node<K,V> p = (fh >= 0 ? f : 1179 (f instanceof TreeBin) ? 1180 ((TreeBin<K,V>)f).first : null); 1181 while (p != null) { 1182 --delta; 1183 p = p.next; 1184 } 1185 setTabAt(tab, i++, null); 1186 } 1187 } 1188 } 1189 } 1190 if (delta != 0L) 1191 addCount(delta, -1); 1192 } 1193 1194 /** 1195 * Returns a {@link Set} view of the keys contained in this map. 1196 * The set is backed by the map, so changes to the map are 1197 * reflected in the set, and vice-versa. The set supports element 1198 * removal, which removes the corresponding mapping from this map, 1199 * via the {@code Iterator.remove}, {@code Set.remove}, 1200 * {@code removeAll}, {@code retainAll}, and {@code clear} 1201 * operations. It does not support the {@code add} or 1202 * {@code addAll} operations. 1203 * 1204 * <p> The set returned by this method is guaranteed to an instance of 1205 * {@link KeySetView}. 1206 * 1207 * <p>The view's iterators and spliterators are 1208 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 1209 * 1210 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, 1211 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. 1212 * 1213 * @return the set view 1214 */ 1215 // NOTE: The upstream version of this function returns KeySetView (See http://b/28099367). keySet()1216 public Set<K> keySet() { 1217 KeySetView<K,V> ks; 1218 return (ks = keySet) != null ? ks : (keySet = new KeySetView<K,V>(this, null)); 1219 } 1220 1221 /** 1222 * Returns a {@link Collection} view of the values contained in this map. 1223 * The collection is backed by the map, so changes to the map are 1224 * reflected in the collection, and vice-versa. The collection 1225 * supports element removal, which removes the corresponding 1226 * mapping from this map, via the {@code Iterator.remove}, 1227 * {@code Collection.remove}, {@code removeAll}, 1228 * {@code retainAll}, and {@code clear} operations. It does not 1229 * support the {@code add} or {@code addAll} operations. 1230 * 1231 * <p>The view's iterators and spliterators are 1232 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 1233 * 1234 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT} 1235 * and {@link Spliterator#NONNULL}. 1236 * 1237 * @return the collection view 1238 */ values()1239 public Collection<V> values() { 1240 ValuesView<K,V> vs; 1241 return (vs = values) != null ? vs : (values = new ValuesView<K,V>(this)); 1242 } 1243 1244 /** 1245 * Returns a {@link Set} view of the mappings contained in this map. 1246 * The set is backed by the map, so changes to the map are 1247 * reflected in the set, and vice-versa. The set supports element 1248 * removal, which removes the corresponding mapping from the map, 1249 * via the {@code Iterator.remove}, {@code Set.remove}, 1250 * {@code removeAll}, {@code retainAll}, and {@code clear} 1251 * operations. 1252 * 1253 * <p>The view's iterators and spliterators are 1254 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 1255 * 1256 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}, 1257 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}. 1258 * 1259 * @return the set view 1260 */ entrySet()1261 public Set<Map.Entry<K,V>> entrySet() { 1262 EntrySetView<K,V> es; 1263 return (es = entrySet) != null ? es : (entrySet = new EntrySetView<K,V>(this)); 1264 } 1265 1266 /** 1267 * Returns the hash code value for this {@link Map}, i.e., 1268 * the sum of, for each key-value pair in the map, 1269 * {@code key.hashCode() ^ value.hashCode()}. 1270 * 1271 * @return the hash code value for this map 1272 */ hashCode()1273 public int hashCode() { 1274 int h = 0; 1275 Node<K,V>[] t; 1276 if ((t = table) != null) { 1277 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1278 for (Node<K,V> p; (p = it.advance()) != null; ) 1279 h += p.key.hashCode() ^ p.val.hashCode(); 1280 } 1281 return h; 1282 } 1283 1284 /** 1285 * Returns a string representation of this map. The string 1286 * representation consists of a list of key-value mappings (in no 1287 * particular order) enclosed in braces ("{@code {}}"). Adjacent 1288 * mappings are separated by the characters {@code ", "} (comma 1289 * and space). Each key-value mapping is rendered as the key 1290 * followed by an equals sign ("{@code =}") followed by the 1291 * associated value. 1292 * 1293 * @return a string representation of this map 1294 */ toString()1295 public String toString() { 1296 Node<K,V>[] t; 1297 int f = (t = table) == null ? 0 : t.length; 1298 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); 1299 StringBuilder sb = new StringBuilder(); 1300 sb.append('{'); 1301 Node<K,V> p; 1302 if ((p = it.advance()) != null) { 1303 for (;;) { 1304 K k = p.key; 1305 V v = p.val; 1306 sb.append(k == this ? "(this Map)" : k); 1307 sb.append('='); 1308 sb.append(v == this ? "(this Map)" : v); 1309 if ((p = it.advance()) == null) 1310 break; 1311 sb.append(',').append(' '); 1312 } 1313 } 1314 return sb.append('}').toString(); 1315 } 1316 1317 /** 1318 * Compares the specified object with this map for equality. 1319 * Returns {@code true} if the given object is a map with the same 1320 * mappings as this map. This operation may return misleading 1321 * results if either map is concurrently modified during execution 1322 * of this method. 1323 * 1324 * @param o object to be compared for equality with this map 1325 * @return {@code true} if the specified object is equal to this map 1326 */ equals(Object o)1327 public boolean equals(Object o) { 1328 if (o != this) { 1329 if (!(o instanceof Map)) 1330 return false; 1331 Map<?,?> m = (Map<?,?>) o; 1332 Node<K,V>[] t; 1333 int f = (t = table) == null ? 0 : t.length; 1334 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f); 1335 for (Node<K,V> p; (p = it.advance()) != null; ) { 1336 V val = p.val; 1337 Object v = m.get(p.key); 1338 if (v == null || (v != val && !v.equals(val))) 1339 return false; 1340 } 1341 for (Map.Entry<?,?> e : m.entrySet()) { 1342 Object mk, mv, v; 1343 if ((mk = e.getKey()) == null || 1344 (mv = e.getValue()) == null || 1345 (v = get(mk)) == null || 1346 (mv != v && !mv.equals(v))) 1347 return false; 1348 } 1349 } 1350 return true; 1351 } 1352 1353 /** 1354 * Stripped-down version of helper class used in previous version, 1355 * declared for the sake of serialization compatibility. 1356 */ 1357 static class Segment<K,V> extends ReentrantLock implements Serializable { 1358 private static final long serialVersionUID = 2249069246763182397L; 1359 final float loadFactor; Segment(float lf)1360 Segment(float lf) { this.loadFactor = lf; } 1361 } 1362 1363 /** 1364 * Saves the state of the {@code ConcurrentHashMap} instance to a 1365 * stream (i.e., serializes it). 1366 * @param s the stream 1367 * @throws java.io.IOException if an I/O error occurs 1368 * @serialData 1369 * the serialized fields, followed by the key (Object) and value 1370 * (Object) for each key-value mapping, followed by a null pair. 1371 * The key-value mappings are emitted in no particular order. 1372 */ writeObject(java.io.ObjectOutputStream s)1373 private void writeObject(java.io.ObjectOutputStream s) 1374 throws java.io.IOException { 1375 // For serialization compatibility 1376 // Emulate segment calculation from previous version of this class 1377 int sshift = 0; 1378 int ssize = 1; 1379 while (ssize < DEFAULT_CONCURRENCY_LEVEL) { 1380 ++sshift; 1381 ssize <<= 1; 1382 } 1383 int segmentShift = 32 - sshift; 1384 int segmentMask = ssize - 1; 1385 @SuppressWarnings("unchecked") 1386 Segment<K,V>[] segments = (Segment<K,V>[]) 1387 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL]; 1388 for (int i = 0; i < segments.length; ++i) 1389 segments[i] = new Segment<K,V>(LOAD_FACTOR); 1390 java.io.ObjectOutputStream.PutField streamFields = s.putFields(); 1391 streamFields.put("segments", segments); 1392 streamFields.put("segmentShift", segmentShift); 1393 streamFields.put("segmentMask", segmentMask); 1394 s.writeFields(); 1395 1396 Node<K,V>[] t; 1397 if ((t = table) != null) { 1398 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1399 for (Node<K,V> p; (p = it.advance()) != null; ) { 1400 s.writeObject(p.key); 1401 s.writeObject(p.val); 1402 } 1403 } 1404 s.writeObject(null); 1405 s.writeObject(null); 1406 } 1407 1408 /** 1409 * Reconstitutes the instance from a stream (that is, deserializes it). 1410 * @param s the stream 1411 * @throws ClassNotFoundException if the class of a serialized object 1412 * could not be found 1413 * @throws java.io.IOException if an I/O error occurs 1414 */ readObject(java.io.ObjectInputStream s)1415 private void readObject(java.io.ObjectInputStream s) 1416 throws java.io.IOException, ClassNotFoundException { 1417 /* 1418 * To improve performance in typical cases, we create nodes 1419 * while reading, then place in table once size is known. 1420 * However, we must also validate uniqueness and deal with 1421 * overpopulated bins while doing so, which requires 1422 * specialized versions of putVal mechanics. 1423 */ 1424 sizeCtl = -1; // force exclusion for table construction 1425 s.defaultReadObject(); 1426 long size = 0L; 1427 Node<K,V> p = null; 1428 for (;;) { 1429 @SuppressWarnings("unchecked") 1430 K k = (K) s.readObject(); 1431 @SuppressWarnings("unchecked") 1432 V v = (V) s.readObject(); 1433 if (k != null && v != null) { 1434 p = new Node<K,V>(spread(k.hashCode()), k, v, p); 1435 ++size; 1436 } 1437 else 1438 break; 1439 } 1440 if (size == 0L) 1441 sizeCtl = 0; 1442 else { 1443 int n; 1444 if (size >= (long)(MAXIMUM_CAPACITY >>> 1)) 1445 n = MAXIMUM_CAPACITY; 1446 else { 1447 int sz = (int)size; 1448 n = tableSizeFor(sz + (sz >>> 1) + 1); 1449 } 1450 @SuppressWarnings("unchecked") 1451 Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n]; 1452 int mask = n - 1; 1453 long added = 0L; 1454 while (p != null) { 1455 boolean insertAtFront; 1456 Node<K,V> next = p.next, first; 1457 int h = p.hash, j = h & mask; 1458 if ((first = tabAt(tab, j)) == null) 1459 insertAtFront = true; 1460 else { 1461 K k = p.key; 1462 if (first.hash < 0) { 1463 TreeBin<K,V> t = (TreeBin<K,V>)first; 1464 if (t.putTreeVal(h, k, p.val) == null) 1465 ++added; 1466 insertAtFront = false; 1467 } 1468 else { 1469 int binCount = 0; 1470 insertAtFront = true; 1471 Node<K,V> q; K qk; 1472 for (q = first; q != null; q = q.next) { 1473 if (q.hash == h && 1474 ((qk = q.key) == k || 1475 (qk != null && k.equals(qk)))) { 1476 insertAtFront = false; 1477 break; 1478 } 1479 ++binCount; 1480 } 1481 if (insertAtFront && binCount >= TREEIFY_THRESHOLD) { 1482 insertAtFront = false; 1483 ++added; 1484 p.next = first; 1485 TreeNode<K,V> hd = null, tl = null; 1486 for (q = p; q != null; q = q.next) { 1487 TreeNode<K,V> t = new TreeNode<K,V> 1488 (q.hash, q.key, q.val, null, null); 1489 if ((t.prev = tl) == null) 1490 hd = t; 1491 else 1492 tl.next = t; 1493 tl = t; 1494 } 1495 setTabAt(tab, j, new TreeBin<K,V>(hd)); 1496 } 1497 } 1498 } 1499 if (insertAtFront) { 1500 ++added; 1501 p.next = first; 1502 setTabAt(tab, j, p); 1503 } 1504 p = next; 1505 } 1506 table = tab; 1507 sizeCtl = n - (n >>> 2); 1508 baseCount = added; 1509 } 1510 } 1511 1512 // ConcurrentMap methods 1513 1514 /** 1515 * {@inheritDoc} 1516 * 1517 * @return the previous value associated with the specified key, 1518 * or {@code null} if there was no mapping for the key 1519 * @throws NullPointerException if the specified key or value is null 1520 */ putIfAbsent(K key, V value)1521 public V putIfAbsent(K key, V value) { 1522 return putVal(key, value, true); 1523 } 1524 1525 /** 1526 * {@inheritDoc} 1527 * 1528 * @throws NullPointerException if the specified key is null 1529 */ remove(Object key, Object value)1530 public boolean remove(Object key, Object value) { 1531 if (key == null) 1532 throw new NullPointerException(); 1533 return value != null && replaceNode(key, null, value) != null; 1534 } 1535 1536 /** 1537 * {@inheritDoc} 1538 * 1539 * @throws NullPointerException if any of the arguments are null 1540 */ replace(K key, V oldValue, V newValue)1541 public boolean replace(K key, V oldValue, V newValue) { 1542 if (key == null || oldValue == null || newValue == null) 1543 throw new NullPointerException(); 1544 return replaceNode(key, newValue, oldValue) != null; 1545 } 1546 1547 /** 1548 * {@inheritDoc} 1549 * 1550 * @return the previous value associated with the specified key, 1551 * or {@code null} if there was no mapping for the key 1552 * @throws NullPointerException if the specified key or value is null 1553 */ replace(K key, V value)1554 public V replace(K key, V value) { 1555 if (key == null || value == null) 1556 throw new NullPointerException(); 1557 return replaceNode(key, value, null); 1558 } 1559 1560 // Overrides of JDK8+ Map extension method defaults 1561 1562 /** 1563 * Returns the value to which the specified key is mapped, or the 1564 * given default value if this map contains no mapping for the 1565 * key. 1566 * 1567 * @param key the key whose associated value is to be returned 1568 * @param defaultValue the value to return if this map contains 1569 * no mapping for the given key 1570 * @return the mapping for the key, if present; else the default value 1571 * @throws NullPointerException if the specified key is null 1572 */ getOrDefault(Object key, V defaultValue)1573 public V getOrDefault(Object key, V defaultValue) { 1574 V v; 1575 return (v = get(key)) == null ? defaultValue : v; 1576 } 1577 forEach(BiConsumer<? super K, ? super V> action)1578 public void forEach(BiConsumer<? super K, ? super V> action) { 1579 if (action == null) throw new NullPointerException(); 1580 Node<K,V>[] t; 1581 if ((t = table) != null) { 1582 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1583 for (Node<K,V> p; (p = it.advance()) != null; ) { 1584 action.accept(p.key, p.val); 1585 } 1586 } 1587 } 1588 replaceAll(BiFunction<? super K, ? super V, ? extends V> function)1589 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { 1590 if (function == null) throw new NullPointerException(); 1591 Node<K,V>[] t; 1592 if ((t = table) != null) { 1593 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1594 for (Node<K,V> p; (p = it.advance()) != null; ) { 1595 V oldValue = p.val; 1596 for (K key = p.key;;) { 1597 V newValue = function.apply(key, oldValue); 1598 if (newValue == null) 1599 throw new NullPointerException(); 1600 if (replaceNode(key, newValue, oldValue) != null || 1601 (oldValue = get(key)) == null) 1602 break; 1603 } 1604 } 1605 } 1606 } 1607 1608 /** 1609 * Helper method for EntrySetView.removeIf. 1610 */ removeEntryIf(Predicate<? super Entry<K,V>> function)1611 boolean removeEntryIf(Predicate<? super Entry<K,V>> function) { 1612 if (function == null) throw new NullPointerException(); 1613 Node<K,V>[] t; 1614 boolean removed = false; 1615 if ((t = table) != null) { 1616 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1617 for (Node<K,V> p; (p = it.advance()) != null; ) { 1618 K k = p.key; 1619 V v = p.val; 1620 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v); 1621 if (function.test(e) && replaceNode(k, null, v) != null) 1622 removed = true; 1623 } 1624 } 1625 return removed; 1626 } 1627 1628 /** 1629 * Helper method for ValuesView.removeIf. 1630 */ removeValueIf(Predicate<? super V> function)1631 boolean removeValueIf(Predicate<? super V> function) { 1632 if (function == null) throw new NullPointerException(); 1633 Node<K,V>[] t; 1634 boolean removed = false; 1635 if ((t = table) != null) { 1636 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 1637 for (Node<K,V> p; (p = it.advance()) != null; ) { 1638 K k = p.key; 1639 V v = p.val; 1640 if (function.test(v) && replaceNode(k, null, v) != null) 1641 removed = true; 1642 } 1643 } 1644 return removed; 1645 } 1646 1647 /** 1648 * If the specified key is not already associated with a value, 1649 * attempts to compute its value using the given mapping function 1650 * and enters it into this map unless {@code null}. The entire 1651 * method invocation is performed atomically, so the function is 1652 * applied at most once per key. Some attempted update operations 1653 * on this map by other threads may be blocked while computation 1654 * is in progress, so the computation should be short and simple, 1655 * and must not attempt to update any other mappings of this map. 1656 * 1657 * @param key key with which the specified value is to be associated 1658 * @param mappingFunction the function to compute a value 1659 * @return the current (existing or computed) value associated with 1660 * the specified key, or null if the computed value is null 1661 * @throws NullPointerException if the specified key or mappingFunction 1662 * is null 1663 * @throws IllegalStateException if the computation detectably 1664 * attempts a recursive update to this map that would 1665 * otherwise never complete 1666 * @throws RuntimeException or Error if the mappingFunction does so, 1667 * in which case the mapping is left unestablished 1668 */ computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)1669 public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { 1670 if (key == null || mappingFunction == null) 1671 throw new NullPointerException(); 1672 int h = spread(key.hashCode()); 1673 V val = null; 1674 int binCount = 0; 1675 for (Node<K,V>[] tab = table;;) { 1676 Node<K,V> f; int n, i, fh; 1677 if (tab == null || (n = tab.length) == 0) 1678 tab = initTable(); 1679 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { 1680 Node<K,V> r = new ReservationNode<K,V>(); 1681 synchronized (r) { 1682 if (casTabAt(tab, i, null, r)) { 1683 binCount = 1; 1684 Node<K,V> node = null; 1685 try { 1686 if ((val = mappingFunction.apply(key)) != null) 1687 node = new Node<K,V>(h, key, val, null); 1688 } finally { 1689 setTabAt(tab, i, node); 1690 } 1691 } 1692 } 1693 if (binCount != 0) 1694 break; 1695 } 1696 else if ((fh = f.hash) == MOVED) 1697 tab = helpTransfer(tab, f); 1698 else { 1699 boolean added = false; 1700 synchronized (f) { 1701 if (tabAt(tab, i) == f) { 1702 if (fh >= 0) { 1703 binCount = 1; 1704 for (Node<K,V> e = f;; ++binCount) { 1705 K ek; 1706 if (e.hash == h && 1707 ((ek = e.key) == key || 1708 (ek != null && key.equals(ek)))) { 1709 val = e.val; 1710 break; 1711 } 1712 Node<K,V> pred = e; 1713 if ((e = e.next) == null) { 1714 if ((val = mappingFunction.apply(key)) != null) { 1715 if (pred.next != null) 1716 throw new IllegalStateException("Recursive update"); 1717 added = true; 1718 pred.next = new Node<K,V>(h, key, val, null); 1719 } 1720 break; 1721 } 1722 } 1723 } 1724 else if (f instanceof TreeBin) { 1725 binCount = 2; 1726 TreeBin<K,V> t = (TreeBin<K,V>)f; 1727 TreeNode<K,V> r, p; 1728 if ((r = t.root) != null && 1729 (p = r.findTreeNode(h, key, null)) != null) 1730 val = p.val; 1731 else if ((val = mappingFunction.apply(key)) != null) { 1732 added = true; 1733 t.putTreeVal(h, key, val); 1734 } 1735 } 1736 else if (f instanceof ReservationNode) 1737 throw new IllegalStateException("Recursive update"); 1738 } 1739 } 1740 if (binCount != 0) { 1741 if (binCount >= TREEIFY_THRESHOLD) 1742 treeifyBin(tab, i); 1743 if (!added) 1744 return val; 1745 break; 1746 } 1747 } 1748 } 1749 if (val != null) 1750 addCount(1L, binCount); 1751 return val; 1752 } 1753 1754 /** 1755 * If the value for the specified key is present, attempts to 1756 * compute a new mapping given the key and its current mapped 1757 * value. The entire method invocation is performed atomically. 1758 * Some attempted update operations on this map by other threads 1759 * may be blocked while computation is in progress, so the 1760 * computation should be short and simple, and must not attempt to 1761 * update any other mappings of this map. 1762 * 1763 * @param key key with which a value may be associated 1764 * @param remappingFunction the function to compute a value 1765 * @return the new value associated with the specified key, or null if none 1766 * @throws NullPointerException if the specified key or remappingFunction 1767 * is null 1768 * @throws IllegalStateException if the computation detectably 1769 * attempts a recursive update to this map that would 1770 * otherwise never complete 1771 * @throws RuntimeException or Error if the remappingFunction does so, 1772 * in which case the mapping is unchanged 1773 */ computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1774 public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1775 if (key == null || remappingFunction == null) 1776 throw new NullPointerException(); 1777 int h = spread(key.hashCode()); 1778 V val = null; 1779 int delta = 0; 1780 int binCount = 0; 1781 for (Node<K,V>[] tab = table;;) { 1782 Node<K,V> f; int n, i, fh; 1783 if (tab == null || (n = tab.length) == 0) 1784 tab = initTable(); 1785 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) 1786 break; 1787 else if ((fh = f.hash) == MOVED) 1788 tab = helpTransfer(tab, f); 1789 else { 1790 synchronized (f) { 1791 if (tabAt(tab, i) == f) { 1792 if (fh >= 0) { 1793 binCount = 1; 1794 for (Node<K,V> e = f, pred = null;; ++binCount) { 1795 K ek; 1796 if (e.hash == h && 1797 ((ek = e.key) == key || 1798 (ek != null && key.equals(ek)))) { 1799 val = remappingFunction.apply(key, e.val); 1800 if (val != null) 1801 e.val = val; 1802 else { 1803 delta = -1; 1804 Node<K,V> en = e.next; 1805 if (pred != null) 1806 pred.next = en; 1807 else 1808 setTabAt(tab, i, en); 1809 } 1810 break; 1811 } 1812 pred = e; 1813 if ((e = e.next) == null) 1814 break; 1815 } 1816 } 1817 else if (f instanceof TreeBin) { 1818 binCount = 2; 1819 TreeBin<K,V> t = (TreeBin<K,V>)f; 1820 TreeNode<K,V> r, p; 1821 if ((r = t.root) != null && 1822 (p = r.findTreeNode(h, key, null)) != null) { 1823 val = remappingFunction.apply(key, p.val); 1824 if (val != null) 1825 p.val = val; 1826 else { 1827 delta = -1; 1828 if (t.removeTreeNode(p)) 1829 setTabAt(tab, i, untreeify(t.first)); 1830 } 1831 } 1832 } 1833 else if (f instanceof ReservationNode) 1834 throw new IllegalStateException("Recursive update"); 1835 } 1836 } 1837 if (binCount != 0) 1838 break; 1839 } 1840 } 1841 if (delta != 0) 1842 addCount((long)delta, binCount); 1843 return val; 1844 } 1845 1846 /** 1847 * Attempts to compute a mapping for the specified key and its 1848 * current mapped value (or {@code null} if there is no current 1849 * mapping). The entire method invocation is performed atomically. 1850 * Some attempted update operations on this map by other threads 1851 * may be blocked while computation is in progress, so the 1852 * computation should be short and simple, and must not attempt to 1853 * update any other mappings of this Map. 1854 * 1855 * @param key key with which the specified value is to be associated 1856 * @param remappingFunction the function to compute a value 1857 * @return the new value associated with the specified key, or null if none 1858 * @throws NullPointerException if the specified key or remappingFunction 1859 * is null 1860 * @throws IllegalStateException if the computation detectably 1861 * attempts a recursive update to this map that would 1862 * otherwise never complete 1863 * @throws RuntimeException or Error if the remappingFunction does so, 1864 * in which case the mapping is unchanged 1865 */ compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1866 public V compute(K key, 1867 BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1868 if (key == null || remappingFunction == null) 1869 throw new NullPointerException(); 1870 int h = spread(key.hashCode()); 1871 V val = null; 1872 int delta = 0; 1873 int binCount = 0; 1874 for (Node<K,V>[] tab = table;;) { 1875 Node<K,V> f; int n, i, fh; 1876 if (tab == null || (n = tab.length) == 0) 1877 tab = initTable(); 1878 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { 1879 Node<K,V> r = new ReservationNode<K,V>(); 1880 synchronized (r) { 1881 if (casTabAt(tab, i, null, r)) { 1882 binCount = 1; 1883 Node<K,V> node = null; 1884 try { 1885 if ((val = remappingFunction.apply(key, null)) != null) { 1886 delta = 1; 1887 node = new Node<K,V>(h, key, val, null); 1888 } 1889 } finally { 1890 setTabAt(tab, i, node); 1891 } 1892 } 1893 } 1894 if (binCount != 0) 1895 break; 1896 } 1897 else if ((fh = f.hash) == MOVED) 1898 tab = helpTransfer(tab, f); 1899 else { 1900 synchronized (f) { 1901 if (tabAt(tab, i) == f) { 1902 if (fh >= 0) { 1903 binCount = 1; 1904 for (Node<K,V> e = f, pred = null;; ++binCount) { 1905 K ek; 1906 if (e.hash == h && 1907 ((ek = e.key) == key || 1908 (ek != null && key.equals(ek)))) { 1909 val = remappingFunction.apply(key, e.val); 1910 if (val != null) 1911 e.val = val; 1912 else { 1913 delta = -1; 1914 Node<K,V> en = e.next; 1915 if (pred != null) 1916 pred.next = en; 1917 else 1918 setTabAt(tab, i, en); 1919 } 1920 break; 1921 } 1922 pred = e; 1923 if ((e = e.next) == null) { 1924 val = remappingFunction.apply(key, null); 1925 if (val != null) { 1926 if (pred.next != null) 1927 throw new IllegalStateException("Recursive update"); 1928 delta = 1; 1929 pred.next = 1930 new Node<K,V>(h, key, val, null); 1931 } 1932 break; 1933 } 1934 } 1935 } 1936 else if (f instanceof TreeBin) { 1937 binCount = 1; 1938 TreeBin<K,V> t = (TreeBin<K,V>)f; 1939 TreeNode<K,V> r, p; 1940 if ((r = t.root) != null) 1941 p = r.findTreeNode(h, key, null); 1942 else 1943 p = null; 1944 V pv = (p == null) ? null : p.val; 1945 val = remappingFunction.apply(key, pv); 1946 if (val != null) { 1947 if (p != null) 1948 p.val = val; 1949 else { 1950 delta = 1; 1951 t.putTreeVal(h, key, val); 1952 } 1953 } 1954 else if (p != null) { 1955 delta = -1; 1956 if (t.removeTreeNode(p)) 1957 setTabAt(tab, i, untreeify(t.first)); 1958 } 1959 } 1960 else if (f instanceof ReservationNode) 1961 throw new IllegalStateException("Recursive update"); 1962 } 1963 } 1964 if (binCount != 0) { 1965 if (binCount >= TREEIFY_THRESHOLD) 1966 treeifyBin(tab, i); 1967 break; 1968 } 1969 } 1970 } 1971 if (delta != 0) 1972 addCount((long)delta, binCount); 1973 return val; 1974 } 1975 1976 /** 1977 * If the specified key is not already associated with a 1978 * (non-null) value, associates it with the given value. 1979 * Otherwise, replaces the value with the results of the given 1980 * remapping function, or removes if {@code null}. The entire 1981 * method invocation is performed atomically. Some attempted 1982 * update operations on this map by other threads may be blocked 1983 * while computation is in progress, so the computation should be 1984 * short and simple, and must not attempt to update any other 1985 * mappings of this Map. 1986 * 1987 * @param key key with which the specified value is to be associated 1988 * @param value the value to use if absent 1989 * @param remappingFunction the function to recompute a value if present 1990 * @return the new value associated with the specified key, or null if none 1991 * @throws NullPointerException if the specified key or the 1992 * remappingFunction is null 1993 * @throws RuntimeException or Error if the remappingFunction does so, 1994 * in which case the mapping is unchanged 1995 */ merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction)1996 public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { 1997 if (key == null || value == null || remappingFunction == null) 1998 throw new NullPointerException(); 1999 int h = spread(key.hashCode()); 2000 V val = null; 2001 int delta = 0; 2002 int binCount = 0; 2003 for (Node<K,V>[] tab = table;;) { 2004 Node<K,V> f; int n, i, fh; 2005 if (tab == null || (n = tab.length) == 0) 2006 tab = initTable(); 2007 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) { 2008 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value, null))) { 2009 delta = 1; 2010 val = value; 2011 break; 2012 } 2013 } 2014 else if ((fh = f.hash) == MOVED) 2015 tab = helpTransfer(tab, f); 2016 else { 2017 synchronized (f) { 2018 if (tabAt(tab, i) == f) { 2019 if (fh >= 0) { 2020 binCount = 1; 2021 for (Node<K,V> e = f, pred = null;; ++binCount) { 2022 K ek; 2023 if (e.hash == h && 2024 ((ek = e.key) == key || 2025 (ek != null && key.equals(ek)))) { 2026 val = remappingFunction.apply(e.val, value); 2027 if (val != null) 2028 e.val = val; 2029 else { 2030 delta = -1; 2031 Node<K,V> en = e.next; 2032 if (pred != null) 2033 pred.next = en; 2034 else 2035 setTabAt(tab, i, en); 2036 } 2037 break; 2038 } 2039 pred = e; 2040 if ((e = e.next) == null) { 2041 delta = 1; 2042 val = value; 2043 pred.next = 2044 new Node<K,V>(h, key, val, null); 2045 break; 2046 } 2047 } 2048 } 2049 else if (f instanceof TreeBin) { 2050 binCount = 2; 2051 TreeBin<K,V> t = (TreeBin<K,V>)f; 2052 TreeNode<K,V> r = t.root; 2053 TreeNode<K,V> p = (r == null) ? null : 2054 r.findTreeNode(h, key, null); 2055 val = (p == null) ? value : 2056 remappingFunction.apply(p.val, value); 2057 if (val != null) { 2058 if (p != null) 2059 p.val = val; 2060 else { 2061 delta = 1; 2062 t.putTreeVal(h, key, val); 2063 } 2064 } 2065 else if (p != null) { 2066 delta = -1; 2067 if (t.removeTreeNode(p)) 2068 setTabAt(tab, i, untreeify(t.first)); 2069 } 2070 } 2071 else if (f instanceof ReservationNode) 2072 throw new IllegalStateException("Recursive update"); 2073 } 2074 } 2075 if (binCount != 0) { 2076 if (binCount >= TREEIFY_THRESHOLD) 2077 treeifyBin(tab, i); 2078 break; 2079 } 2080 } 2081 } 2082 if (delta != 0) 2083 addCount((long)delta, binCount); 2084 return val; 2085 } 2086 2087 // Hashtable legacy methods 2088 2089 /** 2090 * Tests if some key maps into the specified value in this table. 2091 * 2092 * <p>Note that this method is identical in functionality to 2093 * {@link #containsValue(Object)}, and exists solely to ensure 2094 * full compatibility with class {@link java.util.Hashtable}, 2095 * which supported this method prior to introduction of the 2096 * Java Collections Framework. 2097 * 2098 * @param value a value to search for 2099 * @return {@code true} if and only if some key maps to the 2100 * {@code value} argument in this table as 2101 * determined by the {@code equals} method; 2102 * {@code false} otherwise 2103 * @throws NullPointerException if the specified value is null 2104 */ contains(Object value)2105 public boolean contains(Object value) { 2106 return containsValue(value); 2107 } 2108 2109 /** 2110 * Returns an enumeration of the keys in this table. 2111 * 2112 * @return an enumeration of the keys in this table 2113 * @see #keySet() 2114 */ keys()2115 public Enumeration<K> keys() { 2116 Node<K,V>[] t; 2117 int f = (t = table) == null ? 0 : t.length; 2118 return new KeyIterator<K,V>(t, f, 0, f, this); 2119 } 2120 2121 /** 2122 * Returns an enumeration of the values in this table. 2123 * 2124 * @return an enumeration of the values in this table 2125 * @see #values() 2126 */ elements()2127 public Enumeration<V> elements() { 2128 Node<K,V>[] t; 2129 int f = (t = table) == null ? 0 : t.length; 2130 return new ValueIterator<K,V>(t, f, 0, f, this); 2131 } 2132 2133 // ConcurrentHashMap-only methods 2134 2135 /** 2136 * Returns the number of mappings. This method should be used 2137 * instead of {@link #size} because a ConcurrentHashMap may 2138 * contain more mappings than can be represented as an int. The 2139 * value returned is an estimate; the actual count may differ if 2140 * there are concurrent insertions or removals. 2141 * 2142 * @return the number of mappings 2143 * @since 1.8 2144 */ mappingCount()2145 public long mappingCount() { 2146 long n = sumCount(); 2147 return (n < 0L) ? 0L : n; // ignore transient negative values 2148 } 2149 2150 /** 2151 * Creates a new {@link Set} backed by a ConcurrentHashMap 2152 * from the given type to {@code Boolean.TRUE}. 2153 * 2154 * @param <K> the element type of the returned set 2155 * @return the new set 2156 * @since 1.8 2157 */ newKeySet()2158 public static <K> KeySetView<K,Boolean> newKeySet() { 2159 return new KeySetView<K,Boolean> 2160 (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE); 2161 } 2162 2163 /** 2164 * Creates a new {@link Set} backed by a ConcurrentHashMap 2165 * from the given type to {@code Boolean.TRUE}. 2166 * 2167 * @param initialCapacity The implementation performs internal 2168 * sizing to accommodate this many elements. 2169 * @param <K> the element type of the returned set 2170 * @return the new set 2171 * @throws IllegalArgumentException if the initial capacity of 2172 * elements is negative 2173 * @since 1.8 2174 */ newKeySet(int initialCapacity)2175 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) { 2176 return new KeySetView<K,Boolean> 2177 (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE); 2178 } 2179 2180 /** 2181 * Returns a {@link Set} view of the keys in this map, using the 2182 * given common mapped value for any additions (i.e., {@link 2183 * Collection#add} and {@link Collection#addAll(Collection)}). 2184 * This is of course only appropriate if it is acceptable to use 2185 * the same value for all additions from this view. 2186 * 2187 * @param mappedValue the mapped value to use for any additions 2188 * @return the set view 2189 * @throws NullPointerException if the mappedValue is null 2190 */ keySet(V mappedValue)2191 public KeySetView<K,V> keySet(V mappedValue) { 2192 if (mappedValue == null) 2193 throw new NullPointerException(); 2194 return new KeySetView<K,V>(this, mappedValue); 2195 } 2196 2197 /* ---------------- Special Nodes -------------- */ 2198 2199 /** 2200 * A node inserted at head of bins during transfer operations. 2201 */ 2202 static final class ForwardingNode<K,V> extends Node<K,V> { 2203 final Node<K,V>[] nextTable; ForwardingNode(Node<K,V>[] tab)2204 ForwardingNode(Node<K,V>[] tab) { 2205 super(MOVED, null, null, null); 2206 this.nextTable = tab; 2207 } 2208 find(int h, Object k)2209 Node<K,V> find(int h, Object k) { 2210 // loop to avoid arbitrarily deep recursion on forwarding nodes 2211 outer: for (Node<K,V>[] tab = nextTable;;) { 2212 Node<K,V> e; int n; 2213 if (k == null || tab == null || (n = tab.length) == 0 || 2214 (e = tabAt(tab, (n - 1) & h)) == null) 2215 return null; 2216 for (;;) { 2217 int eh; K ek; 2218 if ((eh = e.hash) == h && 2219 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 2220 return e; 2221 if (eh < 0) { 2222 if (e instanceof ForwardingNode) { 2223 tab = ((ForwardingNode<K,V>)e).nextTable; 2224 continue outer; 2225 } 2226 else 2227 return e.find(h, k); 2228 } 2229 if ((e = e.next) == null) 2230 return null; 2231 } 2232 } 2233 } 2234 } 2235 2236 /** 2237 * A place-holder node used in computeIfAbsent and compute. 2238 */ 2239 static final class ReservationNode<K,V> extends Node<K,V> { ReservationNode()2240 ReservationNode() { 2241 super(RESERVED, null, null, null); 2242 } 2243 find(int h, Object k)2244 Node<K,V> find(int h, Object k) { 2245 return null; 2246 } 2247 } 2248 2249 /* ---------------- Table Initialization and Resizing -------------- */ 2250 2251 /** 2252 * Returns the stamp bits for resizing a table of size n. 2253 * Must be negative when shifted left by RESIZE_STAMP_SHIFT. 2254 */ resizeStamp(int n)2255 static final int resizeStamp(int n) { 2256 return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1)); 2257 } 2258 2259 /** 2260 * Initializes table, using the size recorded in sizeCtl. 2261 */ initTable()2262 private final Node<K,V>[] initTable() { 2263 Node<K,V>[] tab; int sc; 2264 while ((tab = table) == null || tab.length == 0) { 2265 if ((sc = sizeCtl) < 0) 2266 Thread.yield(); // lost initialization race; just spin 2267 else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { 2268 try { 2269 if ((tab = table) == null || tab.length == 0) { 2270 int n = (sc > 0) ? sc : DEFAULT_CAPACITY; 2271 @SuppressWarnings("unchecked") 2272 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; 2273 table = tab = nt; 2274 sc = n - (n >>> 2); 2275 } 2276 } finally { 2277 sizeCtl = sc; 2278 } 2279 break; 2280 } 2281 } 2282 return tab; 2283 } 2284 2285 /** 2286 * Adds to count, and if table is too small and not already 2287 * resizing, initiates transfer. If already resizing, helps 2288 * perform transfer if work is available. Rechecks occupancy 2289 * after a transfer to see if another resize is already needed 2290 * because resizings are lagging additions. 2291 * 2292 * @param x the count to add 2293 * @param check if <0, don't check resize, if <= 1 only check if uncontended 2294 */ addCount(long x, int check)2295 private final void addCount(long x, int check) { 2296 CounterCell[] as; long b, s; 2297 if ((as = counterCells) != null || 2298 !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) { 2299 CounterCell a; long v; int m; 2300 boolean uncontended = true; 2301 if (as == null || (m = as.length - 1) < 0 || 2302 (a = as[ThreadLocalRandom.getProbe() & m]) == null || 2303 !(uncontended = 2304 U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) { 2305 fullAddCount(x, uncontended); 2306 return; 2307 } 2308 if (check <= 1) 2309 return; 2310 s = sumCount(); 2311 } 2312 if (check >= 0) { 2313 Node<K,V>[] tab, nt; int n, sc; 2314 while (s >= (long)(sc = sizeCtl) && (tab = table) != null && 2315 (n = tab.length) < MAXIMUM_CAPACITY) { 2316 int rs = resizeStamp(n); 2317 if (sc < 0) { 2318 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || 2319 sc == rs + MAX_RESIZERS || (nt = nextTable) == null || 2320 transferIndex <= 0) 2321 break; 2322 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) 2323 transfer(tab, nt); 2324 } 2325 else if (U.compareAndSwapInt(this, SIZECTL, sc, 2326 (rs << RESIZE_STAMP_SHIFT) + 2)) 2327 transfer(tab, null); 2328 s = sumCount(); 2329 } 2330 } 2331 } 2332 2333 /** 2334 * Helps transfer if a resize is in progress. 2335 */ helpTransfer(Node<K,V>[] tab, Node<K,V> f)2336 final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) { 2337 Node<K,V>[] nextTab; int sc; 2338 if (tab != null && (f instanceof ForwardingNode) && 2339 (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) { 2340 int rs = resizeStamp(tab.length); 2341 while (nextTab == nextTable && table == tab && 2342 (sc = sizeCtl) < 0) { 2343 if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 || 2344 sc == rs + MAX_RESIZERS || transferIndex <= 0) 2345 break; 2346 if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1)) { 2347 transfer(tab, nextTab); 2348 break; 2349 } 2350 } 2351 return nextTab; 2352 } 2353 return table; 2354 } 2355 2356 /** 2357 * Tries to presize table to accommodate the given number of elements. 2358 * 2359 * @param size number of elements (doesn't need to be perfectly accurate) 2360 */ tryPresize(int size)2361 private final void tryPresize(int size) { 2362 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY : 2363 tableSizeFor(size + (size >>> 1) + 1); 2364 int sc; 2365 while ((sc = sizeCtl) >= 0) { 2366 Node<K,V>[] tab = table; int n; 2367 if (tab == null || (n = tab.length) == 0) { 2368 n = (sc > c) ? sc : c; 2369 if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) { 2370 try { 2371 if (table == tab) { 2372 @SuppressWarnings("unchecked") 2373 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n]; 2374 table = nt; 2375 sc = n - (n >>> 2); 2376 } 2377 } finally { 2378 sizeCtl = sc; 2379 } 2380 } 2381 } 2382 else if (c <= sc || n >= MAXIMUM_CAPACITY) 2383 break; 2384 else if (tab == table) { 2385 int rs = resizeStamp(n); 2386 if (U.compareAndSwapInt(this, SIZECTL, sc, 2387 (rs << RESIZE_STAMP_SHIFT) + 2)) 2388 transfer(tab, null); 2389 } 2390 } 2391 } 2392 2393 /** 2394 * Moves and/or copies the nodes in each bin to new table. See 2395 * above for explanation. 2396 */ transfer(Node<K,V>[] tab, Node<K,V>[] nextTab)2397 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) { 2398 int n = tab.length, stride; 2399 if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE) 2400 stride = MIN_TRANSFER_STRIDE; // subdivide range 2401 if (nextTab == null) { // initiating 2402 try { 2403 @SuppressWarnings("unchecked") 2404 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1]; 2405 nextTab = nt; 2406 } catch (Throwable ex) { // try to cope with OOME 2407 sizeCtl = Integer.MAX_VALUE; 2408 return; 2409 } 2410 nextTable = nextTab; 2411 transferIndex = n; 2412 } 2413 int nextn = nextTab.length; 2414 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab); 2415 boolean advance = true; 2416 boolean finishing = false; // to ensure sweep before committing nextTab 2417 for (int i = 0, bound = 0;;) { 2418 Node<K,V> f; int fh; 2419 while (advance) { 2420 int nextIndex, nextBound; 2421 if (--i >= bound || finishing) 2422 advance = false; 2423 else if ((nextIndex = transferIndex) <= 0) { 2424 i = -1; 2425 advance = false; 2426 } 2427 else if (U.compareAndSwapInt 2428 (this, TRANSFERINDEX, nextIndex, 2429 nextBound = (nextIndex > stride ? 2430 nextIndex - stride : 0))) { 2431 bound = nextBound; 2432 i = nextIndex - 1; 2433 advance = false; 2434 } 2435 } 2436 if (i < 0 || i >= n || i + n >= nextn) { 2437 int sc; 2438 if (finishing) { 2439 nextTable = null; 2440 table = nextTab; 2441 sizeCtl = (n << 1) - (n >>> 1); 2442 return; 2443 } 2444 if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, sc - 1)) { 2445 if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT) 2446 return; 2447 finishing = advance = true; 2448 i = n; // recheck before commit 2449 } 2450 } 2451 else if ((f = tabAt(tab, i)) == null) 2452 advance = casTabAt(tab, i, null, fwd); 2453 else if ((fh = f.hash) == MOVED) 2454 advance = true; // already processed 2455 else { 2456 synchronized (f) { 2457 if (tabAt(tab, i) == f) { 2458 Node<K,V> ln, hn; 2459 if (fh >= 0) { 2460 int runBit = fh & n; 2461 Node<K,V> lastRun = f; 2462 for (Node<K,V> p = f.next; p != null; p = p.next) { 2463 int b = p.hash & n; 2464 if (b != runBit) { 2465 runBit = b; 2466 lastRun = p; 2467 } 2468 } 2469 if (runBit == 0) { 2470 ln = lastRun; 2471 hn = null; 2472 } 2473 else { 2474 hn = lastRun; 2475 ln = null; 2476 } 2477 for (Node<K,V> p = f; p != lastRun; p = p.next) { 2478 int ph = p.hash; K pk = p.key; V pv = p.val; 2479 if ((ph & n) == 0) 2480 ln = new Node<K,V>(ph, pk, pv, ln); 2481 else 2482 hn = new Node<K,V>(ph, pk, pv, hn); 2483 } 2484 setTabAt(nextTab, i, ln); 2485 setTabAt(nextTab, i + n, hn); 2486 setTabAt(tab, i, fwd); 2487 advance = true; 2488 } 2489 else if (f instanceof TreeBin) { 2490 TreeBin<K,V> t = (TreeBin<K,V>)f; 2491 TreeNode<K,V> lo = null, loTail = null; 2492 TreeNode<K,V> hi = null, hiTail = null; 2493 int lc = 0, hc = 0; 2494 for (Node<K,V> e = t.first; e != null; e = e.next) { 2495 int h = e.hash; 2496 TreeNode<K,V> p = new TreeNode<K,V> 2497 (h, e.key, e.val, null, null); 2498 if ((h & n) == 0) { 2499 if ((p.prev = loTail) == null) 2500 lo = p; 2501 else 2502 loTail.next = p; 2503 loTail = p; 2504 ++lc; 2505 } 2506 else { 2507 if ((p.prev = hiTail) == null) 2508 hi = p; 2509 else 2510 hiTail.next = p; 2511 hiTail = p; 2512 ++hc; 2513 } 2514 } 2515 ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) : 2516 (hc != 0) ? new TreeBin<K,V>(lo) : t; 2517 hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) : 2518 (lc != 0) ? new TreeBin<K,V>(hi) : t; 2519 setTabAt(nextTab, i, ln); 2520 setTabAt(nextTab, i + n, hn); 2521 setTabAt(tab, i, fwd); 2522 advance = true; 2523 } 2524 } 2525 } 2526 } 2527 } 2528 } 2529 2530 /* ---------------- Counter support -------------- */ 2531 2532 /** 2533 * A padded cell for distributing counts. Adapted from LongAdder 2534 * and Striped64. See their internal docs for explanation. 2535 */ 2536 //@jdk.internal.vm.annotation.Contended // android-removed 2537 static final class CounterCell { 2538 volatile long value; CounterCell(long x)2539 CounterCell(long x) { value = x; } 2540 } 2541 sumCount()2542 final long sumCount() { 2543 CounterCell[] as = counterCells; CounterCell a; 2544 long sum = baseCount; 2545 if (as != null) { 2546 for (int i = 0; i < as.length; ++i) { 2547 if ((a = as[i]) != null) 2548 sum += a.value; 2549 } 2550 } 2551 return sum; 2552 } 2553 2554 // See LongAdder version for explanation fullAddCount(long x, boolean wasUncontended)2555 private final void fullAddCount(long x, boolean wasUncontended) { 2556 int h; 2557 if ((h = ThreadLocalRandom.getProbe()) == 0) { 2558 ThreadLocalRandom.localInit(); // force initialization 2559 h = ThreadLocalRandom.getProbe(); 2560 wasUncontended = true; 2561 } 2562 boolean collide = false; // True if last slot nonempty 2563 for (;;) { 2564 CounterCell[] as; CounterCell a; int n; long v; 2565 if ((as = counterCells) != null && (n = as.length) > 0) { 2566 if ((a = as[(n - 1) & h]) == null) { 2567 if (cellsBusy == 0) { // Try to attach new Cell 2568 CounterCell r = new CounterCell(x); // Optimistic create 2569 if (cellsBusy == 0 && 2570 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { 2571 boolean created = false; 2572 try { // Recheck under lock 2573 CounterCell[] rs; int m, j; 2574 if ((rs = counterCells) != null && 2575 (m = rs.length) > 0 && 2576 rs[j = (m - 1) & h] == null) { 2577 rs[j] = r; 2578 created = true; 2579 } 2580 } finally { 2581 cellsBusy = 0; 2582 } 2583 if (created) 2584 break; 2585 continue; // Slot is now non-empty 2586 } 2587 } 2588 collide = false; 2589 } 2590 else if (!wasUncontended) // CAS already known to fail 2591 wasUncontended = true; // Continue after rehash 2592 else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x)) 2593 break; 2594 else if (counterCells != as || n >= NCPU) 2595 collide = false; // At max size or stale 2596 else if (!collide) 2597 collide = true; 2598 else if (cellsBusy == 0 && 2599 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { 2600 try { 2601 if (counterCells == as) {// Expand table unless stale 2602 CounterCell[] rs = new CounterCell[n << 1]; 2603 for (int i = 0; i < n; ++i) 2604 rs[i] = as[i]; 2605 counterCells = rs; 2606 } 2607 } finally { 2608 cellsBusy = 0; 2609 } 2610 collide = false; 2611 continue; // Retry with expanded table 2612 } 2613 h = ThreadLocalRandom.advanceProbe(h); 2614 } 2615 else if (cellsBusy == 0 && counterCells == as && 2616 U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) { 2617 boolean init = false; 2618 try { // Initialize table 2619 if (counterCells == as) { 2620 CounterCell[] rs = new CounterCell[2]; 2621 rs[h & 1] = new CounterCell(x); 2622 counterCells = rs; 2623 init = true; 2624 } 2625 } finally { 2626 cellsBusy = 0; 2627 } 2628 if (init) 2629 break; 2630 } 2631 else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x)) 2632 break; // Fall back on using base 2633 } 2634 } 2635 2636 /* ---------------- Conversion from/to TreeBins -------------- */ 2637 2638 /** 2639 * Replaces all linked nodes in bin at given index unless table is 2640 * too small, in which case resizes instead. 2641 */ treeifyBin(Node<K,V>[] tab, int index)2642 private final void treeifyBin(Node<K,V>[] tab, int index) { 2643 Node<K,V> b; int n; 2644 if (tab != null) { 2645 if ((n = tab.length) < MIN_TREEIFY_CAPACITY) 2646 tryPresize(n << 1); 2647 else if ((b = tabAt(tab, index)) != null && b.hash >= 0) { 2648 synchronized (b) { 2649 if (tabAt(tab, index) == b) { 2650 TreeNode<K,V> hd = null, tl = null; 2651 for (Node<K,V> e = b; e != null; e = e.next) { 2652 TreeNode<K,V> p = 2653 new TreeNode<K,V>(e.hash, e.key, e.val, 2654 null, null); 2655 if ((p.prev = tl) == null) 2656 hd = p; 2657 else 2658 tl.next = p; 2659 tl = p; 2660 } 2661 setTabAt(tab, index, new TreeBin<K,V>(hd)); 2662 } 2663 } 2664 } 2665 } 2666 } 2667 2668 /** 2669 * Returns a list on non-TreeNodes replacing those in given list. 2670 */ untreeify(Node<K,V> b)2671 static <K,V> Node<K,V> untreeify(Node<K,V> b) { 2672 Node<K,V> hd = null, tl = null; 2673 for (Node<K,V> q = b; q != null; q = q.next) { 2674 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val, null); 2675 if (tl == null) 2676 hd = p; 2677 else 2678 tl.next = p; 2679 tl = p; 2680 } 2681 return hd; 2682 } 2683 2684 /* ---------------- TreeNodes -------------- */ 2685 2686 /** 2687 * Nodes for use in TreeBins. 2688 */ 2689 static final class TreeNode<K,V> extends Node<K,V> { 2690 TreeNode<K,V> parent; // red-black tree links 2691 TreeNode<K,V> left; 2692 TreeNode<K,V> right; 2693 TreeNode<K,V> prev; // needed to unlink next upon deletion 2694 boolean red; 2695 TreeNode(int hash, K key, V val, Node<K,V> next, TreeNode<K,V> parent)2696 TreeNode(int hash, K key, V val, Node<K,V> next, 2697 TreeNode<K,V> parent) { 2698 super(hash, key, val, next); 2699 this.parent = parent; 2700 } 2701 find(int h, Object k)2702 Node<K,V> find(int h, Object k) { 2703 return findTreeNode(h, k, null); 2704 } 2705 2706 /** 2707 * Returns the TreeNode (or null if not found) for the given key 2708 * starting at given root. 2709 */ findTreeNode(int h, Object k, Class<?> kc)2710 final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) { 2711 if (k != null) { 2712 TreeNode<K,V> p = this; 2713 do { 2714 int ph, dir; K pk; TreeNode<K,V> q; 2715 TreeNode<K,V> pl = p.left, pr = p.right; 2716 if ((ph = p.hash) > h) 2717 p = pl; 2718 else if (ph < h) 2719 p = pr; 2720 else if ((pk = p.key) == k || (pk != null && k.equals(pk))) 2721 return p; 2722 else if (pl == null) 2723 p = pr; 2724 else if (pr == null) 2725 p = pl; 2726 else if ((kc != null || 2727 (kc = comparableClassFor(k)) != null) && 2728 (dir = compareComparables(kc, k, pk)) != 0) 2729 p = (dir < 0) ? pl : pr; 2730 else if ((q = pr.findTreeNode(h, k, kc)) != null) 2731 return q; 2732 else 2733 p = pl; 2734 } while (p != null); 2735 } 2736 return null; 2737 } 2738 } 2739 2740 /* ---------------- TreeBins -------------- */ 2741 2742 /** 2743 * TreeNodes used at the heads of bins. TreeBins do not hold user 2744 * keys or values, but instead point to list of TreeNodes and 2745 * their root. They also maintain a parasitic read-write lock 2746 * forcing writers (who hold bin lock) to wait for readers (who do 2747 * not) to complete before tree restructuring operations. 2748 */ 2749 static final class TreeBin<K,V> extends Node<K,V> { 2750 TreeNode<K,V> root; 2751 volatile TreeNode<K,V> first; 2752 volatile Thread waiter; 2753 volatile int lockState; 2754 // values for lockState 2755 static final int WRITER = 1; // set while holding write lock 2756 static final int WAITER = 2; // set when waiting for write lock 2757 static final int READER = 4; // increment value for setting read lock 2758 2759 /** 2760 * Tie-breaking utility for ordering insertions when equal 2761 * hashCodes and non-comparable. We don't require a total 2762 * order, just a consistent insertion rule to maintain 2763 * equivalence across rebalancings. Tie-breaking further than 2764 * necessary simplifies testing a bit. 2765 */ tieBreakOrder(Object a, Object b)2766 static int tieBreakOrder(Object a, Object b) { 2767 int d; 2768 if (a == null || b == null || 2769 (d = a.getClass().getName(). 2770 compareTo(b.getClass().getName())) == 0) 2771 d = (System.identityHashCode(a) <= System.identityHashCode(b) ? 2772 -1 : 1); 2773 return d; 2774 } 2775 2776 /** 2777 * Creates bin with initial set of nodes headed by b. 2778 */ TreeBin(TreeNode<K,V> b)2779 TreeBin(TreeNode<K,V> b) { 2780 super(TREEBIN, null, null, null); 2781 this.first = b; 2782 TreeNode<K,V> r = null; 2783 for (TreeNode<K,V> x = b, next; x != null; x = next) { 2784 next = (TreeNode<K,V>)x.next; 2785 x.left = x.right = null; 2786 if (r == null) { 2787 x.parent = null; 2788 x.red = false; 2789 r = x; 2790 } 2791 else { 2792 K k = x.key; 2793 int h = x.hash; 2794 Class<?> kc = null; 2795 for (TreeNode<K,V> p = r;;) { 2796 int dir, ph; 2797 K pk = p.key; 2798 if ((ph = p.hash) > h) 2799 dir = -1; 2800 else if (ph < h) 2801 dir = 1; 2802 else if ((kc == null && 2803 (kc = comparableClassFor(k)) == null) || 2804 (dir = compareComparables(kc, k, pk)) == 0) 2805 dir = tieBreakOrder(k, pk); 2806 TreeNode<K,V> xp = p; 2807 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2808 x.parent = xp; 2809 if (dir <= 0) 2810 xp.left = x; 2811 else 2812 xp.right = x; 2813 r = balanceInsertion(r, x); 2814 break; 2815 } 2816 } 2817 } 2818 } 2819 this.root = r; 2820 assert checkInvariants(root); 2821 } 2822 2823 /** 2824 * Acquires write lock for tree restructuring. 2825 */ lockRoot()2826 private final void lockRoot() { 2827 if (!U.compareAndSwapInt(this, LOCKSTATE, 0, WRITER)) 2828 contendedLock(); // offload to separate method 2829 } 2830 2831 /** 2832 * Releases write lock for tree restructuring. 2833 */ unlockRoot()2834 private final void unlockRoot() { 2835 lockState = 0; 2836 } 2837 2838 /** 2839 * Possibly blocks awaiting root lock. 2840 */ contendedLock()2841 private final void contendedLock() { 2842 boolean waiting = false; 2843 for (int s;;) { 2844 if (((s = lockState) & ~WAITER) == 0) { 2845 if (U.compareAndSwapInt(this, LOCKSTATE, s, WRITER)) { 2846 if (waiting) 2847 waiter = null; 2848 return; 2849 } 2850 } 2851 else if ((s & WAITER) == 0) { 2852 if (U.compareAndSwapInt(this, LOCKSTATE, s, s | WAITER)) { 2853 waiting = true; 2854 waiter = Thread.currentThread(); 2855 } 2856 } 2857 else if (waiting) 2858 LockSupport.park(this); 2859 } 2860 } 2861 2862 /** 2863 * Returns matching node or null if none. Tries to search 2864 * using tree comparisons from root, but continues linear 2865 * search when lock not available. 2866 */ find(int h, Object k)2867 final Node<K,V> find(int h, Object k) { 2868 if (k != null) { 2869 for (Node<K,V> e = first; e != null; ) { 2870 int s; K ek; 2871 if (((s = lockState) & (WAITER|WRITER)) != 0) { 2872 if (e.hash == h && 2873 ((ek = e.key) == k || (ek != null && k.equals(ek)))) 2874 return e; 2875 e = e.next; 2876 } 2877 else if (U.compareAndSwapInt(this, LOCKSTATE, s, 2878 s + READER)) { 2879 TreeNode<K,V> r, p; 2880 try { 2881 p = ((r = root) == null ? null : 2882 r.findTreeNode(h, k, null)); 2883 } finally { 2884 Thread w; 2885 if (U.getAndAddInt(this, LOCKSTATE, -READER) == 2886 (READER|WAITER) && (w = waiter) != null) 2887 LockSupport.unpark(w); 2888 } 2889 return p; 2890 } 2891 } 2892 } 2893 return null; 2894 } 2895 2896 /** 2897 * Finds or adds a node. 2898 * @return null if added 2899 */ putTreeVal(int h, K k, V v)2900 final TreeNode<K,V> putTreeVal(int h, K k, V v) { 2901 Class<?> kc = null; 2902 boolean searched = false; 2903 for (TreeNode<K,V> p = root;;) { 2904 int dir, ph; K pk; 2905 if (p == null) { 2906 first = root = new TreeNode<K,V>(h, k, v, null, null); 2907 break; 2908 } 2909 else if ((ph = p.hash) > h) 2910 dir = -1; 2911 else if (ph < h) 2912 dir = 1; 2913 else if ((pk = p.key) == k || (pk != null && k.equals(pk))) 2914 return p; 2915 else if ((kc == null && 2916 (kc = comparableClassFor(k)) == null) || 2917 (dir = compareComparables(kc, k, pk)) == 0) { 2918 if (!searched) { 2919 TreeNode<K,V> q, ch; 2920 searched = true; 2921 if (((ch = p.left) != null && 2922 (q = ch.findTreeNode(h, k, kc)) != null) || 2923 ((ch = p.right) != null && 2924 (q = ch.findTreeNode(h, k, kc)) != null)) 2925 return q; 2926 } 2927 dir = tieBreakOrder(k, pk); 2928 } 2929 2930 TreeNode<K,V> xp = p; 2931 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2932 TreeNode<K,V> x, f = first; 2933 first = x = new TreeNode<K,V>(h, k, v, f, xp); 2934 if (f != null) 2935 f.prev = x; 2936 if (dir <= 0) 2937 xp.left = x; 2938 else 2939 xp.right = x; 2940 if (!xp.red) 2941 x.red = true; 2942 else { 2943 lockRoot(); 2944 try { 2945 root = balanceInsertion(root, x); 2946 } finally { 2947 unlockRoot(); 2948 } 2949 } 2950 break; 2951 } 2952 } 2953 assert checkInvariants(root); 2954 return null; 2955 } 2956 2957 /** 2958 * Removes the given node, that must be present before this 2959 * call. This is messier than typical red-black deletion code 2960 * because we cannot swap the contents of an interior node 2961 * with a leaf successor that is pinned by "next" pointers 2962 * that are accessible independently of lock. So instead we 2963 * swap the tree linkages. 2964 * 2965 * @return true if now too small, so should be untreeified 2966 */ removeTreeNode(TreeNode<K,V> p)2967 final boolean removeTreeNode(TreeNode<K,V> p) { 2968 TreeNode<K,V> next = (TreeNode<K,V>)p.next; 2969 TreeNode<K,V> pred = p.prev; // unlink traversal pointers 2970 TreeNode<K,V> r, rl; 2971 if (pred == null) 2972 first = next; 2973 else 2974 pred.next = next; 2975 if (next != null) 2976 next.prev = pred; 2977 if (first == null) { 2978 root = null; 2979 return true; 2980 } 2981 if ((r = root) == null || r.right == null || // too small 2982 (rl = r.left) == null || rl.left == null) 2983 return true; 2984 lockRoot(); 2985 try { 2986 TreeNode<K,V> replacement; 2987 TreeNode<K,V> pl = p.left; 2988 TreeNode<K,V> pr = p.right; 2989 if (pl != null && pr != null) { 2990 TreeNode<K,V> s = pr, sl; 2991 while ((sl = s.left) != null) // find successor 2992 s = sl; 2993 boolean c = s.red; s.red = p.red; p.red = c; // swap colors 2994 TreeNode<K,V> sr = s.right; 2995 TreeNode<K,V> pp = p.parent; 2996 if (s == pr) { // p was s's direct parent 2997 p.parent = s; 2998 s.right = p; 2999 } 3000 else { 3001 TreeNode<K,V> sp = s.parent; 3002 if ((p.parent = sp) != null) { 3003 if (s == sp.left) 3004 sp.left = p; 3005 else 3006 sp.right = p; 3007 } 3008 if ((s.right = pr) != null) 3009 pr.parent = s; 3010 } 3011 p.left = null; 3012 if ((p.right = sr) != null) 3013 sr.parent = p; 3014 if ((s.left = pl) != null) 3015 pl.parent = s; 3016 if ((s.parent = pp) == null) 3017 r = s; 3018 else if (p == pp.left) 3019 pp.left = s; 3020 else 3021 pp.right = s; 3022 if (sr != null) 3023 replacement = sr; 3024 else 3025 replacement = p; 3026 } 3027 else if (pl != null) 3028 replacement = pl; 3029 else if (pr != null) 3030 replacement = pr; 3031 else 3032 replacement = p; 3033 if (replacement != p) { 3034 TreeNode<K,V> pp = replacement.parent = p.parent; 3035 if (pp == null) 3036 r = replacement; 3037 else if (p == pp.left) 3038 pp.left = replacement; 3039 else 3040 pp.right = replacement; 3041 p.left = p.right = p.parent = null; 3042 } 3043 3044 root = (p.red) ? r : balanceDeletion(r, replacement); 3045 3046 if (p == replacement) { // detach pointers 3047 TreeNode<K,V> pp; 3048 if ((pp = p.parent) != null) { 3049 if (p == pp.left) 3050 pp.left = null; 3051 else if (p == pp.right) 3052 pp.right = null; 3053 p.parent = null; 3054 } 3055 } 3056 } finally { 3057 unlockRoot(); 3058 } 3059 assert checkInvariants(root); 3060 return false; 3061 } 3062 3063 /* ------------------------------------------------------------ */ 3064 // Red-black tree methods, all adapted from CLR 3065 rotateLeft(TreeNode<K,V> root, TreeNode<K,V> p)3066 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, 3067 TreeNode<K,V> p) { 3068 TreeNode<K,V> r, pp, rl; 3069 if (p != null && (r = p.right) != null) { 3070 if ((rl = p.right = r.left) != null) 3071 rl.parent = p; 3072 if ((pp = r.parent = p.parent) == null) 3073 (root = r).red = false; 3074 else if (pp.left == p) 3075 pp.left = r; 3076 else 3077 pp.right = r; 3078 r.left = p; 3079 p.parent = r; 3080 } 3081 return root; 3082 } 3083 rotateRight(TreeNode<K,V> root, TreeNode<K,V> p)3084 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, 3085 TreeNode<K,V> p) { 3086 TreeNode<K,V> l, pp, lr; 3087 if (p != null && (l = p.left) != null) { 3088 if ((lr = p.left = l.right) != null) 3089 lr.parent = p; 3090 if ((pp = l.parent = p.parent) == null) 3091 (root = l).red = false; 3092 else if (pp.right == p) 3093 pp.right = l; 3094 else 3095 pp.left = l; 3096 l.right = p; 3097 p.parent = l; 3098 } 3099 return root; 3100 } 3101 balanceInsertion(TreeNode<K,V> root, TreeNode<K,V> x)3102 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, 3103 TreeNode<K,V> x) { 3104 x.red = true; 3105 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { 3106 if ((xp = x.parent) == null) { 3107 x.red = false; 3108 return x; 3109 } 3110 else if (!xp.red || (xpp = xp.parent) == null) 3111 return root; 3112 if (xp == (xppl = xpp.left)) { 3113 if ((xppr = xpp.right) != null && xppr.red) { 3114 xppr.red = false; 3115 xp.red = false; 3116 xpp.red = true; 3117 x = xpp; 3118 } 3119 else { 3120 if (x == xp.right) { 3121 root = rotateLeft(root, x = xp); 3122 xpp = (xp = x.parent) == null ? null : xp.parent; 3123 } 3124 if (xp != null) { 3125 xp.red = false; 3126 if (xpp != null) { 3127 xpp.red = true; 3128 root = rotateRight(root, xpp); 3129 } 3130 } 3131 } 3132 } 3133 else { 3134 if (xppl != null && xppl.red) { 3135 xppl.red = false; 3136 xp.red = false; 3137 xpp.red = true; 3138 x = xpp; 3139 } 3140 else { 3141 if (x == xp.left) { 3142 root = rotateRight(root, x = xp); 3143 xpp = (xp = x.parent) == null ? null : xp.parent; 3144 } 3145 if (xp != null) { 3146 xp.red = false; 3147 if (xpp != null) { 3148 xpp.red = true; 3149 root = rotateLeft(root, xpp); 3150 } 3151 } 3152 } 3153 } 3154 } 3155 } 3156 balanceDeletion(TreeNode<K,V> root, TreeNode<K,V> x)3157 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, 3158 TreeNode<K,V> x) { 3159 for (TreeNode<K,V> xp, xpl, xpr;;) { 3160 if (x == null || x == root) 3161 return root; 3162 else if ((xp = x.parent) == null) { 3163 x.red = false; 3164 return x; 3165 } 3166 else if (x.red) { 3167 x.red = false; 3168 return root; 3169 } 3170 else if ((xpl = xp.left) == x) { 3171 if ((xpr = xp.right) != null && xpr.red) { 3172 xpr.red = false; 3173 xp.red = true; 3174 root = rotateLeft(root, xp); 3175 xpr = (xp = x.parent) == null ? null : xp.right; 3176 } 3177 if (xpr == null) 3178 x = xp; 3179 else { 3180 TreeNode<K,V> sl = xpr.left, sr = xpr.right; 3181 if ((sr == null || !sr.red) && 3182 (sl == null || !sl.red)) { 3183 xpr.red = true; 3184 x = xp; 3185 } 3186 else { 3187 if (sr == null || !sr.red) { 3188 if (sl != null) 3189 sl.red = false; 3190 xpr.red = true; 3191 root = rotateRight(root, xpr); 3192 xpr = (xp = x.parent) == null ? 3193 null : xp.right; 3194 } 3195 if (xpr != null) { 3196 xpr.red = (xp == null) ? false : xp.red; 3197 if ((sr = xpr.right) != null) 3198 sr.red = false; 3199 } 3200 if (xp != null) { 3201 xp.red = false; 3202 root = rotateLeft(root, xp); 3203 } 3204 x = root; 3205 } 3206 } 3207 } 3208 else { // symmetric 3209 if (xpl != null && xpl.red) { 3210 xpl.red = false; 3211 xp.red = true; 3212 root = rotateRight(root, xp); 3213 xpl = (xp = x.parent) == null ? null : xp.left; 3214 } 3215 if (xpl == null) 3216 x = xp; 3217 else { 3218 TreeNode<K,V> sl = xpl.left, sr = xpl.right; 3219 if ((sl == null || !sl.red) && 3220 (sr == null || !sr.red)) { 3221 xpl.red = true; 3222 x = xp; 3223 } 3224 else { 3225 if (sl == null || !sl.red) { 3226 if (sr != null) 3227 sr.red = false; 3228 xpl.red = true; 3229 root = rotateLeft(root, xpl); 3230 xpl = (xp = x.parent) == null ? 3231 null : xp.left; 3232 } 3233 if (xpl != null) { 3234 xpl.red = (xp == null) ? false : xp.red; 3235 if ((sl = xpl.left) != null) 3236 sl.red = false; 3237 } 3238 if (xp != null) { 3239 xp.red = false; 3240 root = rotateRight(root, xp); 3241 } 3242 x = root; 3243 } 3244 } 3245 } 3246 } 3247 } 3248 3249 /** 3250 * Checks invariants recursively for the tree of Nodes rooted at t. 3251 */ checkInvariants(TreeNode<K,V> t)3252 static <K,V> boolean checkInvariants(TreeNode<K,V> t) { 3253 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, 3254 tb = t.prev, tn = (TreeNode<K,V>)t.next; 3255 if (tb != null && tb.next != t) 3256 return false; 3257 if (tn != null && tn.prev != t) 3258 return false; 3259 if (tp != null && t != tp.left && t != tp.right) 3260 return false; 3261 if (tl != null && (tl.parent != t || tl.hash > t.hash)) 3262 return false; 3263 if (tr != null && (tr.parent != t || tr.hash < t.hash)) 3264 return false; 3265 if (t.red && tl != null && tl.red && tr != null && tr.red) 3266 return false; 3267 if (tl != null && !checkInvariants(tl)) 3268 return false; 3269 if (tr != null && !checkInvariants(tr)) 3270 return false; 3271 return true; 3272 } 3273 3274 private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe(); 3275 private static final long LOCKSTATE; 3276 static { 3277 try { 3278 LOCKSTATE = U.objectFieldOffset 3279 (TreeBin.class.getDeclaredField("lockState")); 3280 } catch (ReflectiveOperationException e) { 3281 throw new Error(e); 3282 } 3283 } 3284 } 3285 3286 /* ----------------Table Traversal -------------- */ 3287 3288 /** 3289 * Records the table, its length, and current traversal index for a 3290 * traverser that must process a region of a forwarded table before 3291 * proceeding with current table. 3292 */ 3293 static final class TableStack<K,V> { 3294 int length; 3295 int index; 3296 Node<K,V>[] tab; 3297 TableStack<K,V> next; 3298 } 3299 3300 /** 3301 * Encapsulates traversal for methods such as containsValue; also 3302 * serves as a base class for other iterators and spliterators. 3303 * 3304 * Method advance visits once each still-valid node that was 3305 * reachable upon iterator construction. It might miss some that 3306 * were added to a bin after the bin was visited, which is OK wrt 3307 * consistency guarantees. Maintaining this property in the face 3308 * of possible ongoing resizes requires a fair amount of 3309 * bookkeeping state that is difficult to optimize away amidst 3310 * volatile accesses. Even so, traversal maintains reasonable 3311 * throughput. 3312 * 3313 * Normally, iteration proceeds bin-by-bin traversing lists. 3314 * However, if the table has been resized, then all future steps 3315 * must traverse both the bin at the current index as well as at 3316 * (index + baseSize); and so on for further resizings. To 3317 * paranoically cope with potential sharing by users of iterators 3318 * across threads, iteration terminates if a bounds checks fails 3319 * for a table read. 3320 */ 3321 static class Traverser<K,V> { 3322 Node<K,V>[] tab; // current table; updated if resized 3323 Node<K,V> next; // the next entry to use 3324 TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes 3325 int index; // index of bin to use next 3326 int baseIndex; // current index of initial table 3327 int baseLimit; // index bound for initial table 3328 final int baseSize; // initial table size 3329 Traverser(Node<K,V>[] tab, int size, int index, int limit)3330 Traverser(Node<K,V>[] tab, int size, int index, int limit) { 3331 this.tab = tab; 3332 this.baseSize = size; 3333 this.baseIndex = this.index = index; 3334 this.baseLimit = limit; 3335 this.next = null; 3336 } 3337 3338 /** 3339 * Advances if possible, returning next valid node, or null if none. 3340 */ advance()3341 final Node<K,V> advance() { 3342 Node<K,V> e; 3343 if ((e = next) != null) 3344 e = e.next; 3345 for (;;) { 3346 Node<K,V>[] t; int i, n; // must use locals in checks 3347 if (e != null) 3348 return next = e; 3349 if (baseIndex >= baseLimit || (t = tab) == null || 3350 (n = t.length) <= (i = index) || i < 0) 3351 return next = null; 3352 if ((e = tabAt(t, i)) != null && e.hash < 0) { 3353 if (e instanceof ForwardingNode) { 3354 tab = ((ForwardingNode<K,V>)e).nextTable; 3355 e = null; 3356 pushState(t, i, n); 3357 continue; 3358 } 3359 else if (e instanceof TreeBin) 3360 e = ((TreeBin<K,V>)e).first; 3361 else 3362 e = null; 3363 } 3364 if (stack != null) 3365 recoverState(n); 3366 else if ((index = i + baseSize) >= n) 3367 index = ++baseIndex; // visit upper slots if present 3368 } 3369 } 3370 3371 /** 3372 * Saves traversal state upon encountering a forwarding node. 3373 */ pushState(Node<K,V>[] t, int i, int n)3374 private void pushState(Node<K,V>[] t, int i, int n) { 3375 TableStack<K,V> s = spare; // reuse if possible 3376 if (s != null) 3377 spare = s.next; 3378 else 3379 s = new TableStack<K,V>(); 3380 s.tab = t; 3381 s.length = n; 3382 s.index = i; 3383 s.next = stack; 3384 stack = s; 3385 } 3386 3387 /** 3388 * Possibly pops traversal state. 3389 * 3390 * @param n length of current table 3391 */ recoverState(int n)3392 private void recoverState(int n) { 3393 TableStack<K,V> s; int len; 3394 while ((s = stack) != null && (index += (len = s.length)) >= n) { 3395 n = len; 3396 index = s.index; 3397 tab = s.tab; 3398 s.tab = null; 3399 TableStack<K,V> next = s.next; 3400 s.next = spare; // save for reuse 3401 stack = next; 3402 spare = s; 3403 } 3404 if (s == null && (index += baseSize) >= n) 3405 index = ++baseIndex; 3406 } 3407 } 3408 3409 /** 3410 * Base of key, value, and entry Iterators. Adds fields to 3411 * Traverser to support iterator.remove. 3412 */ 3413 static class BaseIterator<K,V> extends Traverser<K,V> { 3414 final ConcurrentHashMap<K,V> map; 3415 Node<K,V> lastReturned; BaseIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMap<K,V> map)3416 BaseIterator(Node<K,V>[] tab, int size, int index, int limit, 3417 ConcurrentHashMap<K,V> map) { 3418 super(tab, size, index, limit); 3419 this.map = map; 3420 advance(); 3421 } 3422 hasNext()3423 public final boolean hasNext() { return next != null; } hasMoreElements()3424 public final boolean hasMoreElements() { return next != null; } 3425 remove()3426 public final void remove() { 3427 Node<K,V> p; 3428 if ((p = lastReturned) == null) 3429 throw new IllegalStateException(); 3430 lastReturned = null; 3431 map.replaceNode(p.key, null, null); 3432 } 3433 } 3434 3435 static final class KeyIterator<K,V> extends BaseIterator<K,V> 3436 implements Iterator<K>, Enumeration<K> { KeyIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMap<K,V> map)3437 KeyIterator(Node<K,V>[] tab, int index, int size, int limit, 3438 ConcurrentHashMap<K,V> map) { 3439 super(tab, index, size, limit, map); 3440 } 3441 next()3442 public final K next() { 3443 Node<K,V> p; 3444 if ((p = next) == null) 3445 throw new NoSuchElementException(); 3446 K k = p.key; 3447 lastReturned = p; 3448 advance(); 3449 return k; 3450 } 3451 nextElement()3452 public final K nextElement() { return next(); } 3453 } 3454 3455 static final class ValueIterator<K,V> extends BaseIterator<K,V> 3456 implements Iterator<V>, Enumeration<V> { ValueIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMap<K,V> map)3457 ValueIterator(Node<K,V>[] tab, int index, int size, int limit, 3458 ConcurrentHashMap<K,V> map) { 3459 super(tab, index, size, limit, map); 3460 } 3461 next()3462 public final V next() { 3463 Node<K,V> p; 3464 if ((p = next) == null) 3465 throw new NoSuchElementException(); 3466 V v = p.val; 3467 lastReturned = p; 3468 advance(); 3469 return v; 3470 } 3471 nextElement()3472 public final V nextElement() { return next(); } 3473 } 3474 3475 static final class EntryIterator<K,V> extends BaseIterator<K,V> 3476 implements Iterator<Map.Entry<K,V>> { EntryIterator(Node<K,V>[] tab, int index, int size, int limit, ConcurrentHashMap<K,V> map)3477 EntryIterator(Node<K,V>[] tab, int index, int size, int limit, 3478 ConcurrentHashMap<K,V> map) { 3479 super(tab, index, size, limit, map); 3480 } 3481 next()3482 public final Map.Entry<K,V> next() { 3483 Node<K,V> p; 3484 if ((p = next) == null) 3485 throw new NoSuchElementException(); 3486 K k = p.key; 3487 V v = p.val; 3488 lastReturned = p; 3489 advance(); 3490 return new MapEntry<K,V>(k, v, map); 3491 } 3492 } 3493 3494 /** 3495 * Exported Entry for EntryIterator. 3496 */ 3497 static final class MapEntry<K,V> implements Map.Entry<K,V> { 3498 final K key; // non-null 3499 V val; // non-null 3500 final ConcurrentHashMap<K,V> map; MapEntry(K key, V val, ConcurrentHashMap<K,V> map)3501 MapEntry(K key, V val, ConcurrentHashMap<K,V> map) { 3502 this.key = key; 3503 this.val = val; 3504 this.map = map; 3505 } getKey()3506 public K getKey() { return key; } getValue()3507 public V getValue() { return val; } hashCode()3508 public int hashCode() { return key.hashCode() ^ val.hashCode(); } toString()3509 public String toString() { 3510 return Helpers.mapEntryToString(key, val); 3511 } 3512 equals(Object o)3513 public boolean equals(Object o) { 3514 Object k, v; Map.Entry<?,?> e; 3515 return ((o instanceof Map.Entry) && 3516 (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 3517 (v = e.getValue()) != null && 3518 (k == key || k.equals(key)) && 3519 (v == val || v.equals(val))); 3520 } 3521 3522 /** 3523 * Sets our entry's value and writes through to the map. The 3524 * value to return is somewhat arbitrary here. Since we do not 3525 * necessarily track asynchronous changes, the most recent 3526 * "previous" value could be different from what we return (or 3527 * could even have been removed, in which case the put will 3528 * re-establish). We do not and cannot guarantee more. 3529 */ setValue(V value)3530 public V setValue(V value) { 3531 if (value == null) throw new NullPointerException(); 3532 V v = val; 3533 val = value; 3534 map.put(key, value); 3535 return v; 3536 } 3537 } 3538 3539 static final class KeySpliterator<K,V> extends Traverser<K,V> 3540 implements Spliterator<K> { 3541 long est; // size estimate KeySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est)3542 KeySpliterator(Node<K,V>[] tab, int size, int index, int limit, 3543 long est) { 3544 super(tab, size, index, limit); 3545 this.est = est; 3546 } 3547 trySplit()3548 public KeySpliterator<K,V> trySplit() { 3549 int i, f, h; 3550 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3551 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h, 3552 f, est >>>= 1); 3553 } 3554 forEachRemaining(Consumer<? super K> action)3555 public void forEachRemaining(Consumer<? super K> action) { 3556 if (action == null) throw new NullPointerException(); 3557 for (Node<K,V> p; (p = advance()) != null;) 3558 action.accept(p.key); 3559 } 3560 tryAdvance(Consumer<? super K> action)3561 public boolean tryAdvance(Consumer<? super K> action) { 3562 if (action == null) throw new NullPointerException(); 3563 Node<K,V> p; 3564 if ((p = advance()) == null) 3565 return false; 3566 action.accept(p.key); 3567 return true; 3568 } 3569 estimateSize()3570 public long estimateSize() { return est; } 3571 characteristics()3572 public int characteristics() { 3573 return Spliterator.DISTINCT | Spliterator.CONCURRENT | 3574 Spliterator.NONNULL; 3575 } 3576 } 3577 3578 static final class ValueSpliterator<K,V> extends Traverser<K,V> 3579 implements Spliterator<V> { 3580 long est; // size estimate ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit, long est)3581 ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit, 3582 long est) { 3583 super(tab, size, index, limit); 3584 this.est = est; 3585 } 3586 trySplit()3587 public ValueSpliterator<K,V> trySplit() { 3588 int i, f, h; 3589 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3590 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h, 3591 f, est >>>= 1); 3592 } 3593 forEachRemaining(Consumer<? super V> action)3594 public void forEachRemaining(Consumer<? super V> action) { 3595 if (action == null) throw new NullPointerException(); 3596 for (Node<K,V> p; (p = advance()) != null;) 3597 action.accept(p.val); 3598 } 3599 tryAdvance(Consumer<? super V> action)3600 public boolean tryAdvance(Consumer<? super V> action) { 3601 if (action == null) throw new NullPointerException(); 3602 Node<K,V> p; 3603 if ((p = advance()) == null) 3604 return false; 3605 action.accept(p.val); 3606 return true; 3607 } 3608 estimateSize()3609 public long estimateSize() { return est; } 3610 characteristics()3611 public int characteristics() { 3612 return Spliterator.CONCURRENT | Spliterator.NONNULL; 3613 } 3614 } 3615 3616 static final class EntrySpliterator<K,V> extends Traverser<K,V> 3617 implements Spliterator<Map.Entry<K,V>> { 3618 final ConcurrentHashMap<K,V> map; // To export MapEntry 3619 long est; // size estimate EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est, ConcurrentHashMap<K,V> map)3620 EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit, 3621 long est, ConcurrentHashMap<K,V> map) { 3622 super(tab, size, index, limit); 3623 this.map = map; 3624 this.est = est; 3625 } 3626 trySplit()3627 public EntrySpliterator<K,V> trySplit() { 3628 int i, f, h; 3629 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null : 3630 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h, 3631 f, est >>>= 1, map); 3632 } 3633 forEachRemaining(Consumer<? super Map.Entry<K,V>> action)3634 public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) { 3635 if (action == null) throw new NullPointerException(); 3636 for (Node<K,V> p; (p = advance()) != null; ) 3637 action.accept(new MapEntry<K,V>(p.key, p.val, map)); 3638 } 3639 tryAdvance(Consumer<? super Map.Entry<K,V>> action)3640 public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) { 3641 if (action == null) throw new NullPointerException(); 3642 Node<K,V> p; 3643 if ((p = advance()) == null) 3644 return false; 3645 action.accept(new MapEntry<K,V>(p.key, p.val, map)); 3646 return true; 3647 } 3648 estimateSize()3649 public long estimateSize() { return est; } 3650 characteristics()3651 public int characteristics() { 3652 return Spliterator.DISTINCT | Spliterator.CONCURRENT | 3653 Spliterator.NONNULL; 3654 } 3655 } 3656 3657 // Parallel bulk operations 3658 3659 /** 3660 * Computes initial batch value for bulk tasks. The returned value 3661 * is approximately exp2 of the number of times (minus one) to 3662 * split task by two before executing leaf action. This value is 3663 * faster to compute and more convenient to use as a guide to 3664 * splitting than is the depth, since it is used while dividing by 3665 * two anyway. 3666 */ batchFor(long b)3667 final int batchFor(long b) { 3668 long n; 3669 if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b) 3670 return 0; 3671 int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4 3672 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n; 3673 } 3674 3675 /** 3676 * Performs the given action for each (key, value). 3677 * 3678 * @param parallelismThreshold the (estimated) number of elements 3679 * needed for this operation to be executed in parallel 3680 * @param action the action 3681 * @since 1.8 3682 */ forEach(long parallelismThreshold, BiConsumer<? super K,? super V> action)3683 public void forEach(long parallelismThreshold, 3684 BiConsumer<? super K,? super V> action) { 3685 if (action == null) throw new NullPointerException(); 3686 new ForEachMappingTask<K,V> 3687 (null, batchFor(parallelismThreshold), 0, 0, table, 3688 action).invoke(); 3689 } 3690 3691 /** 3692 * Performs the given action for each non-null transformation 3693 * of each (key, value). 3694 * 3695 * @param parallelismThreshold the (estimated) number of elements 3696 * needed for this operation to be executed in parallel 3697 * @param transformer a function returning the transformation 3698 * for an element, or null if there is no transformation (in 3699 * which case the action is not applied) 3700 * @param action the action 3701 * @param <U> the return type of the transformer 3702 * @since 1.8 3703 */ forEach(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> transformer, Consumer<? super U> action)3704 public <U> void forEach(long parallelismThreshold, 3705 BiFunction<? super K, ? super V, ? extends U> transformer, 3706 Consumer<? super U> action) { 3707 if (transformer == null || action == null) 3708 throw new NullPointerException(); 3709 new ForEachTransformedMappingTask<K,V,U> 3710 (null, batchFor(parallelismThreshold), 0, 0, table, 3711 transformer, action).invoke(); 3712 } 3713 3714 /** 3715 * Returns a non-null result from applying the given search 3716 * function on each (key, value), or null if none. Upon 3717 * success, further element processing is suppressed and the 3718 * results of any other parallel invocations of the search 3719 * function are ignored. 3720 * 3721 * @param parallelismThreshold the (estimated) number of elements 3722 * needed for this operation to be executed in parallel 3723 * @param searchFunction a function returning a non-null 3724 * result on success, else null 3725 * @param <U> the return type of the search function 3726 * @return a non-null result from applying the given search 3727 * function on each (key, value), or null if none 3728 * @since 1.8 3729 */ search(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> searchFunction)3730 public <U> U search(long parallelismThreshold, 3731 BiFunction<? super K, ? super V, ? extends U> searchFunction) { 3732 if (searchFunction == null) throw new NullPointerException(); 3733 return new SearchMappingsTask<K,V,U> 3734 (null, batchFor(parallelismThreshold), 0, 0, table, 3735 searchFunction, new AtomicReference<U>()).invoke(); 3736 } 3737 3738 /** 3739 * Returns the result of accumulating the given transformation 3740 * of all (key, value) pairs using the given reducer to 3741 * combine values, or null if none. 3742 * 3743 * @param parallelismThreshold the (estimated) number of elements 3744 * needed for this operation to be executed in parallel 3745 * @param transformer a function returning the transformation 3746 * for an element, or null if there is no transformation (in 3747 * which case it is not combined) 3748 * @param reducer a commutative associative combining function 3749 * @param <U> the return type of the transformer 3750 * @return the result of accumulating the given transformation 3751 * of all (key, value) pairs 3752 * @since 1.8 3753 */ reduce(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)3754 public <U> U reduce(long parallelismThreshold, 3755 BiFunction<? super K, ? super V, ? extends U> transformer, 3756 BiFunction<? super U, ? super U, ? extends U> reducer) { 3757 if (transformer == null || reducer == null) 3758 throw new NullPointerException(); 3759 return new MapReduceMappingsTask<K,V,U> 3760 (null, batchFor(parallelismThreshold), 0, 0, table, 3761 null, transformer, reducer).invoke(); 3762 } 3763 3764 /** 3765 * Returns the result of accumulating the given transformation 3766 * of all (key, value) pairs using the given reducer to 3767 * combine values, and the given basis as an identity value. 3768 * 3769 * @param parallelismThreshold the (estimated) number of elements 3770 * needed for this operation to be executed in parallel 3771 * @param transformer a function returning the transformation 3772 * for an element 3773 * @param basis the identity (initial default value) for the reduction 3774 * @param reducer a commutative associative combining function 3775 * @return the result of accumulating the given transformation 3776 * of all (key, value) pairs 3777 * @since 1.8 3778 */ reduceToDouble(long parallelismThreshold, ToDoubleBiFunction<? super K, ? super V> transformer, double basis, DoubleBinaryOperator reducer)3779 public double reduceToDouble(long parallelismThreshold, 3780 ToDoubleBiFunction<? super K, ? super V> transformer, 3781 double basis, 3782 DoubleBinaryOperator reducer) { 3783 if (transformer == null || reducer == null) 3784 throw new NullPointerException(); 3785 return new MapReduceMappingsToDoubleTask<K,V> 3786 (null, batchFor(parallelismThreshold), 0, 0, table, 3787 null, transformer, basis, reducer).invoke(); 3788 } 3789 3790 /** 3791 * Returns the result of accumulating the given transformation 3792 * of all (key, value) pairs using the given reducer to 3793 * combine values, and the given basis as an identity value. 3794 * 3795 * @param parallelismThreshold the (estimated) number of elements 3796 * needed for this operation to be executed in parallel 3797 * @param transformer a function returning the transformation 3798 * for an element 3799 * @param basis the identity (initial default value) for the reduction 3800 * @param reducer a commutative associative combining function 3801 * @return the result of accumulating the given transformation 3802 * of all (key, value) pairs 3803 * @since 1.8 3804 */ reduceToLong(long parallelismThreshold, ToLongBiFunction<? super K, ? super V> transformer, long basis, LongBinaryOperator reducer)3805 public long reduceToLong(long parallelismThreshold, 3806 ToLongBiFunction<? super K, ? super V> transformer, 3807 long basis, 3808 LongBinaryOperator reducer) { 3809 if (transformer == null || reducer == null) 3810 throw new NullPointerException(); 3811 return new MapReduceMappingsToLongTask<K,V> 3812 (null, batchFor(parallelismThreshold), 0, 0, table, 3813 null, transformer, basis, reducer).invoke(); 3814 } 3815 3816 /** 3817 * Returns the result of accumulating the given transformation 3818 * of all (key, value) pairs using the given reducer to 3819 * combine values, and the given basis as an identity value. 3820 * 3821 * @param parallelismThreshold the (estimated) number of elements 3822 * needed for this operation to be executed in parallel 3823 * @param transformer a function returning the transformation 3824 * for an element 3825 * @param basis the identity (initial default value) for the reduction 3826 * @param reducer a commutative associative combining function 3827 * @return the result of accumulating the given transformation 3828 * of all (key, value) pairs 3829 * @since 1.8 3830 */ reduceToInt(long parallelismThreshold, ToIntBiFunction<? super K, ? super V> transformer, int basis, IntBinaryOperator reducer)3831 public int reduceToInt(long parallelismThreshold, 3832 ToIntBiFunction<? super K, ? super V> transformer, 3833 int basis, 3834 IntBinaryOperator reducer) { 3835 if (transformer == null || reducer == null) 3836 throw new NullPointerException(); 3837 return new MapReduceMappingsToIntTask<K,V> 3838 (null, batchFor(parallelismThreshold), 0, 0, table, 3839 null, transformer, basis, reducer).invoke(); 3840 } 3841 3842 /** 3843 * Performs the given action for each key. 3844 * 3845 * @param parallelismThreshold the (estimated) number of elements 3846 * needed for this operation to be executed in parallel 3847 * @param action the action 3848 * @since 1.8 3849 */ forEachKey(long parallelismThreshold, Consumer<? super K> action)3850 public void forEachKey(long parallelismThreshold, 3851 Consumer<? super K> action) { 3852 if (action == null) throw new NullPointerException(); 3853 new ForEachKeyTask<K,V> 3854 (null, batchFor(parallelismThreshold), 0, 0, table, 3855 action).invoke(); 3856 } 3857 3858 /** 3859 * Performs the given action for each non-null transformation 3860 * of each key. 3861 * 3862 * @param parallelismThreshold the (estimated) number of elements 3863 * needed for this operation to be executed in parallel 3864 * @param transformer a function returning the transformation 3865 * for an element, or null if there is no transformation (in 3866 * which case the action is not applied) 3867 * @param action the action 3868 * @param <U> the return type of the transformer 3869 * @since 1.8 3870 */ forEachKey(long parallelismThreshold, Function<? super K, ? extends U> transformer, Consumer<? super U> action)3871 public <U> void forEachKey(long parallelismThreshold, 3872 Function<? super K, ? extends U> transformer, 3873 Consumer<? super U> action) { 3874 if (transformer == null || action == null) 3875 throw new NullPointerException(); 3876 new ForEachTransformedKeyTask<K,V,U> 3877 (null, batchFor(parallelismThreshold), 0, 0, table, 3878 transformer, action).invoke(); 3879 } 3880 3881 /** 3882 * Returns a non-null result from applying the given search 3883 * function on each key, or null if none. Upon success, 3884 * further element processing is suppressed and the results of 3885 * any other parallel invocations of the search function are 3886 * ignored. 3887 * 3888 * @param parallelismThreshold the (estimated) number of elements 3889 * needed for this operation to be executed in parallel 3890 * @param searchFunction a function returning a non-null 3891 * result on success, else null 3892 * @param <U> the return type of the search function 3893 * @return a non-null result from applying the given search 3894 * function on each key, or null if none 3895 * @since 1.8 3896 */ searchKeys(long parallelismThreshold, Function<? super K, ? extends U> searchFunction)3897 public <U> U searchKeys(long parallelismThreshold, 3898 Function<? super K, ? extends U> searchFunction) { 3899 if (searchFunction == null) throw new NullPointerException(); 3900 return new SearchKeysTask<K,V,U> 3901 (null, batchFor(parallelismThreshold), 0, 0, table, 3902 searchFunction, new AtomicReference<U>()).invoke(); 3903 } 3904 3905 /** 3906 * Returns the result of accumulating all keys using the given 3907 * reducer to combine values, or null if none. 3908 * 3909 * @param parallelismThreshold the (estimated) number of elements 3910 * needed for this operation to be executed in parallel 3911 * @param reducer a commutative associative combining function 3912 * @return the result of accumulating all keys using the given 3913 * reducer to combine values, or null if none 3914 * @since 1.8 3915 */ reduceKeys(long parallelismThreshold, BiFunction<? super K, ? super K, ? extends K> reducer)3916 public K reduceKeys(long parallelismThreshold, 3917 BiFunction<? super K, ? super K, ? extends K> reducer) { 3918 if (reducer == null) throw new NullPointerException(); 3919 return new ReduceKeysTask<K,V> 3920 (null, batchFor(parallelismThreshold), 0, 0, table, 3921 null, reducer).invoke(); 3922 } 3923 3924 /** 3925 * Returns the result of accumulating the given transformation 3926 * of all keys using the given reducer to combine values, or 3927 * null if none. 3928 * 3929 * @param parallelismThreshold the (estimated) number of elements 3930 * needed for this operation to be executed in parallel 3931 * @param transformer a function returning the transformation 3932 * for an element, or null if there is no transformation (in 3933 * which case it is not combined) 3934 * @param reducer a commutative associative combining function 3935 * @param <U> the return type of the transformer 3936 * @return the result of accumulating the given transformation 3937 * of all keys 3938 * @since 1.8 3939 */ reduceKeys(long parallelismThreshold, Function<? super K, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)3940 public <U> U reduceKeys(long parallelismThreshold, 3941 Function<? super K, ? extends U> transformer, 3942 BiFunction<? super U, ? super U, ? extends U> reducer) { 3943 if (transformer == null || reducer == null) 3944 throw new NullPointerException(); 3945 return new MapReduceKeysTask<K,V,U> 3946 (null, batchFor(parallelismThreshold), 0, 0, table, 3947 null, transformer, reducer).invoke(); 3948 } 3949 3950 /** 3951 * Returns the result of accumulating the given transformation 3952 * of all keys using the given reducer to combine values, and 3953 * the given basis as an identity value. 3954 * 3955 * @param parallelismThreshold the (estimated) number of elements 3956 * needed for this operation to be executed in parallel 3957 * @param transformer a function returning the transformation 3958 * for an element 3959 * @param basis the identity (initial default value) for the reduction 3960 * @param reducer a commutative associative combining function 3961 * @return the result of accumulating the given transformation 3962 * of all keys 3963 * @since 1.8 3964 */ reduceKeysToDouble(long parallelismThreshold, ToDoubleFunction<? super K> transformer, double basis, DoubleBinaryOperator reducer)3965 public double reduceKeysToDouble(long parallelismThreshold, 3966 ToDoubleFunction<? super K> transformer, 3967 double basis, 3968 DoubleBinaryOperator reducer) { 3969 if (transformer == null || reducer == null) 3970 throw new NullPointerException(); 3971 return new MapReduceKeysToDoubleTask<K,V> 3972 (null, batchFor(parallelismThreshold), 0, 0, table, 3973 null, transformer, basis, reducer).invoke(); 3974 } 3975 3976 /** 3977 * Returns the result of accumulating the given transformation 3978 * of all keys using the given reducer to combine values, and 3979 * the given basis as an identity value. 3980 * 3981 * @param parallelismThreshold the (estimated) number of elements 3982 * needed for this operation to be executed in parallel 3983 * @param transformer a function returning the transformation 3984 * for an element 3985 * @param basis the identity (initial default value) for the reduction 3986 * @param reducer a commutative associative combining function 3987 * @return the result of accumulating the given transformation 3988 * of all keys 3989 * @since 1.8 3990 */ reduceKeysToLong(long parallelismThreshold, ToLongFunction<? super K> transformer, long basis, LongBinaryOperator reducer)3991 public long reduceKeysToLong(long parallelismThreshold, 3992 ToLongFunction<? super K> transformer, 3993 long basis, 3994 LongBinaryOperator reducer) { 3995 if (transformer == null || reducer == null) 3996 throw new NullPointerException(); 3997 return new MapReduceKeysToLongTask<K,V> 3998 (null, batchFor(parallelismThreshold), 0, 0, table, 3999 null, transformer, basis, reducer).invoke(); 4000 } 4001 4002 /** 4003 * Returns the result of accumulating the given transformation 4004 * of all keys using the given reducer to combine values, and 4005 * the given basis as an identity value. 4006 * 4007 * @param parallelismThreshold the (estimated) number of elements 4008 * needed for this operation to be executed in parallel 4009 * @param transformer a function returning the transformation 4010 * for an element 4011 * @param basis the identity (initial default value) for the reduction 4012 * @param reducer a commutative associative combining function 4013 * @return the result of accumulating the given transformation 4014 * of all keys 4015 * @since 1.8 4016 */ reduceKeysToInt(long parallelismThreshold, ToIntFunction<? super K> transformer, int basis, IntBinaryOperator reducer)4017 public int reduceKeysToInt(long parallelismThreshold, 4018 ToIntFunction<? super K> transformer, 4019 int basis, 4020 IntBinaryOperator reducer) { 4021 if (transformer == null || reducer == null) 4022 throw new NullPointerException(); 4023 return new MapReduceKeysToIntTask<K,V> 4024 (null, batchFor(parallelismThreshold), 0, 0, table, 4025 null, transformer, basis, reducer).invoke(); 4026 } 4027 4028 /** 4029 * Performs the given action for each value. 4030 * 4031 * @param parallelismThreshold the (estimated) number of elements 4032 * needed for this operation to be executed in parallel 4033 * @param action the action 4034 * @since 1.8 4035 */ forEachValue(long parallelismThreshold, Consumer<? super V> action)4036 public void forEachValue(long parallelismThreshold, 4037 Consumer<? super V> action) { 4038 if (action == null) 4039 throw new NullPointerException(); 4040 new ForEachValueTask<K,V> 4041 (null, batchFor(parallelismThreshold), 0, 0, table, 4042 action).invoke(); 4043 } 4044 4045 /** 4046 * Performs the given action for each non-null transformation 4047 * of each value. 4048 * 4049 * @param parallelismThreshold the (estimated) number of elements 4050 * needed for this operation to be executed in parallel 4051 * @param transformer a function returning the transformation 4052 * for an element, or null if there is no transformation (in 4053 * which case the action is not applied) 4054 * @param action the action 4055 * @param <U> the return type of the transformer 4056 * @since 1.8 4057 */ forEachValue(long parallelismThreshold, Function<? super V, ? extends U> transformer, Consumer<? super U> action)4058 public <U> void forEachValue(long parallelismThreshold, 4059 Function<? super V, ? extends U> transformer, 4060 Consumer<? super U> action) { 4061 if (transformer == null || action == null) 4062 throw new NullPointerException(); 4063 new ForEachTransformedValueTask<K,V,U> 4064 (null, batchFor(parallelismThreshold), 0, 0, table, 4065 transformer, action).invoke(); 4066 } 4067 4068 /** 4069 * Returns a non-null result from applying the given search 4070 * function on each value, or null if none. Upon success, 4071 * further element processing is suppressed and the results of 4072 * any other parallel invocations of the search function are 4073 * ignored. 4074 * 4075 * @param parallelismThreshold the (estimated) number of elements 4076 * needed for this operation to be executed in parallel 4077 * @param searchFunction a function returning a non-null 4078 * result on success, else null 4079 * @param <U> the return type of the search function 4080 * @return a non-null result from applying the given search 4081 * function on each value, or null if none 4082 * @since 1.8 4083 */ searchValues(long parallelismThreshold, Function<? super V, ? extends U> searchFunction)4084 public <U> U searchValues(long parallelismThreshold, 4085 Function<? super V, ? extends U> searchFunction) { 4086 if (searchFunction == null) throw new NullPointerException(); 4087 return new SearchValuesTask<K,V,U> 4088 (null, batchFor(parallelismThreshold), 0, 0, table, 4089 searchFunction, new AtomicReference<U>()).invoke(); 4090 } 4091 4092 /** 4093 * Returns the result of accumulating all values using the 4094 * given reducer to combine values, or null if none. 4095 * 4096 * @param parallelismThreshold the (estimated) number of elements 4097 * needed for this operation to be executed in parallel 4098 * @param reducer a commutative associative combining function 4099 * @return the result of accumulating all values 4100 * @since 1.8 4101 */ reduceValues(long parallelismThreshold, BiFunction<? super V, ? super V, ? extends V> reducer)4102 public V reduceValues(long parallelismThreshold, 4103 BiFunction<? super V, ? super V, ? extends V> reducer) { 4104 if (reducer == null) throw new NullPointerException(); 4105 return new ReduceValuesTask<K,V> 4106 (null, batchFor(parallelismThreshold), 0, 0, table, 4107 null, reducer).invoke(); 4108 } 4109 4110 /** 4111 * Returns the result of accumulating the given transformation 4112 * of all values using the given reducer to combine values, or 4113 * null if none. 4114 * 4115 * @param parallelismThreshold the (estimated) number of elements 4116 * needed for this operation to be executed in parallel 4117 * @param transformer a function returning the transformation 4118 * for an element, or null if there is no transformation (in 4119 * which case it is not combined) 4120 * @param reducer a commutative associative combining function 4121 * @param <U> the return type of the transformer 4122 * @return the result of accumulating the given transformation 4123 * of all values 4124 * @since 1.8 4125 */ reduceValues(long parallelismThreshold, Function<? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)4126 public <U> U reduceValues(long parallelismThreshold, 4127 Function<? super V, ? extends U> transformer, 4128 BiFunction<? super U, ? super U, ? extends U> reducer) { 4129 if (transformer == null || reducer == null) 4130 throw new NullPointerException(); 4131 return new MapReduceValuesTask<K,V,U> 4132 (null, batchFor(parallelismThreshold), 0, 0, table, 4133 null, transformer, reducer).invoke(); 4134 } 4135 4136 /** 4137 * Returns the result of accumulating the given transformation 4138 * of all values using the given reducer to combine values, 4139 * and the given basis as an identity value. 4140 * 4141 * @param parallelismThreshold the (estimated) number of elements 4142 * needed for this operation to be executed in parallel 4143 * @param transformer a function returning the transformation 4144 * for an element 4145 * @param basis the identity (initial default value) for the reduction 4146 * @param reducer a commutative associative combining function 4147 * @return the result of accumulating the given transformation 4148 * of all values 4149 * @since 1.8 4150 */ reduceValuesToDouble(long parallelismThreshold, ToDoubleFunction<? super V> transformer, double basis, DoubleBinaryOperator reducer)4151 public double reduceValuesToDouble(long parallelismThreshold, 4152 ToDoubleFunction<? super V> transformer, 4153 double basis, 4154 DoubleBinaryOperator reducer) { 4155 if (transformer == null || reducer == null) 4156 throw new NullPointerException(); 4157 return new MapReduceValuesToDoubleTask<K,V> 4158 (null, batchFor(parallelismThreshold), 0, 0, table, 4159 null, transformer, basis, reducer).invoke(); 4160 } 4161 4162 /** 4163 * Returns the result of accumulating the given transformation 4164 * of all values using the given reducer to combine values, 4165 * and the given basis as an identity value. 4166 * 4167 * @param parallelismThreshold the (estimated) number of elements 4168 * needed for this operation to be executed in parallel 4169 * @param transformer a function returning the transformation 4170 * for an element 4171 * @param basis the identity (initial default value) for the reduction 4172 * @param reducer a commutative associative combining function 4173 * @return the result of accumulating the given transformation 4174 * of all values 4175 * @since 1.8 4176 */ reduceValuesToLong(long parallelismThreshold, ToLongFunction<? super V> transformer, long basis, LongBinaryOperator reducer)4177 public long reduceValuesToLong(long parallelismThreshold, 4178 ToLongFunction<? super V> transformer, 4179 long basis, 4180 LongBinaryOperator reducer) { 4181 if (transformer == null || reducer == null) 4182 throw new NullPointerException(); 4183 return new MapReduceValuesToLongTask<K,V> 4184 (null, batchFor(parallelismThreshold), 0, 0, table, 4185 null, transformer, basis, reducer).invoke(); 4186 } 4187 4188 /** 4189 * Returns the result of accumulating the given transformation 4190 * of all values using the given reducer to combine values, 4191 * and the given basis as an identity value. 4192 * 4193 * @param parallelismThreshold the (estimated) number of elements 4194 * needed for this operation to be executed in parallel 4195 * @param transformer a function returning the transformation 4196 * for an element 4197 * @param basis the identity (initial default value) for the reduction 4198 * @param reducer a commutative associative combining function 4199 * @return the result of accumulating the given transformation 4200 * of all values 4201 * @since 1.8 4202 */ reduceValuesToInt(long parallelismThreshold, ToIntFunction<? super V> transformer, int basis, IntBinaryOperator reducer)4203 public int reduceValuesToInt(long parallelismThreshold, 4204 ToIntFunction<? super V> transformer, 4205 int basis, 4206 IntBinaryOperator reducer) { 4207 if (transformer == null || reducer == null) 4208 throw new NullPointerException(); 4209 return new MapReduceValuesToIntTask<K,V> 4210 (null, batchFor(parallelismThreshold), 0, 0, table, 4211 null, transformer, basis, reducer).invoke(); 4212 } 4213 4214 /** 4215 * Performs the given action for each entry. 4216 * 4217 * @param parallelismThreshold the (estimated) number of elements 4218 * needed for this operation to be executed in parallel 4219 * @param action the action 4220 * @since 1.8 4221 */ forEachEntry(long parallelismThreshold, Consumer<? super Map.Entry<K,V>> action)4222 public void forEachEntry(long parallelismThreshold, 4223 Consumer<? super Map.Entry<K,V>> action) { 4224 if (action == null) throw new NullPointerException(); 4225 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table, 4226 action).invoke(); 4227 } 4228 4229 /** 4230 * Performs the given action for each non-null transformation 4231 * of each entry. 4232 * 4233 * @param parallelismThreshold the (estimated) number of elements 4234 * needed for this operation to be executed in parallel 4235 * @param transformer a function returning the transformation 4236 * for an element, or null if there is no transformation (in 4237 * which case the action is not applied) 4238 * @param action the action 4239 * @param <U> the return type of the transformer 4240 * @since 1.8 4241 */ forEachEntry(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action)4242 public <U> void forEachEntry(long parallelismThreshold, 4243 Function<Map.Entry<K,V>, ? extends U> transformer, 4244 Consumer<? super U> action) { 4245 if (transformer == null || action == null) 4246 throw new NullPointerException(); 4247 new ForEachTransformedEntryTask<K,V,U> 4248 (null, batchFor(parallelismThreshold), 0, 0, table, 4249 transformer, action).invoke(); 4250 } 4251 4252 /** 4253 * Returns a non-null result from applying the given search 4254 * function on each entry, or null if none. Upon success, 4255 * further element processing is suppressed and the results of 4256 * any other parallel invocations of the search function are 4257 * ignored. 4258 * 4259 * @param parallelismThreshold the (estimated) number of elements 4260 * needed for this operation to be executed in parallel 4261 * @param searchFunction a function returning a non-null 4262 * result on success, else null 4263 * @param <U> the return type of the search function 4264 * @return a non-null result from applying the given search 4265 * function on each entry, or null if none 4266 * @since 1.8 4267 */ searchEntries(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> searchFunction)4268 public <U> U searchEntries(long parallelismThreshold, 4269 Function<Map.Entry<K,V>, ? extends U> searchFunction) { 4270 if (searchFunction == null) throw new NullPointerException(); 4271 return new SearchEntriesTask<K,V,U> 4272 (null, batchFor(parallelismThreshold), 0, 0, table, 4273 searchFunction, new AtomicReference<U>()).invoke(); 4274 } 4275 4276 /** 4277 * Returns the result of accumulating all entries using the 4278 * given reducer to combine values, or null if none. 4279 * 4280 * @param parallelismThreshold the (estimated) number of elements 4281 * needed for this operation to be executed in parallel 4282 * @param reducer a commutative associative combining function 4283 * @return the result of accumulating all entries 4284 * @since 1.8 4285 */ reduceEntries(long parallelismThreshold, BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer)4286 public Map.Entry<K,V> reduceEntries(long parallelismThreshold, 4287 BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) { 4288 if (reducer == null) throw new NullPointerException(); 4289 return new ReduceEntriesTask<K,V> 4290 (null, batchFor(parallelismThreshold), 0, 0, table, 4291 null, reducer).invoke(); 4292 } 4293 4294 /** 4295 * Returns the result of accumulating the given transformation 4296 * of all entries using the given reducer to combine values, 4297 * or null if none. 4298 * 4299 * @param parallelismThreshold the (estimated) number of elements 4300 * needed for this operation to be executed in parallel 4301 * @param transformer a function returning the transformation 4302 * for an element, or null if there is no transformation (in 4303 * which case it is not combined) 4304 * @param reducer a commutative associative combining function 4305 * @param <U> the return type of the transformer 4306 * @return the result of accumulating the given transformation 4307 * of all entries 4308 * @since 1.8 4309 */ reduceEntries(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)4310 public <U> U reduceEntries(long parallelismThreshold, 4311 Function<Map.Entry<K,V>, ? extends U> transformer, 4312 BiFunction<? super U, ? super U, ? extends U> reducer) { 4313 if (transformer == null || reducer == null) 4314 throw new NullPointerException(); 4315 return new MapReduceEntriesTask<K,V,U> 4316 (null, batchFor(parallelismThreshold), 0, 0, table, 4317 null, transformer, reducer).invoke(); 4318 } 4319 4320 /** 4321 * Returns the result of accumulating the given transformation 4322 * of all entries using the given reducer to combine values, 4323 * and the given basis as an identity value. 4324 * 4325 * @param parallelismThreshold the (estimated) number of elements 4326 * needed for this operation to be executed in parallel 4327 * @param transformer a function returning the transformation 4328 * for an element 4329 * @param basis the identity (initial default value) for the reduction 4330 * @param reducer a commutative associative combining function 4331 * @return the result of accumulating the given transformation 4332 * of all entries 4333 * @since 1.8 4334 */ reduceEntriesToDouble(long parallelismThreshold, ToDoubleFunction<Map.Entry<K,V>> transformer, double basis, DoubleBinaryOperator reducer)4335 public double reduceEntriesToDouble(long parallelismThreshold, 4336 ToDoubleFunction<Map.Entry<K,V>> transformer, 4337 double basis, 4338 DoubleBinaryOperator reducer) { 4339 if (transformer == null || reducer == null) 4340 throw new NullPointerException(); 4341 return new MapReduceEntriesToDoubleTask<K,V> 4342 (null, batchFor(parallelismThreshold), 0, 0, table, 4343 null, transformer, basis, reducer).invoke(); 4344 } 4345 4346 /** 4347 * Returns the result of accumulating the given transformation 4348 * of all entries using the given reducer to combine values, 4349 * and the given basis as an identity value. 4350 * 4351 * @param parallelismThreshold the (estimated) number of elements 4352 * needed for this operation to be executed in parallel 4353 * @param transformer a function returning the transformation 4354 * for an element 4355 * @param basis the identity (initial default value) for the reduction 4356 * @param reducer a commutative associative combining function 4357 * @return the result of accumulating the given transformation 4358 * of all entries 4359 * @since 1.8 4360 */ reduceEntriesToLong(long parallelismThreshold, ToLongFunction<Map.Entry<K,V>> transformer, long basis, LongBinaryOperator reducer)4361 public long reduceEntriesToLong(long parallelismThreshold, 4362 ToLongFunction<Map.Entry<K,V>> transformer, 4363 long basis, 4364 LongBinaryOperator reducer) { 4365 if (transformer == null || reducer == null) 4366 throw new NullPointerException(); 4367 return new MapReduceEntriesToLongTask<K,V> 4368 (null, batchFor(parallelismThreshold), 0, 0, table, 4369 null, transformer, basis, reducer).invoke(); 4370 } 4371 4372 /** 4373 * Returns the result of accumulating the given transformation 4374 * of all entries using the given reducer to combine values, 4375 * and the given basis as an identity value. 4376 * 4377 * @param parallelismThreshold the (estimated) number of elements 4378 * needed for this operation to be executed in parallel 4379 * @param transformer a function returning the transformation 4380 * for an element 4381 * @param basis the identity (initial default value) for the reduction 4382 * @param reducer a commutative associative combining function 4383 * @return the result of accumulating the given transformation 4384 * of all entries 4385 * @since 1.8 4386 */ reduceEntriesToInt(long parallelismThreshold, ToIntFunction<Map.Entry<K,V>> transformer, int basis, IntBinaryOperator reducer)4387 public int reduceEntriesToInt(long parallelismThreshold, 4388 ToIntFunction<Map.Entry<K,V>> transformer, 4389 int basis, 4390 IntBinaryOperator reducer) { 4391 if (transformer == null || reducer == null) 4392 throw new NullPointerException(); 4393 return new MapReduceEntriesToIntTask<K,V> 4394 (null, batchFor(parallelismThreshold), 0, 0, table, 4395 null, transformer, basis, reducer).invoke(); 4396 } 4397 4398 4399 /* ----------------Views -------------- */ 4400 4401 /** 4402 * Base class for views. 4403 */ 4404 abstract static class CollectionView<K,V,E> 4405 implements Collection<E>, java.io.Serializable { 4406 private static final long serialVersionUID = 7249069246763182397L; 4407 final ConcurrentHashMap<K,V> map; CollectionView(ConcurrentHashMap<K,V> map)4408 CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; } 4409 4410 /** 4411 * Returns the map backing this view. 4412 * 4413 * @return the map backing this view 4414 */ getMap()4415 public ConcurrentHashMap<K,V> getMap() { return map; } 4416 4417 /** 4418 * Removes all of the elements from this view, by removing all 4419 * the mappings from the map backing this view. 4420 */ clear()4421 public final void clear() { map.clear(); } size()4422 public final int size() { return map.size(); } isEmpty()4423 public final boolean isEmpty() { return map.isEmpty(); } 4424 4425 // implementations below rely on concrete classes supplying these 4426 // abstract methods 4427 /** 4428 * Returns an iterator over the elements in this collection. 4429 * 4430 * <p>The returned iterator is 4431 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>. 4432 * 4433 * @return an iterator over the elements in this collection 4434 */ iterator()4435 public abstract Iterator<E> iterator(); contains(Object o)4436 public abstract boolean contains(Object o); remove(Object o)4437 public abstract boolean remove(Object o); 4438 4439 private static final String OOME_MSG = "Required array size too large"; 4440 toArray()4441 public final Object[] toArray() { 4442 long sz = map.mappingCount(); 4443 if (sz > MAX_ARRAY_SIZE) 4444 throw new OutOfMemoryError(OOME_MSG); 4445 int n = (int)sz; 4446 Object[] r = new Object[n]; 4447 int i = 0; 4448 for (E e : this) { 4449 if (i == n) { 4450 if (n >= MAX_ARRAY_SIZE) 4451 throw new OutOfMemoryError(OOME_MSG); 4452 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) 4453 n = MAX_ARRAY_SIZE; 4454 else 4455 n += (n >>> 1) + 1; 4456 r = Arrays.copyOf(r, n); 4457 } 4458 r[i++] = e; 4459 } 4460 return (i == n) ? r : Arrays.copyOf(r, i); 4461 } 4462 4463 @SuppressWarnings("unchecked") toArray(T[] a)4464 public final <T> T[] toArray(T[] a) { 4465 long sz = map.mappingCount(); 4466 if (sz > MAX_ARRAY_SIZE) 4467 throw new OutOfMemoryError(OOME_MSG); 4468 int m = (int)sz; 4469 T[] r = (a.length >= m) ? a : 4470 (T[])java.lang.reflect.Array 4471 .newInstance(a.getClass().getComponentType(), m); 4472 int n = r.length; 4473 int i = 0; 4474 for (E e : this) { 4475 if (i == n) { 4476 if (n >= MAX_ARRAY_SIZE) 4477 throw new OutOfMemoryError(OOME_MSG); 4478 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1) 4479 n = MAX_ARRAY_SIZE; 4480 else 4481 n += (n >>> 1) + 1; 4482 r = Arrays.copyOf(r, n); 4483 } 4484 r[i++] = (T)e; 4485 } 4486 if (a == r && i < n) { 4487 r[i] = null; // null-terminate 4488 return r; 4489 } 4490 return (i == n) ? r : Arrays.copyOf(r, i); 4491 } 4492 4493 /** 4494 * Returns a string representation of this collection. 4495 * The string representation consists of the string representations 4496 * of the collection's elements in the order they are returned by 4497 * its iterator, enclosed in square brackets ({@code "[]"}). 4498 * Adjacent elements are separated by the characters {@code ", "} 4499 * (comma and space). Elements are converted to strings as by 4500 * {@link String#valueOf(Object)}. 4501 * 4502 * @return a string representation of this collection 4503 */ toString()4504 public final String toString() { 4505 StringBuilder sb = new StringBuilder(); 4506 sb.append('['); 4507 Iterator<E> it = iterator(); 4508 if (it.hasNext()) { 4509 for (;;) { 4510 Object e = it.next(); 4511 sb.append(e == this ? "(this Collection)" : e); 4512 if (!it.hasNext()) 4513 break; 4514 sb.append(',').append(' '); 4515 } 4516 } 4517 return sb.append(']').toString(); 4518 } 4519 containsAll(Collection<?> c)4520 public final boolean containsAll(Collection<?> c) { 4521 if (c != this) { 4522 for (Object e : c) { 4523 if (e == null || !contains(e)) 4524 return false; 4525 } 4526 } 4527 return true; 4528 } 4529 removeAll(Collection<?> c)4530 public final boolean removeAll(Collection<?> c) { 4531 if (c == null) throw new NullPointerException(); 4532 boolean modified = false; 4533 for (Iterator<E> it = iterator(); it.hasNext();) { 4534 if (c.contains(it.next())) { 4535 it.remove(); 4536 modified = true; 4537 } 4538 } 4539 return modified; 4540 } 4541 retainAll(Collection<?> c)4542 public final boolean retainAll(Collection<?> c) { 4543 if (c == null) throw new NullPointerException(); 4544 boolean modified = false; 4545 for (Iterator<E> it = iterator(); it.hasNext();) { 4546 if (!c.contains(it.next())) { 4547 it.remove(); 4548 modified = true; 4549 } 4550 } 4551 return modified; 4552 } 4553 4554 } 4555 4556 /** 4557 * A view of a ConcurrentHashMap as a {@link Set} of keys, in 4558 * which additions may optionally be enabled by mapping to a 4559 * common value. This class cannot be directly instantiated. 4560 * See {@link #keySet(Object) keySet(V)}, 4561 * {@link #newKeySet() newKeySet()}, 4562 * {@link #newKeySet(int) newKeySet(int)}. 4563 * 4564 * @since 1.8 4565 */ 4566 public static class KeySetView<K,V> extends CollectionView<K,V,K> 4567 implements Set<K>, java.io.Serializable { 4568 private static final long serialVersionUID = 7249069246763182397L; 4569 private final V value; KeySetView(ConcurrentHashMap<K,V> map, V value)4570 KeySetView(ConcurrentHashMap<K,V> map, V value) { // non-public 4571 super(map); 4572 this.value = value; 4573 } 4574 4575 /** 4576 * Returns the default mapped value for additions, 4577 * or {@code null} if additions are not supported. 4578 * 4579 * @return the default mapped value for additions, or {@code null} 4580 * if not supported 4581 */ getMappedValue()4582 public V getMappedValue() { return value; } 4583 4584 /** 4585 * {@inheritDoc} 4586 * @throws NullPointerException if the specified key is null 4587 */ contains(Object o)4588 public boolean contains(Object o) { return map.containsKey(o); } 4589 4590 /** 4591 * Removes the key from this map view, by removing the key (and its 4592 * corresponding value) from the backing map. This method does 4593 * nothing if the key is not in the map. 4594 * 4595 * @param o the key to be removed from the backing map 4596 * @return {@code true} if the backing map contained the specified key 4597 * @throws NullPointerException if the specified key is null 4598 */ remove(Object o)4599 public boolean remove(Object o) { return map.remove(o) != null; } 4600 4601 /** 4602 * @return an iterator over the keys of the backing map 4603 */ iterator()4604 public Iterator<K> iterator() { 4605 Node<K,V>[] t; 4606 ConcurrentHashMap<K,V> m = map; 4607 int f = (t = m.table) == null ? 0 : t.length; 4608 return new KeyIterator<K,V>(t, f, 0, f, m); 4609 } 4610 4611 /** 4612 * Adds the specified key to this set view by mapping the key to 4613 * the default mapped value in the backing map, if defined. 4614 * 4615 * @param e key to be added 4616 * @return {@code true} if this set changed as a result of the call 4617 * @throws NullPointerException if the specified key is null 4618 * @throws UnsupportedOperationException if no default mapped value 4619 * for additions was provided 4620 */ add(K e)4621 public boolean add(K e) { 4622 V v; 4623 if ((v = value) == null) 4624 throw new UnsupportedOperationException(); 4625 return map.putVal(e, v, true) == null; 4626 } 4627 4628 /** 4629 * Adds all of the elements in the specified collection to this set, 4630 * as if by calling {@link #add} on each one. 4631 * 4632 * @param c the elements to be inserted into this set 4633 * @return {@code true} if this set changed as a result of the call 4634 * @throws NullPointerException if the collection or any of its 4635 * elements are {@code null} 4636 * @throws UnsupportedOperationException if no default mapped value 4637 * for additions was provided 4638 */ addAll(Collection<? extends K> c)4639 public boolean addAll(Collection<? extends K> c) { 4640 boolean added = false; 4641 V v; 4642 if ((v = value) == null) 4643 throw new UnsupportedOperationException(); 4644 for (K e : c) { 4645 if (map.putVal(e, v, true) == null) 4646 added = true; 4647 } 4648 return added; 4649 } 4650 hashCode()4651 public int hashCode() { 4652 int h = 0; 4653 for (K e : this) 4654 h += e.hashCode(); 4655 return h; 4656 } 4657 equals(Object o)4658 public boolean equals(Object o) { 4659 Set<?> c; 4660 return ((o instanceof Set) && 4661 ((c = (Set<?>)o) == this || 4662 (containsAll(c) && c.containsAll(this)))); 4663 } 4664 spliterator()4665 public Spliterator<K> spliterator() { 4666 Node<K,V>[] t; 4667 ConcurrentHashMap<K,V> m = map; 4668 long n = m.sumCount(); 4669 int f = (t = m.table) == null ? 0 : t.length; 4670 return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n); 4671 } 4672 forEach(Consumer<? super K> action)4673 public void forEach(Consumer<? super K> action) { 4674 if (action == null) throw new NullPointerException(); 4675 Node<K,V>[] t; 4676 if ((t = map.table) != null) { 4677 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 4678 for (Node<K,V> p; (p = it.advance()) != null; ) 4679 action.accept(p.key); 4680 } 4681 } 4682 } 4683 4684 /** 4685 * A view of a ConcurrentHashMap as a {@link Collection} of 4686 * values, in which additions are disabled. This class cannot be 4687 * directly instantiated. See {@link #values()}. 4688 */ 4689 static final class ValuesView<K,V> extends CollectionView<K,V,V> 4690 implements Collection<V>, java.io.Serializable { 4691 private static final long serialVersionUID = 2249069246763182397L; ValuesView(ConcurrentHashMap<K,V> map)4692 ValuesView(ConcurrentHashMap<K,V> map) { super(map); } contains(Object o)4693 public final boolean contains(Object o) { 4694 return map.containsValue(o); 4695 } 4696 remove(Object o)4697 public final boolean remove(Object o) { 4698 if (o != null) { 4699 for (Iterator<V> it = iterator(); it.hasNext();) { 4700 if (o.equals(it.next())) { 4701 it.remove(); 4702 return true; 4703 } 4704 } 4705 } 4706 return false; 4707 } 4708 iterator()4709 public final Iterator<V> iterator() { 4710 ConcurrentHashMap<K,V> m = map; 4711 Node<K,V>[] t; 4712 int f = (t = m.table) == null ? 0 : t.length; 4713 return new ValueIterator<K,V>(t, f, 0, f, m); 4714 } 4715 add(V e)4716 public final boolean add(V e) { 4717 throw new UnsupportedOperationException(); 4718 } addAll(Collection<? extends V> c)4719 public final boolean addAll(Collection<? extends V> c) { 4720 throw new UnsupportedOperationException(); 4721 } 4722 removeIf(Predicate<? super V> filter)4723 public boolean removeIf(Predicate<? super V> filter) { 4724 return map.removeValueIf(filter); 4725 } 4726 spliterator()4727 public Spliterator<V> spliterator() { 4728 Node<K,V>[] t; 4729 ConcurrentHashMap<K,V> m = map; 4730 long n = m.sumCount(); 4731 int f = (t = m.table) == null ? 0 : t.length; 4732 return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n); 4733 } 4734 forEach(Consumer<? super V> action)4735 public void forEach(Consumer<? super V> action) { 4736 if (action == null) throw new NullPointerException(); 4737 Node<K,V>[] t; 4738 if ((t = map.table) != null) { 4739 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 4740 for (Node<K,V> p; (p = it.advance()) != null; ) 4741 action.accept(p.val); 4742 } 4743 } 4744 } 4745 4746 /** 4747 * A view of a ConcurrentHashMap as a {@link Set} of (key, value) 4748 * entries. This class cannot be directly instantiated. See 4749 * {@link #entrySet()}. 4750 */ 4751 static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>> 4752 implements Set<Map.Entry<K,V>>, java.io.Serializable { 4753 private static final long serialVersionUID = 2249069246763182397L; EntrySetView(ConcurrentHashMap<K,V> map)4754 EntrySetView(ConcurrentHashMap<K,V> map) { super(map); } 4755 contains(Object o)4756 public boolean contains(Object o) { 4757 Object k, v, r; Map.Entry<?,?> e; 4758 return ((o instanceof Map.Entry) && 4759 (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 4760 (r = map.get(k)) != null && 4761 (v = e.getValue()) != null && 4762 (v == r || v.equals(r))); 4763 } 4764 remove(Object o)4765 public boolean remove(Object o) { 4766 Object k, v; Map.Entry<?,?> e; 4767 return ((o instanceof Map.Entry) && 4768 (k = (e = (Map.Entry<?,?>)o).getKey()) != null && 4769 (v = e.getValue()) != null && 4770 map.remove(k, v)); 4771 } 4772 4773 /** 4774 * @return an iterator over the entries of the backing map 4775 */ iterator()4776 public Iterator<Map.Entry<K,V>> iterator() { 4777 ConcurrentHashMap<K,V> m = map; 4778 Node<K,V>[] t; 4779 int f = (t = m.table) == null ? 0 : t.length; 4780 return new EntryIterator<K,V>(t, f, 0, f, m); 4781 } 4782 add(Entry<K,V> e)4783 public boolean add(Entry<K,V> e) { 4784 return map.putVal(e.getKey(), e.getValue(), false) == null; 4785 } 4786 addAll(Collection<? extends Entry<K,V>> c)4787 public boolean addAll(Collection<? extends Entry<K,V>> c) { 4788 boolean added = false; 4789 for (Entry<K,V> e : c) { 4790 if (add(e)) 4791 added = true; 4792 } 4793 return added; 4794 } 4795 removeIf(Predicate<? super Entry<K,V>> filter)4796 public boolean removeIf(Predicate<? super Entry<K,V>> filter) { 4797 return map.removeEntryIf(filter); 4798 } 4799 hashCode()4800 public final int hashCode() { 4801 int h = 0; 4802 Node<K,V>[] t; 4803 if ((t = map.table) != null) { 4804 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 4805 for (Node<K,V> p; (p = it.advance()) != null; ) { 4806 h += p.hashCode(); 4807 } 4808 } 4809 return h; 4810 } 4811 equals(Object o)4812 public final boolean equals(Object o) { 4813 Set<?> c; 4814 return ((o instanceof Set) && 4815 ((c = (Set<?>)o) == this || 4816 (containsAll(c) && c.containsAll(this)))); 4817 } 4818 spliterator()4819 public Spliterator<Map.Entry<K,V>> spliterator() { 4820 Node<K,V>[] t; 4821 ConcurrentHashMap<K,V> m = map; 4822 long n = m.sumCount(); 4823 int f = (t = m.table) == null ? 0 : t.length; 4824 return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m); 4825 } 4826 forEach(Consumer<? super Map.Entry<K,V>> action)4827 public void forEach(Consumer<? super Map.Entry<K,V>> action) { 4828 if (action == null) throw new NullPointerException(); 4829 Node<K,V>[] t; 4830 if ((t = map.table) != null) { 4831 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length); 4832 for (Node<K,V> p; (p = it.advance()) != null; ) 4833 action.accept(new MapEntry<K,V>(p.key, p.val, map)); 4834 } 4835 } 4836 4837 } 4838 4839 // ------------------------------------------------------- 4840 4841 /** 4842 * Base class for bulk tasks. Repeats some fields and code from 4843 * class Traverser, because we need to subclass CountedCompleter. 4844 */ 4845 @SuppressWarnings("serial") 4846 abstract static class BulkTask<K,V,R> extends CountedCompleter<R> { 4847 Node<K,V>[] tab; // same as Traverser 4848 Node<K,V> next; 4849 TableStack<K,V> stack, spare; 4850 int index; 4851 int baseIndex; 4852 int baseLimit; 4853 final int baseSize; 4854 int batch; // split control 4855 BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t)4856 BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) { 4857 super(par); 4858 this.batch = b; 4859 this.index = this.baseIndex = i; 4860 if ((this.tab = t) == null) 4861 this.baseSize = this.baseLimit = 0; 4862 else if (par == null) 4863 this.baseSize = this.baseLimit = t.length; 4864 else { 4865 this.baseLimit = f; 4866 this.baseSize = par.baseSize; 4867 } 4868 } 4869 4870 /** 4871 * Same as Traverser version. 4872 */ advance()4873 final Node<K,V> advance() { 4874 Node<K,V> e; 4875 if ((e = next) != null) 4876 e = e.next; 4877 for (;;) { 4878 Node<K,V>[] t; int i, n; 4879 if (e != null) 4880 return next = e; 4881 if (baseIndex >= baseLimit || (t = tab) == null || 4882 (n = t.length) <= (i = index) || i < 0) 4883 return next = null; 4884 if ((e = tabAt(t, i)) != null && e.hash < 0) { 4885 if (e instanceof ForwardingNode) { 4886 tab = ((ForwardingNode<K,V>)e).nextTable; 4887 e = null; 4888 pushState(t, i, n); 4889 continue; 4890 } 4891 else if (e instanceof TreeBin) 4892 e = ((TreeBin<K,V>)e).first; 4893 else 4894 e = null; 4895 } 4896 if (stack != null) 4897 recoverState(n); 4898 else if ((index = i + baseSize) >= n) 4899 index = ++baseIndex; 4900 } 4901 } 4902 pushState(Node<K,V>[] t, int i, int n)4903 private void pushState(Node<K,V>[] t, int i, int n) { 4904 TableStack<K,V> s = spare; 4905 if (s != null) 4906 spare = s.next; 4907 else 4908 s = new TableStack<K,V>(); 4909 s.tab = t; 4910 s.length = n; 4911 s.index = i; 4912 s.next = stack; 4913 stack = s; 4914 } 4915 recoverState(int n)4916 private void recoverState(int n) { 4917 TableStack<K,V> s; int len; 4918 while ((s = stack) != null && (index += (len = s.length)) >= n) { 4919 n = len; 4920 index = s.index; 4921 tab = s.tab; 4922 s.tab = null; 4923 TableStack<K,V> next = s.next; 4924 s.next = spare; // save for reuse 4925 stack = next; 4926 spare = s; 4927 } 4928 if (s == null && (index += baseSize) >= n) 4929 index = ++baseIndex; 4930 } 4931 } 4932 4933 /* 4934 * Task classes. Coded in a regular but ugly format/style to 4935 * simplify checks that each variant differs in the right way from 4936 * others. The null screenings exist because compilers cannot tell 4937 * that we've already null-checked task arguments, so we force 4938 * simplest hoisted bypass to help avoid convoluted traps. 4939 */ 4940 @SuppressWarnings("serial") 4941 static final class ForEachKeyTask<K,V> 4942 extends BulkTask<K,V,Void> { 4943 final Consumer<? super K> action; ForEachKeyTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super K> action)4944 ForEachKeyTask 4945 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 4946 Consumer<? super K> action) { 4947 super(p, b, i, f, t); 4948 this.action = action; 4949 } compute()4950 public final void compute() { 4951 final Consumer<? super K> action; 4952 if ((action = this.action) != null) { 4953 for (int i = baseIndex, f, h; batch > 0 && 4954 (h = ((f = baseLimit) + i) >>> 1) > i;) { 4955 addToPendingCount(1); 4956 new ForEachKeyTask<K,V> 4957 (this, batch >>>= 1, baseLimit = h, f, tab, 4958 action).fork(); 4959 } 4960 for (Node<K,V> p; (p = advance()) != null;) 4961 action.accept(p.key); 4962 propagateCompletion(); 4963 } 4964 } 4965 } 4966 4967 @SuppressWarnings("serial") 4968 static final class ForEachValueTask<K,V> 4969 extends BulkTask<K,V,Void> { 4970 final Consumer<? super V> action; ForEachValueTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super V> action)4971 ForEachValueTask 4972 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 4973 Consumer<? super V> action) { 4974 super(p, b, i, f, t); 4975 this.action = action; 4976 } compute()4977 public final void compute() { 4978 final Consumer<? super V> action; 4979 if ((action = this.action) != null) { 4980 for (int i = baseIndex, f, h; batch > 0 && 4981 (h = ((f = baseLimit) + i) >>> 1) > i;) { 4982 addToPendingCount(1); 4983 new ForEachValueTask<K,V> 4984 (this, batch >>>= 1, baseLimit = h, f, tab, 4985 action).fork(); 4986 } 4987 for (Node<K,V> p; (p = advance()) != null;) 4988 action.accept(p.val); 4989 propagateCompletion(); 4990 } 4991 } 4992 } 4993 4994 @SuppressWarnings("serial") 4995 static final class ForEachEntryTask<K,V> 4996 extends BulkTask<K,V,Void> { 4997 final Consumer<? super Entry<K,V>> action; ForEachEntryTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super Entry<K,V>> action)4998 ForEachEntryTask 4999 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5000 Consumer<? super Entry<K,V>> action) { 5001 super(p, b, i, f, t); 5002 this.action = action; 5003 } compute()5004 public final void compute() { 5005 final Consumer<? super Entry<K,V>> action; 5006 if ((action = this.action) != null) { 5007 for (int i = baseIndex, f, h; batch > 0 && 5008 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5009 addToPendingCount(1); 5010 new ForEachEntryTask<K,V> 5011 (this, batch >>>= 1, baseLimit = h, f, tab, 5012 action).fork(); 5013 } 5014 for (Node<K,V> p; (p = advance()) != null; ) 5015 action.accept(p); 5016 propagateCompletion(); 5017 } 5018 } 5019 } 5020 5021 @SuppressWarnings("serial") 5022 static final class ForEachMappingTask<K,V> 5023 extends BulkTask<K,V,Void> { 5024 final BiConsumer<? super K, ? super V> action; ForEachMappingTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiConsumer<? super K,? super V> action)5025 ForEachMappingTask 5026 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5027 BiConsumer<? super K,? super V> action) { 5028 super(p, b, i, f, t); 5029 this.action = action; 5030 } compute()5031 public final void compute() { 5032 final BiConsumer<? super K, ? super V> action; 5033 if ((action = this.action) != null) { 5034 for (int i = baseIndex, f, h; batch > 0 && 5035 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5036 addToPendingCount(1); 5037 new ForEachMappingTask<K,V> 5038 (this, batch >>>= 1, baseLimit = h, f, tab, 5039 action).fork(); 5040 } 5041 for (Node<K,V> p; (p = advance()) != null; ) 5042 action.accept(p.key, p.val); 5043 propagateCompletion(); 5044 } 5045 } 5046 } 5047 5048 @SuppressWarnings("serial") 5049 static final class ForEachTransformedKeyTask<K,V,U> 5050 extends BulkTask<K,V,Void> { 5051 final Function<? super K, ? extends U> transformer; 5052 final Consumer<? super U> action; ForEachTransformedKeyTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super K, ? extends U> transformer, Consumer<? super U> action)5053 ForEachTransformedKeyTask 5054 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5055 Function<? super K, ? extends U> transformer, Consumer<? super U> action) { 5056 super(p, b, i, f, t); 5057 this.transformer = transformer; this.action = action; 5058 } compute()5059 public final void compute() { 5060 final Function<? super K, ? extends U> transformer; 5061 final Consumer<? super U> action; 5062 if ((transformer = this.transformer) != null && 5063 (action = this.action) != null) { 5064 for (int i = baseIndex, f, h; batch > 0 && 5065 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5066 addToPendingCount(1); 5067 new ForEachTransformedKeyTask<K,V,U> 5068 (this, batch >>>= 1, baseLimit = h, f, tab, 5069 transformer, action).fork(); 5070 } 5071 for (Node<K,V> p; (p = advance()) != null; ) { 5072 U u; 5073 if ((u = transformer.apply(p.key)) != null) 5074 action.accept(u); 5075 } 5076 propagateCompletion(); 5077 } 5078 } 5079 } 5080 5081 @SuppressWarnings("serial") 5082 static final class ForEachTransformedValueTask<K,V,U> 5083 extends BulkTask<K,V,Void> { 5084 final Function<? super V, ? extends U> transformer; 5085 final Consumer<? super U> action; ForEachTransformedValueTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super V, ? extends U> transformer, Consumer<? super U> action)5086 ForEachTransformedValueTask 5087 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5088 Function<? super V, ? extends U> transformer, Consumer<? super U> action) { 5089 super(p, b, i, f, t); 5090 this.transformer = transformer; this.action = action; 5091 } compute()5092 public final void compute() { 5093 final Function<? super V, ? extends U> transformer; 5094 final Consumer<? super U> action; 5095 if ((transformer = this.transformer) != null && 5096 (action = this.action) != null) { 5097 for (int i = baseIndex, f, h; batch > 0 && 5098 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5099 addToPendingCount(1); 5100 new ForEachTransformedValueTask<K,V,U> 5101 (this, batch >>>= 1, baseLimit = h, f, tab, 5102 transformer, action).fork(); 5103 } 5104 for (Node<K,V> p; (p = advance()) != null; ) { 5105 U u; 5106 if ((u = transformer.apply(p.val)) != null) 5107 action.accept(u); 5108 } 5109 propagateCompletion(); 5110 } 5111 } 5112 } 5113 5114 @SuppressWarnings("serial") 5115 static final class ForEachTransformedEntryTask<K,V,U> 5116 extends BulkTask<K,V,Void> { 5117 final Function<Map.Entry<K,V>, ? extends U> transformer; 5118 final Consumer<? super U> action; ForEachTransformedEntryTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action)5119 ForEachTransformedEntryTask 5120 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5121 Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) { 5122 super(p, b, i, f, t); 5123 this.transformer = transformer; this.action = action; 5124 } compute()5125 public final void compute() { 5126 final Function<Map.Entry<K,V>, ? extends U> transformer; 5127 final Consumer<? super U> action; 5128 if ((transformer = this.transformer) != null && 5129 (action = this.action) != null) { 5130 for (int i = baseIndex, f, h; batch > 0 && 5131 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5132 addToPendingCount(1); 5133 new ForEachTransformedEntryTask<K,V,U> 5134 (this, batch >>>= 1, baseLimit = h, f, tab, 5135 transformer, action).fork(); 5136 } 5137 for (Node<K,V> p; (p = advance()) != null; ) { 5138 U u; 5139 if ((u = transformer.apply(p)) != null) 5140 action.accept(u); 5141 } 5142 propagateCompletion(); 5143 } 5144 } 5145 } 5146 5147 @SuppressWarnings("serial") 5148 static final class ForEachTransformedMappingTask<K,V,U> 5149 extends BulkTask<K,V,Void> { 5150 final BiFunction<? super K, ? super V, ? extends U> transformer; 5151 final Consumer<? super U> action; ForEachTransformedMappingTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiFunction<? super K, ? super V, ? extends U> transformer, Consumer<? super U> action)5152 ForEachTransformedMappingTask 5153 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5154 BiFunction<? super K, ? super V, ? extends U> transformer, 5155 Consumer<? super U> action) { 5156 super(p, b, i, f, t); 5157 this.transformer = transformer; this.action = action; 5158 } compute()5159 public final void compute() { 5160 final BiFunction<? super K, ? super V, ? extends U> transformer; 5161 final Consumer<? super U> action; 5162 if ((transformer = this.transformer) != null && 5163 (action = this.action) != null) { 5164 for (int i = baseIndex, f, h; batch > 0 && 5165 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5166 addToPendingCount(1); 5167 new ForEachTransformedMappingTask<K,V,U> 5168 (this, batch >>>= 1, baseLimit = h, f, tab, 5169 transformer, action).fork(); 5170 } 5171 for (Node<K,V> p; (p = advance()) != null; ) { 5172 U u; 5173 if ((u = transformer.apply(p.key, p.val)) != null) 5174 action.accept(u); 5175 } 5176 propagateCompletion(); 5177 } 5178 } 5179 } 5180 5181 @SuppressWarnings("serial") 5182 static final class SearchKeysTask<K,V,U> 5183 extends BulkTask<K,V,U> { 5184 final Function<? super K, ? extends U> searchFunction; 5185 final AtomicReference<U> result; SearchKeysTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super K, ? extends U> searchFunction, AtomicReference<U> result)5186 SearchKeysTask 5187 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5188 Function<? super K, ? extends U> searchFunction, 5189 AtomicReference<U> result) { 5190 super(p, b, i, f, t); 5191 this.searchFunction = searchFunction; this.result = result; 5192 } getRawResult()5193 public final U getRawResult() { return result.get(); } compute()5194 public final void compute() { 5195 final Function<? super K, ? extends U> searchFunction; 5196 final AtomicReference<U> result; 5197 if ((searchFunction = this.searchFunction) != null && 5198 (result = this.result) != null) { 5199 for (int i = baseIndex, f, h; batch > 0 && 5200 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5201 if (result.get() != null) 5202 return; 5203 addToPendingCount(1); 5204 new SearchKeysTask<K,V,U> 5205 (this, batch >>>= 1, baseLimit = h, f, tab, 5206 searchFunction, result).fork(); 5207 } 5208 while (result.get() == null) { 5209 U u; 5210 Node<K,V> p; 5211 if ((p = advance()) == null) { 5212 propagateCompletion(); 5213 break; 5214 } 5215 if ((u = searchFunction.apply(p.key)) != null) { 5216 if (result.compareAndSet(null, u)) 5217 quietlyCompleteRoot(); 5218 break; 5219 } 5220 } 5221 } 5222 } 5223 } 5224 5225 @SuppressWarnings("serial") 5226 static final class SearchValuesTask<K,V,U> 5227 extends BulkTask<K,V,U> { 5228 final Function<? super V, ? extends U> searchFunction; 5229 final AtomicReference<U> result; SearchValuesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super V, ? extends U> searchFunction, AtomicReference<U> result)5230 SearchValuesTask 5231 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5232 Function<? super V, ? extends U> searchFunction, 5233 AtomicReference<U> result) { 5234 super(p, b, i, f, t); 5235 this.searchFunction = searchFunction; this.result = result; 5236 } getRawResult()5237 public final U getRawResult() { return result.get(); } compute()5238 public final void compute() { 5239 final Function<? super V, ? extends U> searchFunction; 5240 final AtomicReference<U> result; 5241 if ((searchFunction = this.searchFunction) != null && 5242 (result = this.result) != null) { 5243 for (int i = baseIndex, f, h; batch > 0 && 5244 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5245 if (result.get() != null) 5246 return; 5247 addToPendingCount(1); 5248 new SearchValuesTask<K,V,U> 5249 (this, batch >>>= 1, baseLimit = h, f, tab, 5250 searchFunction, result).fork(); 5251 } 5252 while (result.get() == null) { 5253 U u; 5254 Node<K,V> p; 5255 if ((p = advance()) == null) { 5256 propagateCompletion(); 5257 break; 5258 } 5259 if ((u = searchFunction.apply(p.val)) != null) { 5260 if (result.compareAndSet(null, u)) 5261 quietlyCompleteRoot(); 5262 break; 5263 } 5264 } 5265 } 5266 } 5267 } 5268 5269 @SuppressWarnings("serial") 5270 static final class SearchEntriesTask<K,V,U> 5271 extends BulkTask<K,V,U> { 5272 final Function<Entry<K,V>, ? extends U> searchFunction; 5273 final AtomicReference<U> result; SearchEntriesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<Entry<K,V>, ? extends U> searchFunction, AtomicReference<U> result)5274 SearchEntriesTask 5275 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5276 Function<Entry<K,V>, ? extends U> searchFunction, 5277 AtomicReference<U> result) { 5278 super(p, b, i, f, t); 5279 this.searchFunction = searchFunction; this.result = result; 5280 } getRawResult()5281 public final U getRawResult() { return result.get(); } compute()5282 public final void compute() { 5283 final Function<Entry<K,V>, ? extends U> searchFunction; 5284 final AtomicReference<U> result; 5285 if ((searchFunction = this.searchFunction) != null && 5286 (result = this.result) != null) { 5287 for (int i = baseIndex, f, h; batch > 0 && 5288 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5289 if (result.get() != null) 5290 return; 5291 addToPendingCount(1); 5292 new SearchEntriesTask<K,V,U> 5293 (this, batch >>>= 1, baseLimit = h, f, tab, 5294 searchFunction, result).fork(); 5295 } 5296 while (result.get() == null) { 5297 U u; 5298 Node<K,V> p; 5299 if ((p = advance()) == null) { 5300 propagateCompletion(); 5301 break; 5302 } 5303 if ((u = searchFunction.apply(p)) != null) { 5304 if (result.compareAndSet(null, u)) 5305 quietlyCompleteRoot(); 5306 return; 5307 } 5308 } 5309 } 5310 } 5311 } 5312 5313 @SuppressWarnings("serial") 5314 static final class SearchMappingsTask<K,V,U> 5315 extends BulkTask<K,V,U> { 5316 final BiFunction<? super K, ? super V, ? extends U> searchFunction; 5317 final AtomicReference<U> result; SearchMappingsTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiFunction<? super K, ? super V, ? extends U> searchFunction, AtomicReference<U> result)5318 SearchMappingsTask 5319 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5320 BiFunction<? super K, ? super V, ? extends U> searchFunction, 5321 AtomicReference<U> result) { 5322 super(p, b, i, f, t); 5323 this.searchFunction = searchFunction; this.result = result; 5324 } getRawResult()5325 public final U getRawResult() { return result.get(); } compute()5326 public final void compute() { 5327 final BiFunction<? super K, ? super V, ? extends U> searchFunction; 5328 final AtomicReference<U> result; 5329 if ((searchFunction = this.searchFunction) != null && 5330 (result = this.result) != null) { 5331 for (int i = baseIndex, f, h; batch > 0 && 5332 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5333 if (result.get() != null) 5334 return; 5335 addToPendingCount(1); 5336 new SearchMappingsTask<K,V,U> 5337 (this, batch >>>= 1, baseLimit = h, f, tab, 5338 searchFunction, result).fork(); 5339 } 5340 while (result.get() == null) { 5341 U u; 5342 Node<K,V> p; 5343 if ((p = advance()) == null) { 5344 propagateCompletion(); 5345 break; 5346 } 5347 if ((u = searchFunction.apply(p.key, p.val)) != null) { 5348 if (result.compareAndSet(null, u)) 5349 quietlyCompleteRoot(); 5350 break; 5351 } 5352 } 5353 } 5354 } 5355 } 5356 5357 @SuppressWarnings("serial") 5358 static final class ReduceKeysTask<K,V> 5359 extends BulkTask<K,V,K> { 5360 final BiFunction<? super K, ? super K, ? extends K> reducer; 5361 K result; 5362 ReduceKeysTask<K,V> rights, nextRight; ReduceKeysTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceKeysTask<K,V> nextRight, BiFunction<? super K, ? super K, ? extends K> reducer)5363 ReduceKeysTask 5364 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5365 ReduceKeysTask<K,V> nextRight, 5366 BiFunction<? super K, ? super K, ? extends K> reducer) { 5367 super(p, b, i, f, t); this.nextRight = nextRight; 5368 this.reducer = reducer; 5369 } getRawResult()5370 public final K getRawResult() { return result; } compute()5371 public final void compute() { 5372 final BiFunction<? super K, ? super K, ? extends K> reducer; 5373 if ((reducer = this.reducer) != null) { 5374 for (int i = baseIndex, f, h; batch > 0 && 5375 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5376 addToPendingCount(1); 5377 (rights = new ReduceKeysTask<K,V> 5378 (this, batch >>>= 1, baseLimit = h, f, tab, 5379 rights, reducer)).fork(); 5380 } 5381 K r = null; 5382 for (Node<K,V> p; (p = advance()) != null; ) { 5383 K u = p.key; 5384 r = (r == null) ? u : u == null ? r : reducer.apply(r, u); 5385 } 5386 result = r; 5387 CountedCompleter<?> c; 5388 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5389 @SuppressWarnings("unchecked") 5390 ReduceKeysTask<K,V> 5391 t = (ReduceKeysTask<K,V>)c, 5392 s = t.rights; 5393 while (s != null) { 5394 K tr, sr; 5395 if ((sr = s.result) != null) 5396 t.result = (((tr = t.result) == null) ? sr : 5397 reducer.apply(tr, sr)); 5398 s = t.rights = s.nextRight; 5399 } 5400 } 5401 } 5402 } 5403 } 5404 5405 @SuppressWarnings("serial") 5406 static final class ReduceValuesTask<K,V> 5407 extends BulkTask<K,V,V> { 5408 final BiFunction<? super V, ? super V, ? extends V> reducer; 5409 V result; 5410 ReduceValuesTask<K,V> rights, nextRight; ReduceValuesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceValuesTask<K,V> nextRight, BiFunction<? super V, ? super V, ? extends V> reducer)5411 ReduceValuesTask 5412 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5413 ReduceValuesTask<K,V> nextRight, 5414 BiFunction<? super V, ? super V, ? extends V> reducer) { 5415 super(p, b, i, f, t); this.nextRight = nextRight; 5416 this.reducer = reducer; 5417 } getRawResult()5418 public final V getRawResult() { return result; } compute()5419 public final void compute() { 5420 final BiFunction<? super V, ? super V, ? extends V> reducer; 5421 if ((reducer = this.reducer) != null) { 5422 for (int i = baseIndex, f, h; batch > 0 && 5423 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5424 addToPendingCount(1); 5425 (rights = new ReduceValuesTask<K,V> 5426 (this, batch >>>= 1, baseLimit = h, f, tab, 5427 rights, reducer)).fork(); 5428 } 5429 V r = null; 5430 for (Node<K,V> p; (p = advance()) != null; ) { 5431 V v = p.val; 5432 r = (r == null) ? v : reducer.apply(r, v); 5433 } 5434 result = r; 5435 CountedCompleter<?> c; 5436 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5437 @SuppressWarnings("unchecked") 5438 ReduceValuesTask<K,V> 5439 t = (ReduceValuesTask<K,V>)c, 5440 s = t.rights; 5441 while (s != null) { 5442 V tr, sr; 5443 if ((sr = s.result) != null) 5444 t.result = (((tr = t.result) == null) ? sr : 5445 reducer.apply(tr, sr)); 5446 s = t.rights = s.nextRight; 5447 } 5448 } 5449 } 5450 } 5451 } 5452 5453 @SuppressWarnings("serial") 5454 static final class ReduceEntriesTask<K,V> 5455 extends BulkTask<K,V,Map.Entry<K,V>> { 5456 final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer; 5457 Map.Entry<K,V> result; 5458 ReduceEntriesTask<K,V> rights, nextRight; ReduceEntriesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceEntriesTask<K,V> nextRight, BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer)5459 ReduceEntriesTask 5460 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5461 ReduceEntriesTask<K,V> nextRight, 5462 BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) { 5463 super(p, b, i, f, t); this.nextRight = nextRight; 5464 this.reducer = reducer; 5465 } getRawResult()5466 public final Map.Entry<K,V> getRawResult() { return result; } compute()5467 public final void compute() { 5468 final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer; 5469 if ((reducer = this.reducer) != null) { 5470 for (int i = baseIndex, f, h; batch > 0 && 5471 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5472 addToPendingCount(1); 5473 (rights = new ReduceEntriesTask<K,V> 5474 (this, batch >>>= 1, baseLimit = h, f, tab, 5475 rights, reducer)).fork(); 5476 } 5477 Map.Entry<K,V> r = null; 5478 for (Node<K,V> p; (p = advance()) != null; ) 5479 r = (r == null) ? p : reducer.apply(r, p); 5480 result = r; 5481 CountedCompleter<?> c; 5482 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5483 @SuppressWarnings("unchecked") 5484 ReduceEntriesTask<K,V> 5485 t = (ReduceEntriesTask<K,V>)c, 5486 s = t.rights; 5487 while (s != null) { 5488 Map.Entry<K,V> tr, sr; 5489 if ((sr = s.result) != null) 5490 t.result = (((tr = t.result) == null) ? sr : 5491 reducer.apply(tr, sr)); 5492 s = t.rights = s.nextRight; 5493 } 5494 } 5495 } 5496 } 5497 } 5498 5499 @SuppressWarnings("serial") 5500 static final class MapReduceKeysTask<K,V,U> 5501 extends BulkTask<K,V,U> { 5502 final Function<? super K, ? extends U> transformer; 5503 final BiFunction<? super U, ? super U, ? extends U> reducer; 5504 U result; 5505 MapReduceKeysTask<K,V,U> rights, nextRight; MapReduceKeysTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysTask<K,V,U> nextRight, Function<? super K, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5506 MapReduceKeysTask 5507 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5508 MapReduceKeysTask<K,V,U> nextRight, 5509 Function<? super K, ? extends U> transformer, 5510 BiFunction<? super U, ? super U, ? extends U> reducer) { 5511 super(p, b, i, f, t); this.nextRight = nextRight; 5512 this.transformer = transformer; 5513 this.reducer = reducer; 5514 } getRawResult()5515 public final U getRawResult() { return result; } compute()5516 public final void compute() { 5517 final Function<? super K, ? extends U> transformer; 5518 final BiFunction<? super U, ? super U, ? extends U> reducer; 5519 if ((transformer = this.transformer) != null && 5520 (reducer = this.reducer) != null) { 5521 for (int i = baseIndex, f, h; batch > 0 && 5522 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5523 addToPendingCount(1); 5524 (rights = new MapReduceKeysTask<K,V,U> 5525 (this, batch >>>= 1, baseLimit = h, f, tab, 5526 rights, transformer, reducer)).fork(); 5527 } 5528 U r = null; 5529 for (Node<K,V> p; (p = advance()) != null; ) { 5530 U u; 5531 if ((u = transformer.apply(p.key)) != null) 5532 r = (r == null) ? u : reducer.apply(r, u); 5533 } 5534 result = r; 5535 CountedCompleter<?> c; 5536 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5537 @SuppressWarnings("unchecked") 5538 MapReduceKeysTask<K,V,U> 5539 t = (MapReduceKeysTask<K,V,U>)c, 5540 s = t.rights; 5541 while (s != null) { 5542 U tr, sr; 5543 if ((sr = s.result) != null) 5544 t.result = (((tr = t.result) == null) ? sr : 5545 reducer.apply(tr, sr)); 5546 s = t.rights = s.nextRight; 5547 } 5548 } 5549 } 5550 } 5551 } 5552 5553 @SuppressWarnings("serial") 5554 static final class MapReduceValuesTask<K,V,U> 5555 extends BulkTask<K,V,U> { 5556 final Function<? super V, ? extends U> transformer; 5557 final BiFunction<? super U, ? super U, ? extends U> reducer; 5558 U result; 5559 MapReduceValuesTask<K,V,U> rights, nextRight; MapReduceValuesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesTask<K,V,U> nextRight, Function<? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5560 MapReduceValuesTask 5561 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5562 MapReduceValuesTask<K,V,U> nextRight, 5563 Function<? super V, ? extends U> transformer, 5564 BiFunction<? super U, ? super U, ? extends U> reducer) { 5565 super(p, b, i, f, t); this.nextRight = nextRight; 5566 this.transformer = transformer; 5567 this.reducer = reducer; 5568 } getRawResult()5569 public final U getRawResult() { return result; } compute()5570 public final void compute() { 5571 final Function<? super V, ? extends U> transformer; 5572 final BiFunction<? super U, ? super U, ? extends U> reducer; 5573 if ((transformer = this.transformer) != null && 5574 (reducer = this.reducer) != null) { 5575 for (int i = baseIndex, f, h; batch > 0 && 5576 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5577 addToPendingCount(1); 5578 (rights = new MapReduceValuesTask<K,V,U> 5579 (this, batch >>>= 1, baseLimit = h, f, tab, 5580 rights, transformer, reducer)).fork(); 5581 } 5582 U r = null; 5583 for (Node<K,V> p; (p = advance()) != null; ) { 5584 U u; 5585 if ((u = transformer.apply(p.val)) != null) 5586 r = (r == null) ? u : reducer.apply(r, u); 5587 } 5588 result = r; 5589 CountedCompleter<?> c; 5590 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5591 @SuppressWarnings("unchecked") 5592 MapReduceValuesTask<K,V,U> 5593 t = (MapReduceValuesTask<K,V,U>)c, 5594 s = t.rights; 5595 while (s != null) { 5596 U tr, sr; 5597 if ((sr = s.result) != null) 5598 t.result = (((tr = t.result) == null) ? sr : 5599 reducer.apply(tr, sr)); 5600 s = t.rights = s.nextRight; 5601 } 5602 } 5603 } 5604 } 5605 } 5606 5607 @SuppressWarnings("serial") 5608 static final class MapReduceEntriesTask<K,V,U> 5609 extends BulkTask<K,V,U> { 5610 final Function<Map.Entry<K,V>, ? extends U> transformer; 5611 final BiFunction<? super U, ? super U, ? extends U> reducer; 5612 U result; 5613 MapReduceEntriesTask<K,V,U> rights, nextRight; MapReduceEntriesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesTask<K,V,U> nextRight, Function<Map.Entry<K,V>, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5614 MapReduceEntriesTask 5615 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5616 MapReduceEntriesTask<K,V,U> nextRight, 5617 Function<Map.Entry<K,V>, ? extends U> transformer, 5618 BiFunction<? super U, ? super U, ? extends U> reducer) { 5619 super(p, b, i, f, t); this.nextRight = nextRight; 5620 this.transformer = transformer; 5621 this.reducer = reducer; 5622 } getRawResult()5623 public final U getRawResult() { return result; } compute()5624 public final void compute() { 5625 final Function<Map.Entry<K,V>, ? extends U> transformer; 5626 final BiFunction<? super U, ? super U, ? extends U> reducer; 5627 if ((transformer = this.transformer) != null && 5628 (reducer = this.reducer) != null) { 5629 for (int i = baseIndex, f, h; batch > 0 && 5630 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5631 addToPendingCount(1); 5632 (rights = new MapReduceEntriesTask<K,V,U> 5633 (this, batch >>>= 1, baseLimit = h, f, tab, 5634 rights, transformer, reducer)).fork(); 5635 } 5636 U r = null; 5637 for (Node<K,V> p; (p = advance()) != null; ) { 5638 U u; 5639 if ((u = transformer.apply(p)) != null) 5640 r = (r == null) ? u : reducer.apply(r, u); 5641 } 5642 result = r; 5643 CountedCompleter<?> c; 5644 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5645 @SuppressWarnings("unchecked") 5646 MapReduceEntriesTask<K,V,U> 5647 t = (MapReduceEntriesTask<K,V,U>)c, 5648 s = t.rights; 5649 while (s != null) { 5650 U tr, sr; 5651 if ((sr = s.result) != null) 5652 t.result = (((tr = t.result) == null) ? sr : 5653 reducer.apply(tr, sr)); 5654 s = t.rights = s.nextRight; 5655 } 5656 } 5657 } 5658 } 5659 } 5660 5661 @SuppressWarnings("serial") 5662 static final class MapReduceMappingsTask<K,V,U> 5663 extends BulkTask<K,V,U> { 5664 final BiFunction<? super K, ? super V, ? extends U> transformer; 5665 final BiFunction<? super U, ? super U, ? extends U> reducer; 5666 U result; 5667 MapReduceMappingsTask<K,V,U> rights, nextRight; MapReduceMappingsTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsTask<K,V,U> nextRight, BiFunction<? super K, ? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5668 MapReduceMappingsTask 5669 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5670 MapReduceMappingsTask<K,V,U> nextRight, 5671 BiFunction<? super K, ? super V, ? extends U> transformer, 5672 BiFunction<? super U, ? super U, ? extends U> reducer) { 5673 super(p, b, i, f, t); this.nextRight = nextRight; 5674 this.transformer = transformer; 5675 this.reducer = reducer; 5676 } getRawResult()5677 public final U getRawResult() { return result; } compute()5678 public final void compute() { 5679 final BiFunction<? super K, ? super V, ? extends U> transformer; 5680 final BiFunction<? super U, ? super U, ? extends U> reducer; 5681 if ((transformer = this.transformer) != null && 5682 (reducer = this.reducer) != null) { 5683 for (int i = baseIndex, f, h; batch > 0 && 5684 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5685 addToPendingCount(1); 5686 (rights = new MapReduceMappingsTask<K,V,U> 5687 (this, batch >>>= 1, baseLimit = h, f, tab, 5688 rights, transformer, reducer)).fork(); 5689 } 5690 U r = null; 5691 for (Node<K,V> p; (p = advance()) != null; ) { 5692 U u; 5693 if ((u = transformer.apply(p.key, p.val)) != null) 5694 r = (r == null) ? u : reducer.apply(r, u); 5695 } 5696 result = r; 5697 CountedCompleter<?> c; 5698 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5699 @SuppressWarnings("unchecked") 5700 MapReduceMappingsTask<K,V,U> 5701 t = (MapReduceMappingsTask<K,V,U>)c, 5702 s = t.rights; 5703 while (s != null) { 5704 U tr, sr; 5705 if ((sr = s.result) != null) 5706 t.result = (((tr = t.result) == null) ? sr : 5707 reducer.apply(tr, sr)); 5708 s = t.rights = s.nextRight; 5709 } 5710 } 5711 } 5712 } 5713 } 5714 5715 @SuppressWarnings("serial") 5716 static final class MapReduceKeysToDoubleTask<K,V> 5717 extends BulkTask<K,V,Double> { 5718 final ToDoubleFunction<? super K> transformer; 5719 final DoubleBinaryOperator reducer; 5720 final double basis; 5721 double result; 5722 MapReduceKeysToDoubleTask<K,V> rights, nextRight; MapReduceKeysToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToDoubleTask<K,V> nextRight, ToDoubleFunction<? super K> transformer, double basis, DoubleBinaryOperator reducer)5723 MapReduceKeysToDoubleTask 5724 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5725 MapReduceKeysToDoubleTask<K,V> nextRight, 5726 ToDoubleFunction<? super K> transformer, 5727 double basis, 5728 DoubleBinaryOperator reducer) { 5729 super(p, b, i, f, t); this.nextRight = nextRight; 5730 this.transformer = transformer; 5731 this.basis = basis; this.reducer = reducer; 5732 } getRawResult()5733 public final Double getRawResult() { return result; } compute()5734 public final void compute() { 5735 final ToDoubleFunction<? super K> transformer; 5736 final DoubleBinaryOperator reducer; 5737 if ((transformer = this.transformer) != null && 5738 (reducer = this.reducer) != null) { 5739 double r = this.basis; 5740 for (int i = baseIndex, f, h; batch > 0 && 5741 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5742 addToPendingCount(1); 5743 (rights = new MapReduceKeysToDoubleTask<K,V> 5744 (this, batch >>>= 1, baseLimit = h, f, tab, 5745 rights, transformer, r, reducer)).fork(); 5746 } 5747 for (Node<K,V> p; (p = advance()) != null; ) 5748 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key)); 5749 result = r; 5750 CountedCompleter<?> c; 5751 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5752 @SuppressWarnings("unchecked") 5753 MapReduceKeysToDoubleTask<K,V> 5754 t = (MapReduceKeysToDoubleTask<K,V>)c, 5755 s = t.rights; 5756 while (s != null) { 5757 t.result = reducer.applyAsDouble(t.result, s.result); 5758 s = t.rights = s.nextRight; 5759 } 5760 } 5761 } 5762 } 5763 } 5764 5765 @SuppressWarnings("serial") 5766 static final class MapReduceValuesToDoubleTask<K,V> 5767 extends BulkTask<K,V,Double> { 5768 final ToDoubleFunction<? super V> transformer; 5769 final DoubleBinaryOperator reducer; 5770 final double basis; 5771 double result; 5772 MapReduceValuesToDoubleTask<K,V> rights, nextRight; MapReduceValuesToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToDoubleTask<K,V> nextRight, ToDoubleFunction<? super V> transformer, double basis, DoubleBinaryOperator reducer)5773 MapReduceValuesToDoubleTask 5774 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5775 MapReduceValuesToDoubleTask<K,V> nextRight, 5776 ToDoubleFunction<? super V> transformer, 5777 double basis, 5778 DoubleBinaryOperator reducer) { 5779 super(p, b, i, f, t); this.nextRight = nextRight; 5780 this.transformer = transformer; 5781 this.basis = basis; this.reducer = reducer; 5782 } getRawResult()5783 public final Double getRawResult() { return result; } compute()5784 public final void compute() { 5785 final ToDoubleFunction<? super V> transformer; 5786 final DoubleBinaryOperator reducer; 5787 if ((transformer = this.transformer) != null && 5788 (reducer = this.reducer) != null) { 5789 double r = this.basis; 5790 for (int i = baseIndex, f, h; batch > 0 && 5791 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5792 addToPendingCount(1); 5793 (rights = new MapReduceValuesToDoubleTask<K,V> 5794 (this, batch >>>= 1, baseLimit = h, f, tab, 5795 rights, transformer, r, reducer)).fork(); 5796 } 5797 for (Node<K,V> p; (p = advance()) != null; ) 5798 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val)); 5799 result = r; 5800 CountedCompleter<?> c; 5801 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5802 @SuppressWarnings("unchecked") 5803 MapReduceValuesToDoubleTask<K,V> 5804 t = (MapReduceValuesToDoubleTask<K,V>)c, 5805 s = t.rights; 5806 while (s != null) { 5807 t.result = reducer.applyAsDouble(t.result, s.result); 5808 s = t.rights = s.nextRight; 5809 } 5810 } 5811 } 5812 } 5813 } 5814 5815 @SuppressWarnings("serial") 5816 static final class MapReduceEntriesToDoubleTask<K,V> 5817 extends BulkTask<K,V,Double> { 5818 final ToDoubleFunction<Map.Entry<K,V>> transformer; 5819 final DoubleBinaryOperator reducer; 5820 final double basis; 5821 double result; 5822 MapReduceEntriesToDoubleTask<K,V> rights, nextRight; MapReduceEntriesToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToDoubleTask<K,V> nextRight, ToDoubleFunction<Map.Entry<K,V>> transformer, double basis, DoubleBinaryOperator reducer)5823 MapReduceEntriesToDoubleTask 5824 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5825 MapReduceEntriesToDoubleTask<K,V> nextRight, 5826 ToDoubleFunction<Map.Entry<K,V>> transformer, 5827 double basis, 5828 DoubleBinaryOperator reducer) { 5829 super(p, b, i, f, t); this.nextRight = nextRight; 5830 this.transformer = transformer; 5831 this.basis = basis; this.reducer = reducer; 5832 } getRawResult()5833 public final Double getRawResult() { return result; } compute()5834 public final void compute() { 5835 final ToDoubleFunction<Map.Entry<K,V>> transformer; 5836 final DoubleBinaryOperator reducer; 5837 if ((transformer = this.transformer) != null && 5838 (reducer = this.reducer) != null) { 5839 double r = this.basis; 5840 for (int i = baseIndex, f, h; batch > 0 && 5841 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5842 addToPendingCount(1); 5843 (rights = new MapReduceEntriesToDoubleTask<K,V> 5844 (this, batch >>>= 1, baseLimit = h, f, tab, 5845 rights, transformer, r, reducer)).fork(); 5846 } 5847 for (Node<K,V> p; (p = advance()) != null; ) 5848 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p)); 5849 result = r; 5850 CountedCompleter<?> c; 5851 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5852 @SuppressWarnings("unchecked") 5853 MapReduceEntriesToDoubleTask<K,V> 5854 t = (MapReduceEntriesToDoubleTask<K,V>)c, 5855 s = t.rights; 5856 while (s != null) { 5857 t.result = reducer.applyAsDouble(t.result, s.result); 5858 s = t.rights = s.nextRight; 5859 } 5860 } 5861 } 5862 } 5863 } 5864 5865 @SuppressWarnings("serial") 5866 static final class MapReduceMappingsToDoubleTask<K,V> 5867 extends BulkTask<K,V,Double> { 5868 final ToDoubleBiFunction<? super K, ? super V> transformer; 5869 final DoubleBinaryOperator reducer; 5870 final double basis; 5871 double result; 5872 MapReduceMappingsToDoubleTask<K,V> rights, nextRight; MapReduceMappingsToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToDoubleTask<K,V> nextRight, ToDoubleBiFunction<? super K, ? super V> transformer, double basis, DoubleBinaryOperator reducer)5873 MapReduceMappingsToDoubleTask 5874 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5875 MapReduceMappingsToDoubleTask<K,V> nextRight, 5876 ToDoubleBiFunction<? super K, ? super V> transformer, 5877 double basis, 5878 DoubleBinaryOperator reducer) { 5879 super(p, b, i, f, t); this.nextRight = nextRight; 5880 this.transformer = transformer; 5881 this.basis = basis; this.reducer = reducer; 5882 } getRawResult()5883 public final Double getRawResult() { return result; } compute()5884 public final void compute() { 5885 final ToDoubleBiFunction<? super K, ? super V> transformer; 5886 final DoubleBinaryOperator reducer; 5887 if ((transformer = this.transformer) != null && 5888 (reducer = this.reducer) != null) { 5889 double r = this.basis; 5890 for (int i = baseIndex, f, h; batch > 0 && 5891 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5892 addToPendingCount(1); 5893 (rights = new MapReduceMappingsToDoubleTask<K,V> 5894 (this, batch >>>= 1, baseLimit = h, f, tab, 5895 rights, transformer, r, reducer)).fork(); 5896 } 5897 for (Node<K,V> p; (p = advance()) != null; ) 5898 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val)); 5899 result = r; 5900 CountedCompleter<?> c; 5901 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5902 @SuppressWarnings("unchecked") 5903 MapReduceMappingsToDoubleTask<K,V> 5904 t = (MapReduceMappingsToDoubleTask<K,V>)c, 5905 s = t.rights; 5906 while (s != null) { 5907 t.result = reducer.applyAsDouble(t.result, s.result); 5908 s = t.rights = s.nextRight; 5909 } 5910 } 5911 } 5912 } 5913 } 5914 5915 @SuppressWarnings("serial") 5916 static final class MapReduceKeysToLongTask<K,V> 5917 extends BulkTask<K,V,Long> { 5918 final ToLongFunction<? super K> transformer; 5919 final LongBinaryOperator reducer; 5920 final long basis; 5921 long result; 5922 MapReduceKeysToLongTask<K,V> rights, nextRight; MapReduceKeysToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToLongTask<K,V> nextRight, ToLongFunction<? super K> transformer, long basis, LongBinaryOperator reducer)5923 MapReduceKeysToLongTask 5924 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5925 MapReduceKeysToLongTask<K,V> nextRight, 5926 ToLongFunction<? super K> transformer, 5927 long basis, 5928 LongBinaryOperator reducer) { 5929 super(p, b, i, f, t); this.nextRight = nextRight; 5930 this.transformer = transformer; 5931 this.basis = basis; this.reducer = reducer; 5932 } getRawResult()5933 public final Long getRawResult() { return result; } compute()5934 public final void compute() { 5935 final ToLongFunction<? super K> transformer; 5936 final LongBinaryOperator reducer; 5937 if ((transformer = this.transformer) != null && 5938 (reducer = this.reducer) != null) { 5939 long r = this.basis; 5940 for (int i = baseIndex, f, h; batch > 0 && 5941 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5942 addToPendingCount(1); 5943 (rights = new MapReduceKeysToLongTask<K,V> 5944 (this, batch >>>= 1, baseLimit = h, f, tab, 5945 rights, transformer, r, reducer)).fork(); 5946 } 5947 for (Node<K,V> p; (p = advance()) != null; ) 5948 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key)); 5949 result = r; 5950 CountedCompleter<?> c; 5951 for (c = firstComplete(); c != null; c = c.nextComplete()) { 5952 @SuppressWarnings("unchecked") 5953 MapReduceKeysToLongTask<K,V> 5954 t = (MapReduceKeysToLongTask<K,V>)c, 5955 s = t.rights; 5956 while (s != null) { 5957 t.result = reducer.applyAsLong(t.result, s.result); 5958 s = t.rights = s.nextRight; 5959 } 5960 } 5961 } 5962 } 5963 } 5964 5965 @SuppressWarnings("serial") 5966 static final class MapReduceValuesToLongTask<K,V> 5967 extends BulkTask<K,V,Long> { 5968 final ToLongFunction<? super V> transformer; 5969 final LongBinaryOperator reducer; 5970 final long basis; 5971 long result; 5972 MapReduceValuesToLongTask<K,V> rights, nextRight; MapReduceValuesToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToLongTask<K,V> nextRight, ToLongFunction<? super V> transformer, long basis, LongBinaryOperator reducer)5973 MapReduceValuesToLongTask 5974 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 5975 MapReduceValuesToLongTask<K,V> nextRight, 5976 ToLongFunction<? super V> transformer, 5977 long basis, 5978 LongBinaryOperator reducer) { 5979 super(p, b, i, f, t); this.nextRight = nextRight; 5980 this.transformer = transformer; 5981 this.basis = basis; this.reducer = reducer; 5982 } getRawResult()5983 public final Long getRawResult() { return result; } compute()5984 public final void compute() { 5985 final ToLongFunction<? super V> transformer; 5986 final LongBinaryOperator reducer; 5987 if ((transformer = this.transformer) != null && 5988 (reducer = this.reducer) != null) { 5989 long r = this.basis; 5990 for (int i = baseIndex, f, h; batch > 0 && 5991 (h = ((f = baseLimit) + i) >>> 1) > i;) { 5992 addToPendingCount(1); 5993 (rights = new MapReduceValuesToLongTask<K,V> 5994 (this, batch >>>= 1, baseLimit = h, f, tab, 5995 rights, transformer, r, reducer)).fork(); 5996 } 5997 for (Node<K,V> p; (p = advance()) != null; ) 5998 r = reducer.applyAsLong(r, transformer.applyAsLong(p.val)); 5999 result = r; 6000 CountedCompleter<?> c; 6001 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6002 @SuppressWarnings("unchecked") 6003 MapReduceValuesToLongTask<K,V> 6004 t = (MapReduceValuesToLongTask<K,V>)c, 6005 s = t.rights; 6006 while (s != null) { 6007 t.result = reducer.applyAsLong(t.result, s.result); 6008 s = t.rights = s.nextRight; 6009 } 6010 } 6011 } 6012 } 6013 } 6014 6015 @SuppressWarnings("serial") 6016 static final class MapReduceEntriesToLongTask<K,V> 6017 extends BulkTask<K,V,Long> { 6018 final ToLongFunction<Map.Entry<K,V>> transformer; 6019 final LongBinaryOperator reducer; 6020 final long basis; 6021 long result; 6022 MapReduceEntriesToLongTask<K,V> rights, nextRight; MapReduceEntriesToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToLongTask<K,V> nextRight, ToLongFunction<Map.Entry<K,V>> transformer, long basis, LongBinaryOperator reducer)6023 MapReduceEntriesToLongTask 6024 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6025 MapReduceEntriesToLongTask<K,V> nextRight, 6026 ToLongFunction<Map.Entry<K,V>> transformer, 6027 long basis, 6028 LongBinaryOperator reducer) { 6029 super(p, b, i, f, t); this.nextRight = nextRight; 6030 this.transformer = transformer; 6031 this.basis = basis; this.reducer = reducer; 6032 } getRawResult()6033 public final Long getRawResult() { return result; } compute()6034 public final void compute() { 6035 final ToLongFunction<Map.Entry<K,V>> transformer; 6036 final LongBinaryOperator reducer; 6037 if ((transformer = this.transformer) != null && 6038 (reducer = this.reducer) != null) { 6039 long r = this.basis; 6040 for (int i = baseIndex, f, h; batch > 0 && 6041 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6042 addToPendingCount(1); 6043 (rights = new MapReduceEntriesToLongTask<K,V> 6044 (this, batch >>>= 1, baseLimit = h, f, tab, 6045 rights, transformer, r, reducer)).fork(); 6046 } 6047 for (Node<K,V> p; (p = advance()) != null; ) 6048 r = reducer.applyAsLong(r, transformer.applyAsLong(p)); 6049 result = r; 6050 CountedCompleter<?> c; 6051 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6052 @SuppressWarnings("unchecked") 6053 MapReduceEntriesToLongTask<K,V> 6054 t = (MapReduceEntriesToLongTask<K,V>)c, 6055 s = t.rights; 6056 while (s != null) { 6057 t.result = reducer.applyAsLong(t.result, s.result); 6058 s = t.rights = s.nextRight; 6059 } 6060 } 6061 } 6062 } 6063 } 6064 6065 @SuppressWarnings("serial") 6066 static final class MapReduceMappingsToLongTask<K,V> 6067 extends BulkTask<K,V,Long> { 6068 final ToLongBiFunction<? super K, ? super V> transformer; 6069 final LongBinaryOperator reducer; 6070 final long basis; 6071 long result; 6072 MapReduceMappingsToLongTask<K,V> rights, nextRight; MapReduceMappingsToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToLongTask<K,V> nextRight, ToLongBiFunction<? super K, ? super V> transformer, long basis, LongBinaryOperator reducer)6073 MapReduceMappingsToLongTask 6074 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6075 MapReduceMappingsToLongTask<K,V> nextRight, 6076 ToLongBiFunction<? super K, ? super V> transformer, 6077 long basis, 6078 LongBinaryOperator reducer) { 6079 super(p, b, i, f, t); this.nextRight = nextRight; 6080 this.transformer = transformer; 6081 this.basis = basis; this.reducer = reducer; 6082 } getRawResult()6083 public final Long getRawResult() { return result; } compute()6084 public final void compute() { 6085 final ToLongBiFunction<? super K, ? super V> transformer; 6086 final LongBinaryOperator reducer; 6087 if ((transformer = this.transformer) != null && 6088 (reducer = this.reducer) != null) { 6089 long r = this.basis; 6090 for (int i = baseIndex, f, h; batch > 0 && 6091 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6092 addToPendingCount(1); 6093 (rights = new MapReduceMappingsToLongTask<K,V> 6094 (this, batch >>>= 1, baseLimit = h, f, tab, 6095 rights, transformer, r, reducer)).fork(); 6096 } 6097 for (Node<K,V> p; (p = advance()) != null; ) 6098 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val)); 6099 result = r; 6100 CountedCompleter<?> c; 6101 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6102 @SuppressWarnings("unchecked") 6103 MapReduceMappingsToLongTask<K,V> 6104 t = (MapReduceMappingsToLongTask<K,V>)c, 6105 s = t.rights; 6106 while (s != null) { 6107 t.result = reducer.applyAsLong(t.result, s.result); 6108 s = t.rights = s.nextRight; 6109 } 6110 } 6111 } 6112 } 6113 } 6114 6115 @SuppressWarnings("serial") 6116 static final class MapReduceKeysToIntTask<K,V> 6117 extends BulkTask<K,V,Integer> { 6118 final ToIntFunction<? super K> transformer; 6119 final IntBinaryOperator reducer; 6120 final int basis; 6121 int result; 6122 MapReduceKeysToIntTask<K,V> rights, nextRight; MapReduceKeysToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToIntTask<K,V> nextRight, ToIntFunction<? super K> transformer, int basis, IntBinaryOperator reducer)6123 MapReduceKeysToIntTask 6124 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6125 MapReduceKeysToIntTask<K,V> nextRight, 6126 ToIntFunction<? super K> transformer, 6127 int basis, 6128 IntBinaryOperator reducer) { 6129 super(p, b, i, f, t); this.nextRight = nextRight; 6130 this.transformer = transformer; 6131 this.basis = basis; this.reducer = reducer; 6132 } getRawResult()6133 public final Integer getRawResult() { return result; } compute()6134 public final void compute() { 6135 final ToIntFunction<? super K> transformer; 6136 final IntBinaryOperator reducer; 6137 if ((transformer = this.transformer) != null && 6138 (reducer = this.reducer) != null) { 6139 int r = this.basis; 6140 for (int i = baseIndex, f, h; batch > 0 && 6141 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6142 addToPendingCount(1); 6143 (rights = new MapReduceKeysToIntTask<K,V> 6144 (this, batch >>>= 1, baseLimit = h, f, tab, 6145 rights, transformer, r, reducer)).fork(); 6146 } 6147 for (Node<K,V> p; (p = advance()) != null; ) 6148 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key)); 6149 result = r; 6150 CountedCompleter<?> c; 6151 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6152 @SuppressWarnings("unchecked") 6153 MapReduceKeysToIntTask<K,V> 6154 t = (MapReduceKeysToIntTask<K,V>)c, 6155 s = t.rights; 6156 while (s != null) { 6157 t.result = reducer.applyAsInt(t.result, s.result); 6158 s = t.rights = s.nextRight; 6159 } 6160 } 6161 } 6162 } 6163 } 6164 6165 @SuppressWarnings("serial") 6166 static final class MapReduceValuesToIntTask<K,V> 6167 extends BulkTask<K,V,Integer> { 6168 final ToIntFunction<? super V> transformer; 6169 final IntBinaryOperator reducer; 6170 final int basis; 6171 int result; 6172 MapReduceValuesToIntTask<K,V> rights, nextRight; MapReduceValuesToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToIntTask<K,V> nextRight, ToIntFunction<? super V> transformer, int basis, IntBinaryOperator reducer)6173 MapReduceValuesToIntTask 6174 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6175 MapReduceValuesToIntTask<K,V> nextRight, 6176 ToIntFunction<? super V> transformer, 6177 int basis, 6178 IntBinaryOperator reducer) { 6179 super(p, b, i, f, t); this.nextRight = nextRight; 6180 this.transformer = transformer; 6181 this.basis = basis; this.reducer = reducer; 6182 } getRawResult()6183 public final Integer getRawResult() { return result; } compute()6184 public final void compute() { 6185 final ToIntFunction<? super V> transformer; 6186 final IntBinaryOperator reducer; 6187 if ((transformer = this.transformer) != null && 6188 (reducer = this.reducer) != null) { 6189 int r = this.basis; 6190 for (int i = baseIndex, f, h; batch > 0 && 6191 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6192 addToPendingCount(1); 6193 (rights = new MapReduceValuesToIntTask<K,V> 6194 (this, batch >>>= 1, baseLimit = h, f, tab, 6195 rights, transformer, r, reducer)).fork(); 6196 } 6197 for (Node<K,V> p; (p = advance()) != null; ) 6198 r = reducer.applyAsInt(r, transformer.applyAsInt(p.val)); 6199 result = r; 6200 CountedCompleter<?> c; 6201 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6202 @SuppressWarnings("unchecked") 6203 MapReduceValuesToIntTask<K,V> 6204 t = (MapReduceValuesToIntTask<K,V>)c, 6205 s = t.rights; 6206 while (s != null) { 6207 t.result = reducer.applyAsInt(t.result, s.result); 6208 s = t.rights = s.nextRight; 6209 } 6210 } 6211 } 6212 } 6213 } 6214 6215 @SuppressWarnings("serial") 6216 static final class MapReduceEntriesToIntTask<K,V> 6217 extends BulkTask<K,V,Integer> { 6218 final ToIntFunction<Map.Entry<K,V>> transformer; 6219 final IntBinaryOperator reducer; 6220 final int basis; 6221 int result; 6222 MapReduceEntriesToIntTask<K,V> rights, nextRight; MapReduceEntriesToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToIntTask<K,V> nextRight, ToIntFunction<Map.Entry<K,V>> transformer, int basis, IntBinaryOperator reducer)6223 MapReduceEntriesToIntTask 6224 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6225 MapReduceEntriesToIntTask<K,V> nextRight, 6226 ToIntFunction<Map.Entry<K,V>> transformer, 6227 int basis, 6228 IntBinaryOperator reducer) { 6229 super(p, b, i, f, t); this.nextRight = nextRight; 6230 this.transformer = transformer; 6231 this.basis = basis; this.reducer = reducer; 6232 } getRawResult()6233 public final Integer getRawResult() { return result; } compute()6234 public final void compute() { 6235 final ToIntFunction<Map.Entry<K,V>> transformer; 6236 final IntBinaryOperator reducer; 6237 if ((transformer = this.transformer) != null && 6238 (reducer = this.reducer) != null) { 6239 int r = this.basis; 6240 for (int i = baseIndex, f, h; batch > 0 && 6241 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6242 addToPendingCount(1); 6243 (rights = new MapReduceEntriesToIntTask<K,V> 6244 (this, batch >>>= 1, baseLimit = h, f, tab, 6245 rights, transformer, r, reducer)).fork(); 6246 } 6247 for (Node<K,V> p; (p = advance()) != null; ) 6248 r = reducer.applyAsInt(r, transformer.applyAsInt(p)); 6249 result = r; 6250 CountedCompleter<?> c; 6251 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6252 @SuppressWarnings("unchecked") 6253 MapReduceEntriesToIntTask<K,V> 6254 t = (MapReduceEntriesToIntTask<K,V>)c, 6255 s = t.rights; 6256 while (s != null) { 6257 t.result = reducer.applyAsInt(t.result, s.result); 6258 s = t.rights = s.nextRight; 6259 } 6260 } 6261 } 6262 } 6263 } 6264 6265 @SuppressWarnings("serial") 6266 static final class MapReduceMappingsToIntTask<K,V> 6267 extends BulkTask<K,V,Integer> { 6268 final ToIntBiFunction<? super K, ? super V> transformer; 6269 final IntBinaryOperator reducer; 6270 final int basis; 6271 int result; 6272 MapReduceMappingsToIntTask<K,V> rights, nextRight; MapReduceMappingsToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToIntTask<K,V> nextRight, ToIntBiFunction<? super K, ? super V> transformer, int basis, IntBinaryOperator reducer)6273 MapReduceMappingsToIntTask 6274 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, 6275 MapReduceMappingsToIntTask<K,V> nextRight, 6276 ToIntBiFunction<? super K, ? super V> transformer, 6277 int basis, 6278 IntBinaryOperator reducer) { 6279 super(p, b, i, f, t); this.nextRight = nextRight; 6280 this.transformer = transformer; 6281 this.basis = basis; this.reducer = reducer; 6282 } getRawResult()6283 public final Integer getRawResult() { return result; } compute()6284 public final void compute() { 6285 final ToIntBiFunction<? super K, ? super V> transformer; 6286 final IntBinaryOperator reducer; 6287 if ((transformer = this.transformer) != null && 6288 (reducer = this.reducer) != null) { 6289 int r = this.basis; 6290 for (int i = baseIndex, f, h; batch > 0 && 6291 (h = ((f = baseLimit) + i) >>> 1) > i;) { 6292 addToPendingCount(1); 6293 (rights = new MapReduceMappingsToIntTask<K,V> 6294 (this, batch >>>= 1, baseLimit = h, f, tab, 6295 rights, transformer, r, reducer)).fork(); 6296 } 6297 for (Node<K,V> p; (p = advance()) != null; ) 6298 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val)); 6299 result = r; 6300 CountedCompleter<?> c; 6301 for (c = firstComplete(); c != null; c = c.nextComplete()) { 6302 @SuppressWarnings("unchecked") 6303 MapReduceMappingsToIntTask<K,V> 6304 t = (MapReduceMappingsToIntTask<K,V>)c, 6305 s = t.rights; 6306 while (s != null) { 6307 t.result = reducer.applyAsInt(t.result, s.result); 6308 s = t.rights = s.nextRight; 6309 } 6310 } 6311 } 6312 } 6313 } 6314 6315 // Unsafe mechanics 6316 private static final sun.misc.Unsafe U = sun.misc.Unsafe.getUnsafe(); 6317 private static final long SIZECTL; 6318 private static final long TRANSFERINDEX; 6319 private static final long BASECOUNT; 6320 private static final long CELLSBUSY; 6321 private static final long CELLVALUE; 6322 private static final int ABASE; 6323 private static final int ASHIFT; 6324 6325 static { 6326 try { 6327 SIZECTL = U.objectFieldOffset 6328 (ConcurrentHashMap.class.getDeclaredField("sizeCtl")); 6329 TRANSFERINDEX = U.objectFieldOffset 6330 (ConcurrentHashMap.class.getDeclaredField("transferIndex")); 6331 BASECOUNT = U.objectFieldOffset 6332 (ConcurrentHashMap.class.getDeclaredField("baseCount")); 6333 CELLSBUSY = U.objectFieldOffset 6334 (ConcurrentHashMap.class.getDeclaredField("cellsBusy")); 6335 6336 CELLVALUE = U.objectFieldOffset 6337 (CounterCell.class.getDeclaredField("value")); 6338 6339 ABASE = U.arrayBaseOffset(Node[].class); 6340 int scale = U.arrayIndexScale(Node[].class); 6341 if ((scale & (scale - 1)) != 0) 6342 throw new Error("array index scale not a power of two"); 6343 ASHIFT = 31 - Integer.numberOfLeadingZeros(scale); 6344 } catch (ReflectiveOperationException e) { 6345 throw new Error(e); 6346 } 6347 6348 // Reduce the risk of rare disastrous classloading in first call to 6349 // LockSupport.park: https://bugs.openjdk.java.net/browse/JDK-8074773 6350 Class<?> ensureLoaded = LockSupport.class; 6351 } 6352 } 6353