• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3  *
4  * This code is free software; you can redistribute it and/or modify it
5  * under the terms of the GNU General Public License version 2 only, as
6  * published by the Free Software Foundation.  Oracle designates this
7  * particular file as subject to the "Classpath" exception as provided
8  * by Oracle in the LICENSE file that accompanied this code.
9  *
10  * This code is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13  * version 2 for more details (a copy is included in the LICENSE file that
14  * accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License version
17  * 2 along with this work; if not, write to the Free Software Foundation,
18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21  * or visit www.oracle.com if you need additional information or have any
22  * questions.
23  */
24 
25 /*
26  * This file is available under and governed by the GNU General Public
27  * License version 2 only, as published by the Free Software Foundation.
28  * However, the following notice accompanied the original version of this
29  * file:
30  *
31  * Written by Doug Lea with assistance from members of JCP JSR-166
32  * Expert Group and released to the public domain, as explained at
33  * http://creativecommons.org/publicdomain/zero/1.0/
34  */
35 
36 package java.util.concurrent;
37 
38 import java.io.ObjectStreamField;
39 import java.io.Serializable;
40 import java.lang.reflect.ParameterizedType;
41 import java.lang.reflect.Type;
42 import java.util.AbstractMap;
43 import java.util.Arrays;
44 import java.util.Collection;
45 import java.util.Enumeration;
46 import java.util.HashMap;
47 import java.util.Hashtable;
48 import java.util.Iterator;
49 import java.util.Map;
50 import java.util.NoSuchElementException;
51 import java.util.Set;
52 import java.util.Spliterator;
53 import java.util.concurrent.atomic.AtomicReference;
54 import java.util.concurrent.locks.LockSupport;
55 import java.util.concurrent.locks.ReentrantLock;
56 import java.util.function.BiConsumer;
57 import java.util.function.BiFunction;
58 import java.util.function.Consumer;
59 import java.util.function.DoubleBinaryOperator;
60 import java.util.function.Function;
61 import java.util.function.IntBinaryOperator;
62 import java.util.function.LongBinaryOperator;
63 import java.util.function.Predicate;
64 import java.util.function.ToDoubleBiFunction;
65 import java.util.function.ToDoubleFunction;
66 import java.util.function.ToIntBiFunction;
67 import java.util.function.ToIntFunction;
68 import java.util.function.ToLongBiFunction;
69 import java.util.function.ToLongFunction;
70 import java.util.stream.Stream;
71 import jdk.internal.misc.Unsafe;
72 
73 /**
74  * A hash table supporting full concurrency of retrievals and
75  * high expected concurrency for updates. This class obeys the
76  * same functional specification as {@link java.util.Hashtable}, and
77  * includes versions of methods corresponding to each method of
78  * {@code Hashtable}. However, even though all operations are
79  * thread-safe, retrieval operations do <em>not</em> entail locking,
80  * and there is <em>not</em> any support for locking the entire table
81  * in a way that prevents all access.  This class is fully
82  * interoperable with {@code Hashtable} in programs that rely on its
83  * thread safety but not on its synchronization details.
84  *
85  * <p>Retrieval operations (including {@code get}) generally do not
86  * block, so may overlap with update operations (including {@code put}
87  * and {@code remove}). Retrievals reflect the results of the most
88  * recently <em>completed</em> update operations holding upon their
89  * onset. (More formally, an update operation for a given key bears a
90  * <em>happens-before</em> relation with any (non-null) retrieval for
91  * that key reporting the updated value.)  For aggregate operations
92  * such as {@code putAll} and {@code clear}, concurrent retrievals may
93  * reflect insertion or removal of only some entries.  Similarly,
94  * Iterators, Spliterators and Enumerations return elements reflecting the
95  * state of the hash table at some point at or since the creation of the
96  * iterator/enumeration.  They do <em>not</em> throw {@link
97  * java.util.ConcurrentModificationException ConcurrentModificationException}.
98  * However, iterators are designed to be used by only one thread at a time.
99  * Bear in mind that the results of aggregate status methods including
100  * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
101  * useful only when a map is not undergoing concurrent updates in other threads.
102  * Otherwise the results of these methods reflect transient states
103  * that may be adequate for monitoring or estimation purposes, but not
104  * for program control.
105  *
106  * <p>The table is dynamically expanded when there are too many
107  * collisions (i.e., keys that have distinct hash codes but fall into
108  * the same slot modulo the table size), with the expected average
109  * effect of maintaining roughly two bins per mapping (corresponding
110  * to a 0.75 load factor threshold for resizing). There may be much
111  * variance around this average as mappings are added and removed, but
112  * overall, this maintains a commonly accepted time/space tradeoff for
113  * hash tables.  However, resizing this or any other kind of hash
114  * table may be a relatively slow operation. When possible, it is a
115  * good idea to provide a size estimate as an optional {@code
116  * initialCapacity} constructor argument. An additional optional
117  * {@code loadFactor} constructor argument provides a further means of
118  * customizing initial table capacity by specifying the table density
119  * to be used in calculating the amount of space to allocate for the
120  * given number of elements.  Also, for compatibility with previous
121  * versions of this class, constructors may optionally specify an
122  * expected {@code concurrencyLevel} as an additional hint for
123  * internal sizing.  Note that using many keys with exactly the same
124  * {@code hashCode()} is a sure way to slow down performance of any
125  * hash table. To ameliorate impact, when keys are {@link Comparable},
126  * this class may use comparison order among keys to help break ties.
127  *
128  * <p>A {@link Set} projection of a ConcurrentHashMap may be created
129  * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
130  * (using {@link #keySet(Object)} when only keys are of interest, and the
131  * mapped values are (perhaps transiently) not used or all take the
132  * same mapping value.
133  *
134  * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
135  * form of histogram or multiset) by using {@link
136  * java.util.concurrent.atomic.LongAdder} values and initializing via
137  * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
138  * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
139  * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
140  *
141  * <p>This class and its views and iterators implement all of the
142  * <em>optional</em> methods of the {@link Map} and {@link Iterator}
143  * interfaces.
144  *
145  * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
146  * does <em>not</em> allow {@code null} to be used as a key or value.
147  *
148  * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
149  * operations that, unlike most {@link Stream} methods, are designed
150  * to be safely, and often sensibly, applied even with maps that are
151  * being concurrently updated by other threads; for example, when
152  * computing a snapshot summary of the values in a shared registry.
153  * There are three kinds of operation, each with four forms, accepting
154  * functions with keys, values, entries, and (key, value) pairs as
155  * arguments and/or return values. Because the elements of a
156  * ConcurrentHashMap are not ordered in any particular way, and may be
157  * processed in different orders in different parallel executions, the
158  * correctness of supplied functions should not depend on any
159  * ordering, or on any other objects or values that may transiently
160  * change while computation is in progress; and except for forEach
161  * actions, should ideally be side-effect-free. Bulk operations on
162  * {@link Map.Entry} objects do not support method {@code setValue}.
163  *
164  * <ul>
165  * <li>forEach: Performs a given action on each element.
166  * A variant form applies a given transformation on each element
167  * before performing the action.
168  *
169  * <li>search: Returns the first available non-null result of
170  * applying a given function on each element; skipping further
171  * search when a result is found.
172  *
173  * <li>reduce: Accumulates each element.  The supplied reduction
174  * function cannot rely on ordering (more formally, it should be
175  * both associative and commutative).  There are five variants:
176  *
177  * <ul>
178  *
179  * <li>Plain reductions. (There is not a form of this method for
180  * (key, value) function arguments since there is no corresponding
181  * return type.)
182  *
183  * <li>Mapped reductions that accumulate the results of a given
184  * function applied to each element.
185  *
186  * <li>Reductions to scalar doubles, longs, and ints, using a
187  * given basis value.
188  *
189  * </ul>
190  * </ul>
191  *
192  * <p>These bulk operations accept a {@code parallelismThreshold}
193  * argument. Methods proceed sequentially if the current map size is
194  * estimated to be less than the given threshold. Using a value of
195  * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
196  * of {@code 1} results in maximal parallelism by partitioning into
197  * enough subtasks to fully utilize the {@link
198  * ForkJoinPool#commonPool()} that is used for all parallel
199  * computations. Normally, you would initially choose one of these
200  * extreme values, and then measure performance of using in-between
201  * values that trade off overhead versus throughput.
202  *
203  * <p>The concurrency properties of bulk operations follow
204  * from those of ConcurrentHashMap: Any non-null result returned
205  * from {@code get(key)} and related access methods bears a
206  * happens-before relation with the associated insertion or
207  * update.  The result of any bulk operation reflects the
208  * composition of these per-element relations (but is not
209  * necessarily atomic with respect to the map as a whole unless it
210  * is somehow known to be quiescent).  Conversely, because keys
211  * and values in the map are never null, null serves as a reliable
212  * atomic indicator of the current lack of any result.  To
213  * maintain this property, null serves as an implicit basis for
214  * all non-scalar reduction operations. For the double, long, and
215  * int versions, the basis should be one that, when combined with
216  * any other value, returns that other value (more formally, it
217  * should be the identity element for the reduction). Most common
218  * reductions have these properties; for example, computing a sum
219  * with basis 0 or a minimum with basis MAX_VALUE.
220  *
221  * <p>Search and transformation functions provided as arguments
222  * should similarly return null to indicate the lack of any result
223  * (in which case it is not used). In the case of mapped
224  * reductions, this also enables transformations to serve as
225  * filters, returning null (or, in the case of primitive
226  * specializations, the identity basis) if the element should not
227  * be combined. You can create compound transformations and
228  * filterings by composing them yourself under this "null means
229  * there is nothing there now" rule before using them in search or
230  * reduce operations.
231  *
232  * <p>Methods accepting and/or returning Entry arguments maintain
233  * key-value associations. They may be useful for example when
234  * finding the key for the greatest value. Note that "plain" Entry
235  * arguments can be supplied using {@code new
236  * AbstractMap.SimpleEntry(k,v)}.
237  *
238  * <p>Bulk operations may complete abruptly, throwing an
239  * exception encountered in the application of a supplied
240  * function. Bear in mind when handling such exceptions that other
241  * concurrently executing functions could also have thrown
242  * exceptions, or would have done so if the first exception had
243  * not occurred.
244  *
245  * <p>Speedups for parallel compared to sequential forms are common
246  * but not guaranteed.  Parallel operations involving brief functions
247  * on small maps may execute more slowly than sequential forms if the
248  * underlying work to parallelize the computation is more expensive
249  * than the computation itself.  Similarly, parallelization may not
250  * lead to much actual parallelism if all processors are busy
251  * performing unrelated tasks.
252  *
253  * <p>All arguments to all task methods must be non-null.
254  *
255  * <p>This class is a member of the
256  * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
257  * Java Collections Framework</a>.
258  *
259  * @since 1.5
260  * @author Doug Lea
261  * @param <K> the type of keys maintained by this map
262  * @param <V> the type of mapped values
263  */
264 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
265     implements ConcurrentMap<K,V>, Serializable {
266     private static final long serialVersionUID = 7249069246763182397L;
267 
268     /*
269      * Overview:
270      *
271      * The primary design goal of this hash table is to maintain
272      * concurrent readability (typically method get(), but also
273      * iterators and related methods) while minimizing update
274      * contention. Secondary goals are to keep space consumption about
275      * the same or better than java.util.HashMap, and to support high
276      * initial insertion rates on an empty table by many threads.
277      *
278      * This map usually acts as a binned (bucketed) hash table.  Each
279      * key-value mapping is held in a Node.  Most nodes are instances
280      * of the basic Node class with hash, key, value, and next
281      * fields. However, various subclasses exist: TreeNodes are
282      * arranged in balanced trees, not lists.  TreeBins hold the roots
283      * of sets of TreeNodes. ForwardingNodes are placed at the heads
284      * of bins during resizing. ReservationNodes are used as
285      * placeholders while establishing values in computeIfAbsent and
286      * related methods.  The types TreeBin, ForwardingNode, and
287      * ReservationNode do not hold normal user keys, values, or
288      * hashes, and are readily distinguishable during search etc
289      * because they have negative hash fields and null key and value
290      * fields. (These special nodes are either uncommon or transient,
291      * so the impact of carrying around some unused fields is
292      * insignificant.)
293      *
294      * The table is lazily initialized to a power-of-two size upon the
295      * first insertion.  Each bin in the table normally contains a
296      * list of Nodes (most often, the list has only zero or one Node).
297      * Table accesses require volatile/atomic reads, writes, and
298      * CASes.  Because there is no other way to arrange this without
299      * adding further indirections, we use intrinsics
300      * (jdk.internal.misc.Unsafe) operations.
301      *
302      * We use the top (sign) bit of Node hash fields for control
303      * purposes -- it is available anyway because of addressing
304      * constraints.  Nodes with negative hash fields are specially
305      * handled or ignored in map methods.
306      *
307      * Insertion (via put or its variants) of the first node in an
308      * empty bin is performed by just CASing it to the bin.  This is
309      * by far the most common case for put operations under most
310      * key/hash distributions.  Other update operations (insert,
311      * delete, and replace) require locks.  We do not want to waste
312      * the space required to associate a distinct lock object with
313      * each bin, so instead use the first node of a bin list itself as
314      * a lock. Locking support for these locks relies on builtin
315      * "synchronized" monitors.
316      *
317      * Using the first node of a list as a lock does not by itself
318      * suffice though: When a node is locked, any update must first
319      * validate that it is still the first node after locking it, and
320      * retry if not. Because new nodes are always appended to lists,
321      * once a node is first in a bin, it remains first until deleted
322      * or the bin becomes invalidated (upon resizing).
323      *
324      * The main disadvantage of per-bin locks is that other update
325      * operations on other nodes in a bin list protected by the same
326      * lock can stall, for example when user equals() or mapping
327      * functions take a long time.  However, statistically, under
328      * random hash codes, this is not a common problem.  Ideally, the
329      * frequency of nodes in bins follows a Poisson distribution
330      * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
331      * parameter of about 0.5 on average, given the resizing threshold
332      * of 0.75, although with a large variance because of resizing
333      * granularity. Ignoring variance, the expected occurrences of
334      * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
335      * first values are:
336      *
337      * 0:    0.60653066
338      * 1:    0.30326533
339      * 2:    0.07581633
340      * 3:    0.01263606
341      * 4:    0.00157952
342      * 5:    0.00015795
343      * 6:    0.00001316
344      * 7:    0.00000094
345      * 8:    0.00000006
346      * more: less than 1 in ten million
347      *
348      * Lock contention probability for two threads accessing distinct
349      * elements is roughly 1 / (8 * #elements) under random hashes.
350      *
351      * Actual hash code distributions encountered in practice
352      * sometimes deviate significantly from uniform randomness.  This
353      * includes the case when N > (1<<30), so some keys MUST collide.
354      * Similarly for dumb or hostile usages in which multiple keys are
355      * designed to have identical hash codes or ones that differs only
356      * in masked-out high bits. So we use a secondary strategy that
357      * applies when the number of nodes in a bin exceeds a
358      * threshold. These TreeBins use a balanced tree to hold nodes (a
359      * specialized form of red-black trees), bounding search time to
360      * O(log N).  Each search step in a TreeBin is at least twice as
361      * slow as in a regular list, but given that N cannot exceed
362      * (1<<64) (before running out of addresses) this bounds search
363      * steps, lock hold times, etc, to reasonable constants (roughly
364      * 100 nodes inspected per operation worst case) so long as keys
365      * are Comparable (which is very common -- String, Long, etc).
366      * TreeBin nodes (TreeNodes) also maintain the same "next"
367      * traversal pointers as regular nodes, so can be traversed in
368      * iterators in the same way.
369      *
370      * The table is resized when occupancy exceeds a percentage
371      * threshold (nominally, 0.75, but see below).  Any thread
372      * noticing an overfull bin may assist in resizing after the
373      * initiating thread allocates and sets up the replacement array.
374      * However, rather than stalling, these other threads may proceed
375      * with insertions etc.  The use of TreeBins shields us from the
376      * worst case effects of overfilling while resizes are in
377      * progress.  Resizing proceeds by transferring bins, one by one,
378      * from the table to the next table. However, threads claim small
379      * blocks of indices to transfer (via field transferIndex) before
380      * doing so, reducing contention.  A generation stamp in field
381      * sizeCtl ensures that resizings do not overlap. Because we are
382      * using power-of-two expansion, the elements from each bin must
383      * either stay at same index, or move with a power of two
384      * offset. We eliminate unnecessary node creation by catching
385      * cases where old nodes can be reused because their next fields
386      * won't change.  On average, only about one-sixth of them need
387      * cloning when a table doubles. The nodes they replace will be
388      * garbage collectible as soon as they are no longer referenced by
389      * any reader thread that may be in the midst of concurrently
390      * traversing table.  Upon transfer, the old table bin contains
391      * only a special forwarding node (with hash field "MOVED") that
392      * contains the next table as its key. On encountering a
393      * forwarding node, access and update operations restart, using
394      * the new table.
395      *
396      * Each bin transfer requires its bin lock, which can stall
397      * waiting for locks while resizing. However, because other
398      * threads can join in and help resize rather than contend for
399      * locks, average aggregate waits become shorter as resizing
400      * progresses.  The transfer operation must also ensure that all
401      * accessible bins in both the old and new table are usable by any
402      * traversal.  This is arranged in part by proceeding from the
403      * last bin (table.length - 1) up towards the first.  Upon seeing
404      * a forwarding node, traversals (see class Traverser) arrange to
405      * move to the new table without revisiting nodes.  To ensure that
406      * no intervening nodes are skipped even when moved out of order,
407      * a stack (see class TableStack) is created on first encounter of
408      * a forwarding node during a traversal, to maintain its place if
409      * later processing the current table. The need for these
410      * save/restore mechanics is relatively rare, but when one
411      * forwarding node is encountered, typically many more will be.
412      * So Traversers use a simple caching scheme to avoid creating so
413      * many new TableStack nodes. (Thanks to Peter Levart for
414      * suggesting use of a stack here.)
415      *
416      * The traversal scheme also applies to partial traversals of
417      * ranges of bins (via an alternate Traverser constructor)
418      * to support partitioned aggregate operations.  Also, read-only
419      * operations give up if ever forwarded to a null table, which
420      * provides support for shutdown-style clearing, which is also not
421      * currently implemented.
422      *
423      * Lazy table initialization minimizes footprint until first use,
424      * and also avoids resizings when the first operation is from a
425      * putAll, constructor with map argument, or deserialization.
426      * These cases attempt to override the initial capacity settings,
427      * but harmlessly fail to take effect in cases of races.
428      *
429      * The element count is maintained using a specialization of
430      * LongAdder. We need to incorporate a specialization rather than
431      * just use a LongAdder in order to access implicit
432      * contention-sensing that leads to creation of multiple
433      * CounterCells.  The counter mechanics avoid contention on
434      * updates but can encounter cache thrashing if read too
435      * frequently during concurrent access. To avoid reading so often,
436      * resizing under contention is attempted only upon adding to a
437      * bin already holding two or more nodes. Under uniform hash
438      * distributions, the probability of this occurring at threshold
439      * is around 13%, meaning that only about 1 in 8 puts check
440      * threshold (and after resizing, many fewer do so).
441      *
442      * TreeBins use a special form of comparison for search and
443      * related operations (which is the main reason we cannot use
444      * existing collections such as TreeMaps). TreeBins contain
445      * Comparable elements, but may contain others, as well as
446      * elements that are Comparable but not necessarily Comparable for
447      * the same T, so we cannot invoke compareTo among them. To handle
448      * this, the tree is ordered primarily by hash value, then by
449      * Comparable.compareTo order if applicable.  On lookup at a node,
450      * if elements are not comparable or compare as 0 then both left
451      * and right children may need to be searched in the case of tied
452      * hash values. (This corresponds to the full list search that
453      * would be necessary if all elements were non-Comparable and had
454      * tied hashes.) On insertion, to keep a total ordering (or as
455      * close as is required here) across rebalancings, we compare
456      * classes and identityHashCodes as tie-breakers. The red-black
457      * balancing code is updated from pre-jdk-collections
458      * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
459      * based in turn on Cormen, Leiserson, and Rivest "Introduction to
460      * Algorithms" (CLR).
461      *
462      * TreeBins also require an additional locking mechanism.  While
463      * list traversal is always possible by readers even during
464      * updates, tree traversal is not, mainly because of tree-rotations
465      * that may change the root node and/or its linkages.  TreeBins
466      * include a simple read-write lock mechanism parasitic on the
467      * main bin-synchronization strategy: Structural adjustments
468      * associated with an insertion or removal are already bin-locked
469      * (and so cannot conflict with other writers) but must wait for
470      * ongoing readers to finish. Since there can be only one such
471      * waiter, we use a simple scheme using a single "waiter" field to
472      * block writers.  However, readers need never block.  If the root
473      * lock is held, they proceed along the slow traversal path (via
474      * next-pointers) until the lock becomes available or the list is
475      * exhausted, whichever comes first. These cases are not fast, but
476      * maximize aggregate expected throughput.
477      *
478      * Maintaining API and serialization compatibility with previous
479      * versions of this class introduces several oddities. Mainly: We
480      * leave untouched but unused constructor arguments referring to
481      * concurrencyLevel. We accept a loadFactor constructor argument,
482      * but apply it only to initial table capacity (which is the only
483      * time that we can guarantee to honor it.) We also declare an
484      * unused "Segment" class that is instantiated in minimal form
485      * only when serializing.
486      *
487      * Also, solely for compatibility with previous versions of this
488      * class, it extends AbstractMap, even though all of its methods
489      * are overridden, so it is just useless baggage.
490      *
491      * This file is organized to make things a little easier to follow
492      * while reading than they might otherwise: First the main static
493      * declarations and utilities, then fields, then main public
494      * methods (with a few factorings of multiple public methods into
495      * internal ones), then sizing methods, trees, traversers, and
496      * bulk operations.
497      */
498 
499     /* ---------------- Constants -------------- */
500 
501     /**
502      * The largest possible table capacity.  This value must be
503      * exactly 1<<30 to stay within Java array allocation and indexing
504      * bounds for power of two table sizes, and is further required
505      * because the top two bits of 32bit hash fields are used for
506      * control purposes.
507      */
508     private static final int MAXIMUM_CAPACITY = 1 << 30;
509 
510     /**
511      * The default initial table capacity.  Must be a power of 2
512      * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
513      */
514     private static final int DEFAULT_CAPACITY = 16;
515 
516     /**
517      * The largest possible (non-power of two) array size.
518      * Needed by toArray and related methods.
519      */
520     static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
521 
522     /**
523      * The default concurrency level for this table. Unused but
524      * defined for compatibility with previous versions of this class.
525      */
526     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
527 
528     /**
529      * The load factor for this table. Overrides of this value in
530      * constructors affect only the initial table capacity.  The
531      * actual floating point value isn't normally used -- it is
532      * simpler to use expressions such as {@code n - (n >>> 2)} for
533      * the associated resizing threshold.
534      */
535     private static final float LOAD_FACTOR = 0.75f;
536 
537     /**
538      * The bin count threshold for using a tree rather than list for a
539      * bin.  Bins are converted to trees when adding an element to a
540      * bin with at least this many nodes. The value must be greater
541      * than 2, and should be at least 8 to mesh with assumptions in
542      * tree removal about conversion back to plain bins upon
543      * shrinkage.
544      */
545     static final int TREEIFY_THRESHOLD = 8;
546 
547     /**
548      * The bin count threshold for untreeifying a (split) bin during a
549      * resize operation. Should be less than TREEIFY_THRESHOLD, and at
550      * most 6 to mesh with shrinkage detection under removal.
551      */
552     static final int UNTREEIFY_THRESHOLD = 6;
553 
554     /**
555      * The smallest table capacity for which bins may be treeified.
556      * (Otherwise the table is resized if too many nodes in a bin.)
557      * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
558      * conflicts between resizing and treeification thresholds.
559      */
560     static final int MIN_TREEIFY_CAPACITY = 64;
561 
562     /**
563      * Minimum number of rebinnings per transfer step. Ranges are
564      * subdivided to allow multiple resizer threads.  This value
565      * serves as a lower bound to avoid resizers encountering
566      * excessive memory contention.  The value should be at least
567      * DEFAULT_CAPACITY.
568      */
569     private static final int MIN_TRANSFER_STRIDE = 16;
570 
571     /**
572      * The number of bits used for generation stamp in sizeCtl.
573      * Must be at least 6 for 32bit arrays.
574      */
575     private static final int RESIZE_STAMP_BITS = 16;
576 
577     /**
578      * The maximum number of threads that can help resize.
579      * Must fit in 32 - RESIZE_STAMP_BITS bits.
580      */
581     private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
582 
583     /**
584      * The bit shift for recording size stamp in sizeCtl.
585      */
586     private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
587 
588     /*
589      * Encodings for Node hash fields. See above for explanation.
590      */
591     static final int MOVED     = -1; // hash for forwarding nodes
592     static final int TREEBIN   = -2; // hash for roots of trees
593     static final int RESERVED  = -3; // hash for transient reservations
594     static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
595 
596     /** Number of CPUS, to place bounds on some sizings */
597     static final int NCPU = Runtime.getRuntime().availableProcessors();
598 
599     /**
600      * Serialized pseudo-fields, provided only for jdk7 compatibility.
601      * @serialField segments Segment[]
602      *   The segments, each of which is a specialized hash table.
603      * @serialField segmentMask int
604      *   Mask value for indexing into segments. The upper bits of a
605      *   key's hash code are used to choose the segment.
606      * @serialField segmentShift int
607      *   Shift value for indexing within segments.
608      */
609     private static final ObjectStreamField[] serialPersistentFields = {
610         new ObjectStreamField("segments", Segment[].class),
611         new ObjectStreamField("segmentMask", Integer.TYPE),
612         new ObjectStreamField("segmentShift", Integer.TYPE),
613     };
614 
615     /* ---------------- Nodes -------------- */
616 
617     /**
618      * Key-value entry.  This class is never exported out as a
619      * user-mutable Map.Entry (i.e., one supporting setValue; see
620      * MapEntry below), but can be used for read-only traversals used
621      * in bulk tasks.  Subclasses of Node with a negative hash field
622      * are special, and contain null keys and values (but are never
623      * exported).  Otherwise, keys and vals are never null.
624      */
625     static class Node<K,V> implements Map.Entry<K,V> {
626         final int hash;
627         final K key;
628         volatile V val;
629         volatile Node<K,V> next;
630 
Node(int hash, K key, V val)631         Node(int hash, K key, V val) {
632             this.hash = hash;
633             this.key = key;
634             this.val = val;
635         }
636 
Node(int hash, K key, V val, Node<K,V> next)637         Node(int hash, K key, V val, Node<K,V> next) {
638             this(hash, key, val);
639             this.next = next;
640         }
641 
getKey()642         public final K getKey()     { return key; }
getValue()643         public final V getValue()   { return val; }
hashCode()644         public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
toString()645         public final String toString() {
646             return Helpers.mapEntryToString(key, val);
647         }
setValue(V value)648         public final V setValue(V value) {
649             throw new UnsupportedOperationException();
650         }
651 
equals(Object o)652         public final boolean equals(Object o) {
653             Object k, v, u; Map.Entry<?,?> e;
654             return ((o instanceof Map.Entry) &&
655                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
656                     (v = e.getValue()) != null &&
657                     (k == key || k.equals(key)) &&
658                     (v == (u = val) || v.equals(u)));
659         }
660 
661         /**
662          * Virtualized support for map.get(); overridden in subclasses.
663          */
find(int h, Object k)664         Node<K,V> find(int h, Object k) {
665             Node<K,V> e = this;
666             if (k != null) {
667                 do {
668                     K ek;
669                     if (e.hash == h &&
670                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
671                         return e;
672                 } while ((e = e.next) != null);
673             }
674             return null;
675         }
676     }
677 
678     /* ---------------- Static utilities -------------- */
679 
680     /**
681      * Spreads (XORs) higher bits of hash to lower and also forces top
682      * bit to 0. Because the table uses power-of-two masking, sets of
683      * hashes that vary only in bits above the current mask will
684      * always collide. (Among known examples are sets of Float keys
685      * holding consecutive whole numbers in small tables.)  So we
686      * apply a transform that spreads the impact of higher bits
687      * downward. There is a tradeoff between speed, utility, and
688      * quality of bit-spreading. Because many common sets of hashes
689      * are already reasonably distributed (so don't benefit from
690      * spreading), and because we use trees to handle large sets of
691      * collisions in bins, we just XOR some shifted bits in the
692      * cheapest possible way to reduce systematic lossage, as well as
693      * to incorporate impact of the highest bits that would otherwise
694      * never be used in index calculations because of table bounds.
695      */
spread(int h)696     static final int spread(int h) {
697         return (h ^ (h >>> 16)) & HASH_BITS;
698     }
699 
700     /**
701      * Returns a power of two table size for the given desired capacity.
702      * See Hackers Delight, sec 3.2
703      */
tableSizeFor(int c)704     private static final int tableSizeFor(int c) {
705         int n = -1 >>> Integer.numberOfLeadingZeros(c - 1);
706         return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
707     }
708 
709     /**
710      * Returns x's Class if it is of the form "class C implements
711      * Comparable<C>", else null.
712      */
comparableClassFor(Object x)713     static Class<?> comparableClassFor(Object x) {
714         if (x instanceof Comparable) {
715             Class<?> c; Type[] ts, as; ParameterizedType p;
716             if ((c = x.getClass()) == String.class) // bypass checks
717                 return c;
718             if ((ts = c.getGenericInterfaces()) != null) {
719                 for (Type t : ts) {
720                     if ((t instanceof ParameterizedType) &&
721                         ((p = (ParameterizedType)t).getRawType() ==
722                          Comparable.class) &&
723                         (as = p.getActualTypeArguments()) != null &&
724                         as.length == 1 && as[0] == c) // type arg is c
725                         return c;
726                 }
727             }
728         }
729         return null;
730     }
731 
732     /**
733      * Returns k.compareTo(x) if x matches kc (k's screened comparable
734      * class), else 0.
735      */
736     @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
compareComparables(Class<?> kc, Object k, Object x)737     static int compareComparables(Class<?> kc, Object k, Object x) {
738         return (x == null || x.getClass() != kc ? 0 :
739                 ((Comparable)k).compareTo(x));
740     }
741 
742     /* ---------------- Table element access -------------- */
743 
744     /*
745      * Atomic access methods are used for table elements as well as
746      * elements of in-progress next table while resizing.  All uses of
747      * the tab arguments must be null checked by callers.  All callers
748      * also paranoically precheck that tab's length is not zero (or an
749      * equivalent check), thus ensuring that any index argument taking
750      * the form of a hash value anded with (length - 1) is a valid
751      * index.  Note that, to be correct wrt arbitrary concurrency
752      * errors by users, these checks must operate on local variables,
753      * which accounts for some odd-looking inline assignments below.
754      * Note that calls to setTabAt always occur within locked regions,
755      * and so require only release ordering.
756      */
757 
758     @SuppressWarnings("unchecked")
tabAt(Node<K,V>[] tab, int i)759     static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
760         return (Node<K,V>)U.getReferenceAcquire(tab, ((long)i << ASHIFT) + ABASE);
761     }
762 
casTabAt(Node<K,V>[] tab, int i, Node<K,V> c, Node<K,V> v)763     static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
764                                         Node<K,V> c, Node<K,V> v) {
765         return U.compareAndSetReference(tab, ((long)i << ASHIFT) + ABASE, c, v);
766     }
767 
setTabAt(Node<K,V>[] tab, int i, Node<K,V> v)768     static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
769         U.putReferenceRelease(tab, ((long)i << ASHIFT) + ABASE, v);
770     }
771 
772     /* ---------------- Fields -------------- */
773 
774     /**
775      * The array of bins. Lazily initialized upon first insertion.
776      * Size is always a power of two. Accessed directly by iterators.
777      */
778     transient volatile Node<K,V>[] table;
779 
780     /**
781      * The next table to use; non-null only while resizing.
782      */
783     private transient volatile Node<K,V>[] nextTable;
784 
785     /**
786      * Base counter value, used mainly when there is no contention,
787      * but also as a fallback during table initialization
788      * races. Updated via CAS.
789      */
790     private transient volatile long baseCount;
791 
792     /**
793      * Table initialization and resizing control.  When negative, the
794      * table is being initialized or resized: -1 for initialization,
795      * else -(1 + the number of active resizing threads).  Otherwise,
796      * when table is null, holds the initial table size to use upon
797      * creation, or 0 for default. After initialization, holds the
798      * next element count value upon which to resize the table.
799      */
800     private transient volatile int sizeCtl;
801 
802     /**
803      * The next table index (plus one) to split while resizing.
804      */
805     private transient volatile int transferIndex;
806 
807     /**
808      * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
809      */
810     private transient volatile int cellsBusy;
811 
812     /**
813      * Table of counter cells. When non-null, size is a power of 2.
814      */
815     private transient volatile CounterCell[] counterCells;
816 
817     // views
818     private transient KeySetView<K,V> keySet;
819     private transient ValuesView<K,V> values;
820     private transient EntrySetView<K,V> entrySet;
821 
822 
823     /* ---------------- Public operations -------------- */
824 
825     /**
826      * Creates a new, empty map with the default initial table size (16).
827      */
ConcurrentHashMap()828     public ConcurrentHashMap() {
829     }
830 
831     /**
832      * Creates a new, empty map with an initial table size
833      * accommodating the specified number of elements without the need
834      * to dynamically resize.
835      *
836      * @param initialCapacity The implementation performs internal
837      * sizing to accommodate this many elements.
838      * @throws IllegalArgumentException if the initial capacity of
839      * elements is negative
840      */
ConcurrentHashMap(int initialCapacity)841     public ConcurrentHashMap(int initialCapacity) {
842         this(initialCapacity, LOAD_FACTOR, 1);
843     }
844 
845     /**
846      * Creates a new map with the same mappings as the given map.
847      *
848      * @param m the map
849      */
ConcurrentHashMap(Map<? extends K, ? extends V> m)850     public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
851         this.sizeCtl = DEFAULT_CAPACITY;
852         putAll(m);
853     }
854 
855     /**
856      * Creates a new, empty map with an initial table size based on
857      * the given number of elements ({@code initialCapacity}) and
858      * initial table density ({@code loadFactor}).
859      *
860      * @param initialCapacity the initial capacity. The implementation
861      * performs internal sizing to accommodate this many elements,
862      * given the specified load factor.
863      * @param loadFactor the load factor (table density) for
864      * establishing the initial table size
865      * @throws IllegalArgumentException if the initial capacity of
866      * elements is negative or the load factor is nonpositive
867      *
868      * @since 1.6
869      */
ConcurrentHashMap(int initialCapacity, float loadFactor)870     public ConcurrentHashMap(int initialCapacity, float loadFactor) {
871         this(initialCapacity, loadFactor, 1);
872     }
873 
874     /**
875      * Creates a new, empty map with an initial table size based on
876      * the given number of elements ({@code initialCapacity}), initial
877      * table density ({@code loadFactor}), and number of concurrently
878      * updating threads ({@code concurrencyLevel}).
879      *
880      * @param initialCapacity the initial capacity. The implementation
881      * performs internal sizing to accommodate this many elements,
882      * given the specified load factor.
883      * @param loadFactor the load factor (table density) for
884      * establishing the initial table size
885      * @param concurrencyLevel the estimated number of concurrently
886      * updating threads. The implementation may use this value as
887      * a sizing hint.
888      * @throws IllegalArgumentException if the initial capacity is
889      * negative or the load factor or concurrencyLevel are
890      * nonpositive
891      */
ConcurrentHashMap(int initialCapacity, float loadFactor, int concurrencyLevel)892     public ConcurrentHashMap(int initialCapacity,
893                              float loadFactor, int concurrencyLevel) {
894         if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
895             throw new IllegalArgumentException();
896         if (initialCapacity < concurrencyLevel)   // Use at least as many bins
897             initialCapacity = concurrencyLevel;   // as estimated threads
898         long size = (long)(1.0 + (long)initialCapacity / loadFactor);
899         int cap = (size >= (long)MAXIMUM_CAPACITY) ?
900             MAXIMUM_CAPACITY : tableSizeFor((int)size);
901         this.sizeCtl = cap;
902     }
903 
904     // Original (since JDK1.2) Map methods
905 
906     /**
907      * {@inheritDoc}
908      */
size()909     public int size() {
910         long n = sumCount();
911         return ((n < 0L) ? 0 :
912                 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
913                 (int)n);
914     }
915 
916     /**
917      * {@inheritDoc}
918      */
isEmpty()919     public boolean isEmpty() {
920         return sumCount() <= 0L; // ignore transient negative values
921     }
922 
923     /**
924      * Returns the value to which the specified key is mapped,
925      * or {@code null} if this map contains no mapping for the key.
926      *
927      * <p>More formally, if this map contains a mapping from a key
928      * {@code k} to a value {@code v} such that {@code key.equals(k)},
929      * then this method returns {@code v}; otherwise it returns
930      * {@code null}.  (There can be at most one such mapping.)
931      *
932      * @throws NullPointerException if the specified key is null
933      */
get(Object key)934     public V get(Object key) {
935         Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
936         int h = spread(key.hashCode());
937         if ((tab = table) != null && (n = tab.length) > 0 &&
938             (e = tabAt(tab, (n - 1) & h)) != null) {
939             if ((eh = e.hash) == h) {
940                 if ((ek = e.key) == key || (ek != null && key.equals(ek)))
941                     return e.val;
942             }
943             else if (eh < 0)
944                 return (p = e.find(h, key)) != null ? p.val : null;
945             while ((e = e.next) != null) {
946                 if (e.hash == h &&
947                     ((ek = e.key) == key || (ek != null && key.equals(ek))))
948                     return e.val;
949             }
950         }
951         return null;
952     }
953 
954     /**
955      * Tests if the specified object is a key in this table.
956      *
957      * @param  key possible key
958      * @return {@code true} if and only if the specified object
959      *         is a key in this table, as determined by the
960      *         {@code equals} method; {@code false} otherwise
961      * @throws NullPointerException if the specified key is null
962      */
containsKey(Object key)963     public boolean containsKey(Object key) {
964         return get(key) != null;
965     }
966 
967     /**
968      * Returns {@code true} if this map maps one or more keys to the
969      * specified value. Note: This method may require a full traversal
970      * of the map, and is much slower than method {@code containsKey}.
971      *
972      * @param value value whose presence in this map is to be tested
973      * @return {@code true} if this map maps one or more keys to the
974      *         specified value
975      * @throws NullPointerException if the specified value is null
976      */
containsValue(Object value)977     public boolean containsValue(Object value) {
978         if (value == null)
979             throw new NullPointerException();
980         Node<K,V>[] t;
981         if ((t = table) != null) {
982             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
983             for (Node<K,V> p; (p = it.advance()) != null; ) {
984                 V v;
985                 if ((v = p.val) == value || (v != null && value.equals(v)))
986                     return true;
987             }
988         }
989         return false;
990     }
991 
992     /**
993      * Maps the specified key to the specified value in this table.
994      * Neither the key nor the value can be null.
995      *
996      * <p>The value can be retrieved by calling the {@code get} method
997      * with a key that is equal to the original key.
998      *
999      * @param key key with which the specified value is to be associated
1000      * @param value value to be associated with the specified key
1001      * @return the previous value associated with {@code key}, or
1002      *         {@code null} if there was no mapping for {@code key}
1003      * @throws NullPointerException if the specified key or value is null
1004      */
put(K key, V value)1005     public V put(K key, V value) {
1006         return putVal(key, value, false);
1007     }
1008 
1009     /** Implementation for put and putIfAbsent */
putVal(K key, V value, boolean onlyIfAbsent)1010     final V putVal(K key, V value, boolean onlyIfAbsent) {
1011         if (key == null || value == null) throw new NullPointerException();
1012         int hash = spread(key.hashCode());
1013         int binCount = 0;
1014         for (Node<K,V>[] tab = table;;) {
1015             Node<K,V> f; int n, i, fh; K fk; V fv;
1016             if (tab == null || (n = tab.length) == 0)
1017                 tab = initTable();
1018             else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
1019                 if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
1020                     break;                   // no lock when adding to empty bin
1021             }
1022             else if ((fh = f.hash) == MOVED)
1023                 tab = helpTransfer(tab, f);
1024             else if (onlyIfAbsent // check first node without acquiring lock
1025                      && fh == hash
1026                      && ((fk = f.key) == key || (fk != null && key.equals(fk)))
1027                      && (fv = f.val) != null)
1028                 return fv;
1029             else {
1030                 V oldVal = null;
1031                 synchronized (f) {
1032                     if (tabAt(tab, i) == f) {
1033                         if (fh >= 0) {
1034                             binCount = 1;
1035                             for (Node<K,V> e = f;; ++binCount) {
1036                                 K ek;
1037                                 if (e.hash == hash &&
1038                                     ((ek = e.key) == key ||
1039                                      (ek != null && key.equals(ek)))) {
1040                                     oldVal = e.val;
1041                                     if (!onlyIfAbsent)
1042                                         e.val = value;
1043                                     break;
1044                                 }
1045                                 Node<K,V> pred = e;
1046                                 if ((e = e.next) == null) {
1047                                     pred.next = new Node<K,V>(hash, key, value);
1048                                     break;
1049                                 }
1050                             }
1051                         }
1052                         else if (f instanceof TreeBin) {
1053                             Node<K,V> p;
1054                             binCount = 2;
1055                             if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
1056                                                            value)) != null) {
1057                                 oldVal = p.val;
1058                                 if (!onlyIfAbsent)
1059                                     p.val = value;
1060                             }
1061                         }
1062                         else if (f instanceof ReservationNode)
1063                             throw new IllegalStateException("Recursive update");
1064                     }
1065                 }
1066                 if (binCount != 0) {
1067                     if (binCount >= TREEIFY_THRESHOLD)
1068                         treeifyBin(tab, i);
1069                     if (oldVal != null)
1070                         return oldVal;
1071                     break;
1072                 }
1073             }
1074         }
1075         addCount(1L, binCount);
1076         return null;
1077     }
1078 
1079     /**
1080      * Copies all of the mappings from the specified map to this one.
1081      * These mappings replace any mappings that this map had for any of the
1082      * keys currently in the specified map.
1083      *
1084      * @param m mappings to be stored in this map
1085      */
putAll(Map<? extends K, ? extends V> m)1086     public void putAll(Map<? extends K, ? extends V> m) {
1087         tryPresize(m.size());
1088         for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1089             putVal(e.getKey(), e.getValue(), false);
1090     }
1091 
1092     /**
1093      * Removes the key (and its corresponding value) from this map.
1094      * This method does nothing if the key is not in the map.
1095      *
1096      * @param  key the key that needs to be removed
1097      * @return the previous value associated with {@code key}, or
1098      *         {@code null} if there was no mapping for {@code key}
1099      * @throws NullPointerException if the specified key is null
1100      */
remove(Object key)1101     public V remove(Object key) {
1102         return replaceNode(key, null, null);
1103     }
1104 
1105     /**
1106      * Implementation for the four public remove/replace methods:
1107      * Replaces node value with v, conditional upon match of cv if
1108      * non-null.  If resulting value is null, delete.
1109      */
replaceNode(Object key, V value, Object cv)1110     final V replaceNode(Object key, V value, Object cv) {
1111         int hash = spread(key.hashCode());
1112         for (Node<K,V>[] tab = table;;) {
1113             Node<K,V> f; int n, i, fh;
1114             if (tab == null || (n = tab.length) == 0 ||
1115                 (f = tabAt(tab, i = (n - 1) & hash)) == null)
1116                 break;
1117             else if ((fh = f.hash) == MOVED)
1118                 tab = helpTransfer(tab, f);
1119             else {
1120                 V oldVal = null;
1121                 boolean validated = false;
1122                 synchronized (f) {
1123                     if (tabAt(tab, i) == f) {
1124                         if (fh >= 0) {
1125                             validated = true;
1126                             for (Node<K,V> e = f, pred = null;;) {
1127                                 K ek;
1128                                 if (e.hash == hash &&
1129                                     ((ek = e.key) == key ||
1130                                      (ek != null && key.equals(ek)))) {
1131                                     V ev = e.val;
1132                                     if (cv == null || cv == ev ||
1133                                         (ev != null && cv.equals(ev))) {
1134                                         oldVal = ev;
1135                                         if (value != null)
1136                                             e.val = value;
1137                                         else if (pred != null)
1138                                             pred.next = e.next;
1139                                         else
1140                                             setTabAt(tab, i, e.next);
1141                                     }
1142                                     break;
1143                                 }
1144                                 pred = e;
1145                                 if ((e = e.next) == null)
1146                                     break;
1147                             }
1148                         }
1149                         else if (f instanceof TreeBin) {
1150                             validated = true;
1151                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1152                             TreeNode<K,V> r, p;
1153                             if ((r = t.root) != null &&
1154                                 (p = r.findTreeNode(hash, key, null)) != null) {
1155                                 V pv = p.val;
1156                                 if (cv == null || cv == pv ||
1157                                     (pv != null && cv.equals(pv))) {
1158                                     oldVal = pv;
1159                                     if (value != null)
1160                                         p.val = value;
1161                                     else if (t.removeTreeNode(p))
1162                                         setTabAt(tab, i, untreeify(t.first));
1163                                 }
1164                             }
1165                         }
1166                         else if (f instanceof ReservationNode)
1167                             throw new IllegalStateException("Recursive update");
1168                     }
1169                 }
1170                 if (validated) {
1171                     if (oldVal != null) {
1172                         if (value == null)
1173                             addCount(-1L, -1);
1174                         return oldVal;
1175                     }
1176                     break;
1177                 }
1178             }
1179         }
1180         return null;
1181     }
1182 
1183     /**
1184      * Removes all of the mappings from this map.
1185      */
clear()1186     public void clear() {
1187         long delta = 0L; // negative number of deletions
1188         int i = 0;
1189         Node<K,V>[] tab = table;
1190         while (tab != null && i < tab.length) {
1191             int fh;
1192             Node<K,V> f = tabAt(tab, i);
1193             if (f == null)
1194                 ++i;
1195             else if ((fh = f.hash) == MOVED) {
1196                 tab = helpTransfer(tab, f);
1197                 i = 0; // restart
1198             }
1199             else {
1200                 synchronized (f) {
1201                     if (tabAt(tab, i) == f) {
1202                         Node<K,V> p = (fh >= 0 ? f :
1203                                        (f instanceof TreeBin) ?
1204                                        ((TreeBin<K,V>)f).first : null);
1205                         while (p != null) {
1206                             --delta;
1207                             p = p.next;
1208                         }
1209                         setTabAt(tab, i++, null);
1210                     }
1211                 }
1212             }
1213         }
1214         if (delta != 0L)
1215             addCount(delta, -1);
1216     }
1217 
1218     /**
1219      * Returns a {@link Set} view of the keys contained in this map.
1220      * The set is backed by the map, so changes to the map are
1221      * reflected in the set, and vice-versa. The set supports element
1222      * removal, which removes the corresponding mapping from this map,
1223      * via the {@code Iterator.remove}, {@code Set.remove},
1224      * {@code removeAll}, {@code retainAll}, and {@code clear}
1225      * operations.  It does not support the {@code add} or
1226      * {@code addAll} operations.
1227      *
1228      * <p> The set returned by this method is guaranteed to an instance of
1229      * {@link KeySetView}.
1230      *
1231      * <p>The view's iterators and spliterators are
1232      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1233      *
1234      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1235      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1236      *
1237      * @return the set view
1238      */
1239     // Android-changed: Return type for backwards compat. Was KeySetView<K,V>. http://b/28099367
1240     @dalvik.annotation.codegen.CovariantReturnType(returnType = KeySetView.class, presentAfter = 28)
keySet()1241     public Set<K> keySet() {
1242         KeySetView<K,V> ks;
1243         if ((ks = keySet) != null) return ks;
1244         return keySet = new KeySetView<K,V>(this, null);
1245     }
1246 
1247     /**
1248      * Returns a {@link Collection} view of the values contained in this map.
1249      * The collection is backed by the map, so changes to the map are
1250      * reflected in the collection, and vice-versa.  The collection
1251      * supports element removal, which removes the corresponding
1252      * mapping from this map, via the {@code Iterator.remove},
1253      * {@code Collection.remove}, {@code removeAll},
1254      * {@code retainAll}, and {@code clear} operations.  It does not
1255      * support the {@code add} or {@code addAll} operations.
1256      *
1257      * <p>The view's iterators and spliterators are
1258      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1259      *
1260      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1261      * and {@link Spliterator#NONNULL}.
1262      *
1263      * @return the collection view
1264      */
values()1265     public Collection<V> values() {
1266         ValuesView<K,V> vs;
1267         if ((vs = values) != null) return vs;
1268         return values = new ValuesView<K,V>(this);
1269     }
1270 
1271     /**
1272      * Returns a {@link Set} view of the mappings contained in this map.
1273      * The set is backed by the map, so changes to the map are
1274      * reflected in the set, and vice-versa.  The set supports element
1275      * removal, which removes the corresponding mapping from the map,
1276      * via the {@code Iterator.remove}, {@code Set.remove},
1277      * {@code removeAll}, {@code retainAll}, and {@code clear}
1278      * operations.
1279      *
1280      * <p>The view's iterators and spliterators are
1281      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1282      *
1283      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1284      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1285      *
1286      * @return the set view
1287      */
entrySet()1288     public Set<Map.Entry<K,V>> entrySet() {
1289         EntrySetView<K,V> es;
1290         if ((es = entrySet) != null) return es;
1291         return entrySet = new EntrySetView<K,V>(this);
1292     }
1293 
1294     /**
1295      * Returns the hash code value for this {@link Map}, i.e.,
1296      * the sum of, for each key-value pair in the map,
1297      * {@code key.hashCode() ^ value.hashCode()}.
1298      *
1299      * @return the hash code value for this map
1300      */
hashCode()1301     public int hashCode() {
1302         int h = 0;
1303         Node<K,V>[] t;
1304         if ((t = table) != null) {
1305             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1306             for (Node<K,V> p; (p = it.advance()) != null; )
1307                 h += p.key.hashCode() ^ p.val.hashCode();
1308         }
1309         return h;
1310     }
1311 
1312     /**
1313      * Returns a string representation of this map.  The string
1314      * representation consists of a list of key-value mappings (in no
1315      * particular order) enclosed in braces ("{@code {}}").  Adjacent
1316      * mappings are separated by the characters {@code ", "} (comma
1317      * and space).  Each key-value mapping is rendered as the key
1318      * followed by an equals sign ("{@code =}") followed by the
1319      * associated value.
1320      *
1321      * @return a string representation of this map
1322      */
toString()1323     public String toString() {
1324         Node<K,V>[] t;
1325         int f = (t = table) == null ? 0 : t.length;
1326         Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1327         StringBuilder sb = new StringBuilder();
1328         sb.append('{');
1329         Node<K,V> p;
1330         if ((p = it.advance()) != null) {
1331             for (;;) {
1332                 K k = p.key;
1333                 V v = p.val;
1334                 sb.append(k == this ? "(this Map)" : k);
1335                 sb.append('=');
1336                 sb.append(v == this ? "(this Map)" : v);
1337                 if ((p = it.advance()) == null)
1338                     break;
1339                 sb.append(',').append(' ');
1340             }
1341         }
1342         return sb.append('}').toString();
1343     }
1344 
1345     /**
1346      * Compares the specified object with this map for equality.
1347      * Returns {@code true} if the given object is a map with the same
1348      * mappings as this map.  This operation may return misleading
1349      * results if either map is concurrently modified during execution
1350      * of this method.
1351      *
1352      * @param o object to be compared for equality with this map
1353      * @return {@code true} if the specified object is equal to this map
1354      */
equals(Object o)1355     public boolean equals(Object o) {
1356         if (o != this) {
1357             if (!(o instanceof Map))
1358                 return false;
1359             Map<?,?> m = (Map<?,?>) o;
1360             Node<K,V>[] t;
1361             int f = (t = table) == null ? 0 : t.length;
1362             Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1363             for (Node<K,V> p; (p = it.advance()) != null; ) {
1364                 V val = p.val;
1365                 Object v = m.get(p.key);
1366                 if (v == null || (v != val && !v.equals(val)))
1367                     return false;
1368             }
1369             for (Map.Entry<?,?> e : m.entrySet()) {
1370                 Object mk, mv, v;
1371                 if ((mk = e.getKey()) == null ||
1372                     (mv = e.getValue()) == null ||
1373                     (v = get(mk)) == null ||
1374                     (mv != v && !mv.equals(v)))
1375                     return false;
1376             }
1377         }
1378         return true;
1379     }
1380 
1381     /**
1382      * Stripped-down version of helper class used in previous version,
1383      * declared for the sake of serialization compatibility.
1384      */
1385     static class Segment<K,V> extends ReentrantLock implements Serializable {
1386         private static final long serialVersionUID = 2249069246763182397L;
1387         final float loadFactor;
Segment(float lf)1388         Segment(float lf) { this.loadFactor = lf; }
1389     }
1390 
1391     /**
1392      * Saves this map to a stream (that is, serializes it).
1393      *
1394      * @param s the stream
1395      * @throws java.io.IOException if an I/O error occurs
1396      * @serialData
1397      * the serialized fields, followed by the key (Object) and value
1398      * (Object) for each key-value mapping, followed by a null pair.
1399      * The key-value mappings are emitted in no particular order.
1400      */
writeObject(java.io.ObjectOutputStream s)1401     private void writeObject(java.io.ObjectOutputStream s)
1402         throws java.io.IOException {
1403         // For serialization compatibility
1404         // Emulate segment calculation from previous version of this class
1405         int sshift = 0;
1406         int ssize = 1;
1407         while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1408             ++sshift;
1409             ssize <<= 1;
1410         }
1411         int segmentShift = 32 - sshift;
1412         int segmentMask = ssize - 1;
1413         @SuppressWarnings("unchecked")
1414         Segment<K,V>[] segments = (Segment<K,V>[])
1415             new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1416         for (int i = 0; i < segments.length; ++i)
1417             segments[i] = new Segment<K,V>(LOAD_FACTOR);
1418         java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1419         streamFields.put("segments", segments);
1420         streamFields.put("segmentShift", segmentShift);
1421         streamFields.put("segmentMask", segmentMask);
1422         s.writeFields();
1423 
1424         Node<K,V>[] t;
1425         if ((t = table) != null) {
1426             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1427             for (Node<K,V> p; (p = it.advance()) != null; ) {
1428                 s.writeObject(p.key);
1429                 s.writeObject(p.val);
1430             }
1431         }
1432         s.writeObject(null);
1433         s.writeObject(null);
1434     }
1435 
1436     /**
1437      * Reconstitutes this map from a stream (that is, deserializes it).
1438      * @param s the stream
1439      * @throws ClassNotFoundException if the class of a serialized object
1440      *         could not be found
1441      * @throws java.io.IOException if an I/O error occurs
1442      */
readObject(java.io.ObjectInputStream s)1443     private void readObject(java.io.ObjectInputStream s)
1444         throws java.io.IOException, ClassNotFoundException {
1445         /*
1446          * To improve performance in typical cases, we create nodes
1447          * while reading, then place in table once size is known.
1448          * However, we must also validate uniqueness and deal with
1449          * overpopulated bins while doing so, which requires
1450          * specialized versions of putVal mechanics.
1451          */
1452         sizeCtl = -1; // force exclusion for table construction
1453         s.defaultReadObject();
1454         long size = 0L;
1455         Node<K,V> p = null;
1456         for (;;) {
1457             @SuppressWarnings("unchecked")
1458             K k = (K) s.readObject();
1459             @SuppressWarnings("unchecked")
1460             V v = (V) s.readObject();
1461             if (k != null && v != null) {
1462                 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1463                 ++size;
1464             }
1465             else
1466                 break;
1467         }
1468         if (size == 0L)
1469             sizeCtl = 0;
1470         else {
1471             long ts = (long)(1.0 + size / LOAD_FACTOR);
1472             int n = (ts >= (long)MAXIMUM_CAPACITY) ?
1473                 MAXIMUM_CAPACITY : tableSizeFor((int)ts);
1474             @SuppressWarnings("unchecked")
1475             Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1476             int mask = n - 1;
1477             long added = 0L;
1478             while (p != null) {
1479                 boolean insertAtFront;
1480                 Node<K,V> next = p.next, first;
1481                 int h = p.hash, j = h & mask;
1482                 if ((first = tabAt(tab, j)) == null)
1483                     insertAtFront = true;
1484                 else {
1485                     K k = p.key;
1486                     if (first.hash < 0) {
1487                         TreeBin<K,V> t = (TreeBin<K,V>)first;
1488                         if (t.putTreeVal(h, k, p.val) == null)
1489                             ++added;
1490                         insertAtFront = false;
1491                     }
1492                     else {
1493                         int binCount = 0;
1494                         insertAtFront = true;
1495                         Node<K,V> q; K qk;
1496                         for (q = first; q != null; q = q.next) {
1497                             if (q.hash == h &&
1498                                 ((qk = q.key) == k ||
1499                                  (qk != null && k.equals(qk)))) {
1500                                 insertAtFront = false;
1501                                 break;
1502                             }
1503                             ++binCount;
1504                         }
1505                         if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1506                             insertAtFront = false;
1507                             ++added;
1508                             p.next = first;
1509                             TreeNode<K,V> hd = null, tl = null;
1510                             for (q = p; q != null; q = q.next) {
1511                                 TreeNode<K,V> t = new TreeNode<K,V>
1512                                     (q.hash, q.key, q.val, null, null);
1513                                 if ((t.prev = tl) == null)
1514                                     hd = t;
1515                                 else
1516                                     tl.next = t;
1517                                 tl = t;
1518                             }
1519                             setTabAt(tab, j, new TreeBin<K,V>(hd));
1520                         }
1521                     }
1522                 }
1523                 if (insertAtFront) {
1524                     ++added;
1525                     p.next = first;
1526                     setTabAt(tab, j, p);
1527                 }
1528                 p = next;
1529             }
1530             table = tab;
1531             sizeCtl = n - (n >>> 2);
1532             baseCount = added;
1533         }
1534     }
1535 
1536     // ConcurrentMap methods
1537 
1538     /**
1539      * {@inheritDoc}
1540      *
1541      * @return the previous value associated with the specified key,
1542      *         or {@code null} if there was no mapping for the key
1543      * @throws NullPointerException if the specified key or value is null
1544      */
putIfAbsent(K key, V value)1545     public V putIfAbsent(K key, V value) {
1546         return putVal(key, value, true);
1547     }
1548 
1549     /**
1550      * {@inheritDoc}
1551      *
1552      * @throws NullPointerException if the specified key is null
1553      */
remove(Object key, Object value)1554     public boolean remove(Object key, Object value) {
1555         if (key == null)
1556             throw new NullPointerException();
1557         return value != null && replaceNode(key, null, value) != null;
1558     }
1559 
1560     /**
1561      * {@inheritDoc}
1562      *
1563      * @throws NullPointerException if any of the arguments are null
1564      */
replace(K key, V oldValue, V newValue)1565     public boolean replace(K key, V oldValue, V newValue) {
1566         if (key == null || oldValue == null || newValue == null)
1567             throw new NullPointerException();
1568         return replaceNode(key, newValue, oldValue) != null;
1569     }
1570 
1571     /**
1572      * {@inheritDoc}
1573      *
1574      * @return the previous value associated with the specified key,
1575      *         or {@code null} if there was no mapping for the key
1576      * @throws NullPointerException if the specified key or value is null
1577      */
replace(K key, V value)1578     public V replace(K key, V value) {
1579         if (key == null || value == null)
1580             throw new NullPointerException();
1581         return replaceNode(key, value, null);
1582     }
1583 
1584     // Overrides of JDK8+ Map extension method defaults
1585 
1586     /**
1587      * Returns the value to which the specified key is mapped, or the
1588      * given default value if this map contains no mapping for the
1589      * key.
1590      *
1591      * @param key the key whose associated value is to be returned
1592      * @param defaultValue the value to return if this map contains
1593      * no mapping for the given key
1594      * @return the mapping for the key, if present; else the default value
1595      * @throws NullPointerException if the specified key is null
1596      */
getOrDefault(Object key, V defaultValue)1597     public V getOrDefault(Object key, V defaultValue) {
1598         V v;
1599         return (v = get(key)) == null ? defaultValue : v;
1600     }
1601 
forEach(BiConsumer<? super K, ? super V> action)1602     public void forEach(BiConsumer<? super K, ? super V> action) {
1603         if (action == null) throw new NullPointerException();
1604         Node<K,V>[] t;
1605         if ((t = table) != null) {
1606             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1607             for (Node<K,V> p; (p = it.advance()) != null; ) {
1608                 action.accept(p.key, p.val);
1609             }
1610         }
1611     }
1612 
replaceAll(BiFunction<? super K, ? super V, ? extends V> function)1613     public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1614         if (function == null) throw new NullPointerException();
1615         Node<K,V>[] t;
1616         if ((t = table) != null) {
1617             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1618             for (Node<K,V> p; (p = it.advance()) != null; ) {
1619                 V oldValue = p.val;
1620                 for (K key = p.key;;) {
1621                     V newValue = function.apply(key, oldValue);
1622                     if (newValue == null)
1623                         throw new NullPointerException();
1624                     if (replaceNode(key, newValue, oldValue) != null ||
1625                         (oldValue = get(key)) == null)
1626                         break;
1627                 }
1628             }
1629         }
1630     }
1631 
1632     /**
1633      * Helper method for EntrySetView.removeIf.
1634      */
removeEntryIf(Predicate<? super Entry<K,V>> function)1635     boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1636         if (function == null) throw new NullPointerException();
1637         Node<K,V>[] t;
1638         boolean removed = false;
1639         if ((t = table) != null) {
1640             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1641             for (Node<K,V> p; (p = it.advance()) != null; ) {
1642                 K k = p.key;
1643                 V v = p.val;
1644                 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1645                 if (function.test(e) && replaceNode(k, null, v) != null)
1646                     removed = true;
1647             }
1648         }
1649         return removed;
1650     }
1651 
1652     /**
1653      * Helper method for ValuesView.removeIf.
1654      */
removeValueIf(Predicate<? super V> function)1655     boolean removeValueIf(Predicate<? super V> function) {
1656         if (function == null) throw new NullPointerException();
1657         Node<K,V>[] t;
1658         boolean removed = false;
1659         if ((t = table) != null) {
1660             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1661             for (Node<K,V> p; (p = it.advance()) != null; ) {
1662                 K k = p.key;
1663                 V v = p.val;
1664                 if (function.test(v) && replaceNode(k, null, v) != null)
1665                     removed = true;
1666             }
1667         }
1668         return removed;
1669     }
1670 
1671     /**
1672      * If the specified key is not already associated with a value,
1673      * attempts to compute its value using the given mapping function
1674      * and enters it into this map unless {@code null}.  The entire
1675      * method invocation is performed atomically.  The supplied
1676      * function is invoked exactly once per invocation of this method
1677      * if the key is absent, else not at all.  Some attempted update
1678      * operations on this map by other threads may be blocked while
1679      * computation is in progress, so the computation should be short
1680      * and simple.
1681      *
1682      * <p>The mapping function must not modify this map during computation.
1683      *
1684      * @param key key with which the specified value is to be associated
1685      * @param mappingFunction the function to compute a value
1686      * @return the current (existing or computed) value associated with
1687      *         the specified key, or null if the computed value is null
1688      * @throws NullPointerException if the specified key or mappingFunction
1689      *         is null
1690      * @throws IllegalStateException if the computation detectably
1691      *         attempts a recursive update to this map that would
1692      *         otherwise never complete
1693      * @throws RuntimeException or Error if the mappingFunction does so,
1694      *         in which case the mapping is left unestablished
1695      */
computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction)1696     public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1697         if (key == null || mappingFunction == null)
1698             throw new NullPointerException();
1699         int h = spread(key.hashCode());
1700         V val = null;
1701         int binCount = 0;
1702         for (Node<K,V>[] tab = table;;) {
1703             Node<K,V> f; int n, i, fh; K fk; V fv;
1704             if (tab == null || (n = tab.length) == 0)
1705                 tab = initTable();
1706             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1707                 Node<K,V> r = new ReservationNode<K,V>();
1708                 synchronized (r) {
1709                     if (casTabAt(tab, i, null, r)) {
1710                         binCount = 1;
1711                         Node<K,V> node = null;
1712                         try {
1713                             if ((val = mappingFunction.apply(key)) != null)
1714                                 node = new Node<K,V>(h, key, val);
1715                         } finally {
1716                             setTabAt(tab, i, node);
1717                         }
1718                     }
1719                 }
1720                 if (binCount != 0)
1721                     break;
1722             }
1723             else if ((fh = f.hash) == MOVED)
1724                 tab = helpTransfer(tab, f);
1725             else if (fh == h    // check first node without acquiring lock
1726                      && ((fk = f.key) == key || (fk != null && key.equals(fk)))
1727                      && (fv = f.val) != null)
1728                 return fv;
1729             else {
1730                 boolean added = false;
1731                 synchronized (f) {
1732                     if (tabAt(tab, i) == f) {
1733                         if (fh >= 0) {
1734                             binCount = 1;
1735                             for (Node<K,V> e = f;; ++binCount) {
1736                                 K ek;
1737                                 if (e.hash == h &&
1738                                     ((ek = e.key) == key ||
1739                                      (ek != null && key.equals(ek)))) {
1740                                     val = e.val;
1741                                     break;
1742                                 }
1743                                 Node<K,V> pred = e;
1744                                 if ((e = e.next) == null) {
1745                                     if ((val = mappingFunction.apply(key)) != null) {
1746                                         if (pred.next != null)
1747                                             throw new IllegalStateException("Recursive update");
1748                                         added = true;
1749                                         pred.next = new Node<K,V>(h, key, val);
1750                                     }
1751                                     break;
1752                                 }
1753                             }
1754                         }
1755                         else if (f instanceof TreeBin) {
1756                             binCount = 2;
1757                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1758                             TreeNode<K,V> r, p;
1759                             if ((r = t.root) != null &&
1760                                 (p = r.findTreeNode(h, key, null)) != null)
1761                                 val = p.val;
1762                             else if ((val = mappingFunction.apply(key)) != null) {
1763                                 added = true;
1764                                 t.putTreeVal(h, key, val);
1765                             }
1766                         }
1767                         else if (f instanceof ReservationNode)
1768                             throw new IllegalStateException("Recursive update");
1769                     }
1770                 }
1771                 if (binCount != 0) {
1772                     if (binCount >= TREEIFY_THRESHOLD)
1773                         treeifyBin(tab, i);
1774                     if (!added)
1775                         return val;
1776                     break;
1777                 }
1778             }
1779         }
1780         if (val != null)
1781             addCount(1L, binCount);
1782         return val;
1783     }
1784 
1785     /**
1786      * If the value for the specified key is present, attempts to
1787      * compute a new mapping given the key and its current mapped
1788      * value.  The entire method invocation is performed atomically.
1789      * The supplied function is invoked exactly once per invocation of
1790      * this method if the key is present, else not at all.  Some
1791      * attempted update operations on this map by other threads may be
1792      * blocked while computation is in progress, so the computation
1793      * should be short and simple.
1794      *
1795      * <p>The remapping function must not modify this map during computation.
1796      *
1797      * @param key key with which a value may be associated
1798      * @param remappingFunction the function to compute a value
1799      * @return the new value associated with the specified key, or null if none
1800      * @throws NullPointerException if the specified key or remappingFunction
1801      *         is null
1802      * @throws IllegalStateException if the computation detectably
1803      *         attempts a recursive update to this map that would
1804      *         otherwise never complete
1805      * @throws RuntimeException or Error if the remappingFunction does so,
1806      *         in which case the mapping is unchanged
1807      */
computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1808     public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1809         if (key == null || remappingFunction == null)
1810             throw new NullPointerException();
1811         int h = spread(key.hashCode());
1812         V val = null;
1813         int delta = 0;
1814         int binCount = 0;
1815         for (Node<K,V>[] tab = table;;) {
1816             Node<K,V> f; int n, i, fh;
1817             if (tab == null || (n = tab.length) == 0)
1818                 tab = initTable();
1819             else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1820                 break;
1821             else if ((fh = f.hash) == MOVED)
1822                 tab = helpTransfer(tab, f);
1823             else {
1824                 synchronized (f) {
1825                     if (tabAt(tab, i) == f) {
1826                         if (fh >= 0) {
1827                             binCount = 1;
1828                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1829                                 K ek;
1830                                 if (e.hash == h &&
1831                                     ((ek = e.key) == key ||
1832                                      (ek != null && key.equals(ek)))) {
1833                                     val = remappingFunction.apply(key, e.val);
1834                                     if (val != null)
1835                                         e.val = val;
1836                                     else {
1837                                         delta = -1;
1838                                         Node<K,V> en = e.next;
1839                                         if (pred != null)
1840                                             pred.next = en;
1841                                         else
1842                                             setTabAt(tab, i, en);
1843                                     }
1844                                     break;
1845                                 }
1846                                 pred = e;
1847                                 if ((e = e.next) == null)
1848                                     break;
1849                             }
1850                         }
1851                         else if (f instanceof TreeBin) {
1852                             binCount = 2;
1853                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1854                             TreeNode<K,V> r, p;
1855                             if ((r = t.root) != null &&
1856                                 (p = r.findTreeNode(h, key, null)) != null) {
1857                                 val = remappingFunction.apply(key, p.val);
1858                                 if (val != null)
1859                                     p.val = val;
1860                                 else {
1861                                     delta = -1;
1862                                     if (t.removeTreeNode(p))
1863                                         setTabAt(tab, i, untreeify(t.first));
1864                                 }
1865                             }
1866                         }
1867                         else if (f instanceof ReservationNode)
1868                             throw new IllegalStateException("Recursive update");
1869                     }
1870                 }
1871                 if (binCount != 0)
1872                     break;
1873             }
1874         }
1875         if (delta != 0)
1876             addCount((long)delta, binCount);
1877         return val;
1878     }
1879 
1880     /**
1881      * Attempts to compute a mapping for the specified key and its
1882      * current mapped value (or {@code null} if there is no current
1883      * mapping). The entire method invocation is performed atomically.
1884      * The supplied function is invoked exactly once per invocation of
1885      * this method.  Some attempted update operations on this map by
1886      * other threads may be blocked while computation is in progress,
1887      * so the computation should be short and simple.
1888      *
1889      * <p>The remapping function must not modify this map during computation.
1890      *
1891      * @param key key with which the specified value is to be associated
1892      * @param remappingFunction the function to compute a value
1893      * @return the new value associated with the specified key, or null if none
1894      * @throws NullPointerException if the specified key or remappingFunction
1895      *         is null
1896      * @throws IllegalStateException if the computation detectably
1897      *         attempts a recursive update to this map that would
1898      *         otherwise never complete
1899      * @throws RuntimeException or Error if the remappingFunction does so,
1900      *         in which case the mapping is unchanged
1901      */
compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction)1902     public V compute(K key,
1903                      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1904         if (key == null || remappingFunction == null)
1905             throw new NullPointerException();
1906         int h = spread(key.hashCode());
1907         V val = null;
1908         int delta = 0;
1909         int binCount = 0;
1910         for (Node<K,V>[] tab = table;;) {
1911             Node<K,V> f; int n, i, fh;
1912             if (tab == null || (n = tab.length) == 0)
1913                 tab = initTable();
1914             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1915                 Node<K,V> r = new ReservationNode<K,V>();
1916                 synchronized (r) {
1917                     if (casTabAt(tab, i, null, r)) {
1918                         binCount = 1;
1919                         Node<K,V> node = null;
1920                         try {
1921                             if ((val = remappingFunction.apply(key, null)) != null) {
1922                                 delta = 1;
1923                                 node = new Node<K,V>(h, key, val);
1924                             }
1925                         } finally {
1926                             setTabAt(tab, i, node);
1927                         }
1928                     }
1929                 }
1930                 if (binCount != 0)
1931                     break;
1932             }
1933             else if ((fh = f.hash) == MOVED)
1934                 tab = helpTransfer(tab, f);
1935             else {
1936                 synchronized (f) {
1937                     if (tabAt(tab, i) == f) {
1938                         if (fh >= 0) {
1939                             binCount = 1;
1940                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1941                                 K ek;
1942                                 if (e.hash == h &&
1943                                     ((ek = e.key) == key ||
1944                                      (ek != null && key.equals(ek)))) {
1945                                     val = remappingFunction.apply(key, e.val);
1946                                     if (val != null)
1947                                         e.val = val;
1948                                     else {
1949                                         delta = -1;
1950                                         Node<K,V> en = e.next;
1951                                         if (pred != null)
1952                                             pred.next = en;
1953                                         else
1954                                             setTabAt(tab, i, en);
1955                                     }
1956                                     break;
1957                                 }
1958                                 pred = e;
1959                                 if ((e = e.next) == null) {
1960                                     val = remappingFunction.apply(key, null);
1961                                     if (val != null) {
1962                                         if (pred.next != null)
1963                                             throw new IllegalStateException("Recursive update");
1964                                         delta = 1;
1965                                         pred.next = new Node<K,V>(h, key, val);
1966                                     }
1967                                     break;
1968                                 }
1969                             }
1970                         }
1971                         else if (f instanceof TreeBin) {
1972                             binCount = 1;
1973                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1974                             TreeNode<K,V> r, p;
1975                             if ((r = t.root) != null)
1976                                 p = r.findTreeNode(h, key, null);
1977                             else
1978                                 p = null;
1979                             V pv = (p == null) ? null : p.val;
1980                             val = remappingFunction.apply(key, pv);
1981                             if (val != null) {
1982                                 if (p != null)
1983                                     p.val = val;
1984                                 else {
1985                                     delta = 1;
1986                                     t.putTreeVal(h, key, val);
1987                                 }
1988                             }
1989                             else if (p != null) {
1990                                 delta = -1;
1991                                 if (t.removeTreeNode(p))
1992                                     setTabAt(tab, i, untreeify(t.first));
1993                             }
1994                         }
1995                         else if (f instanceof ReservationNode)
1996                             throw new IllegalStateException("Recursive update");
1997                     }
1998                 }
1999                 if (binCount != 0) {
2000                     if (binCount >= TREEIFY_THRESHOLD)
2001                         treeifyBin(tab, i);
2002                     break;
2003                 }
2004             }
2005         }
2006         if (delta != 0)
2007             addCount((long)delta, binCount);
2008         return val;
2009     }
2010 
2011     /**
2012      * If the specified key is not already associated with a
2013      * (non-null) value, associates it with the given value.
2014      * Otherwise, replaces the value with the results of the given
2015      * remapping function, or removes if {@code null}. The entire
2016      * method invocation is performed atomically.  Some attempted
2017      * update operations on this map by other threads may be blocked
2018      * while computation is in progress, so the computation should be
2019      * short and simple, and must not attempt to update any other
2020      * mappings of this Map.
2021      *
2022      * @param key key with which the specified value is to be associated
2023      * @param value the value to use if absent
2024      * @param remappingFunction the function to recompute a value if present
2025      * @return the new value associated with the specified key, or null if none
2026      * @throws NullPointerException if the specified key or the
2027      *         remappingFunction is null
2028      * @throws RuntimeException or Error if the remappingFunction does so,
2029      *         in which case the mapping is unchanged
2030      */
merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction)2031     public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2032         if (key == null || value == null || remappingFunction == null)
2033             throw new NullPointerException();
2034         int h = spread(key.hashCode());
2035         V val = null;
2036         int delta = 0;
2037         int binCount = 0;
2038         for (Node<K,V>[] tab = table;;) {
2039             Node<K,V> f; int n, i, fh;
2040             if (tab == null || (n = tab.length) == 0)
2041                 tab = initTable();
2042             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
2043                 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
2044                     delta = 1;
2045                     val = value;
2046                     break;
2047                 }
2048             }
2049             else if ((fh = f.hash) == MOVED)
2050                 tab = helpTransfer(tab, f);
2051             else {
2052                 synchronized (f) {
2053                     if (tabAt(tab, i) == f) {
2054                         if (fh >= 0) {
2055                             binCount = 1;
2056                             for (Node<K,V> e = f, pred = null;; ++binCount) {
2057                                 K ek;
2058                                 if (e.hash == h &&
2059                                     ((ek = e.key) == key ||
2060                                      (ek != null && key.equals(ek)))) {
2061                                     val = remappingFunction.apply(e.val, value);
2062                                     if (val != null)
2063                                         e.val = val;
2064                                     else {
2065                                         delta = -1;
2066                                         Node<K,V> en = e.next;
2067                                         if (pred != null)
2068                                             pred.next = en;
2069                                         else
2070                                             setTabAt(tab, i, en);
2071                                     }
2072                                     break;
2073                                 }
2074                                 pred = e;
2075                                 if ((e = e.next) == null) {
2076                                     delta = 1;
2077                                     val = value;
2078                                     pred.next = new Node<K,V>(h, key, val);
2079                                     break;
2080                                 }
2081                             }
2082                         }
2083                         else if (f instanceof TreeBin) {
2084                             binCount = 2;
2085                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2086                             TreeNode<K,V> r = t.root;
2087                             TreeNode<K,V> p = (r == null) ? null :
2088                                 r.findTreeNode(h, key, null);
2089                             val = (p == null) ? value :
2090                                 remappingFunction.apply(p.val, value);
2091                             if (val != null) {
2092                                 if (p != null)
2093                                     p.val = val;
2094                                 else {
2095                                     delta = 1;
2096                                     t.putTreeVal(h, key, val);
2097                                 }
2098                             }
2099                             else if (p != null) {
2100                                 delta = -1;
2101                                 if (t.removeTreeNode(p))
2102                                     setTabAt(tab, i, untreeify(t.first));
2103                             }
2104                         }
2105                         else if (f instanceof ReservationNode)
2106                             throw new IllegalStateException("Recursive update");
2107                     }
2108                 }
2109                 if (binCount != 0) {
2110                     if (binCount >= TREEIFY_THRESHOLD)
2111                         treeifyBin(tab, i);
2112                     break;
2113                 }
2114             }
2115         }
2116         if (delta != 0)
2117             addCount((long)delta, binCount);
2118         return val;
2119     }
2120 
2121     // Hashtable legacy methods
2122 
2123     /**
2124      * Tests if some key maps into the specified value in this table.
2125      *
2126      * <p>Note that this method is identical in functionality to
2127      * {@link #containsValue(Object)}, and exists solely to ensure
2128      * full compatibility with class {@link java.util.Hashtable},
2129      * which supported this method prior to introduction of the
2130      * Java Collections Framework.
2131      *
2132      * @param  value a value to search for
2133      * @return {@code true} if and only if some key maps to the
2134      *         {@code value} argument in this table as
2135      *         determined by the {@code equals} method;
2136      *         {@code false} otherwise
2137      * @throws NullPointerException if the specified value is null
2138      */
contains(Object value)2139     public boolean contains(Object value) {
2140         return containsValue(value);
2141     }
2142 
2143     /**
2144      * Returns an enumeration of the keys in this table.
2145      *
2146      * @return an enumeration of the keys in this table
2147      * @see #keySet()
2148      */
keys()2149     public Enumeration<K> keys() {
2150         Node<K,V>[] t;
2151         int f = (t = table) == null ? 0 : t.length;
2152         return new KeyIterator<K,V>(t, f, 0, f, this);
2153     }
2154 
2155     /**
2156      * Returns an enumeration of the values in this table.
2157      *
2158      * @return an enumeration of the values in this table
2159      * @see #values()
2160      */
elements()2161     public Enumeration<V> elements() {
2162         Node<K,V>[] t;
2163         int f = (t = table) == null ? 0 : t.length;
2164         return new ValueIterator<K,V>(t, f, 0, f, this);
2165     }
2166 
2167     // ConcurrentHashMap-only methods
2168 
2169     /**
2170      * Returns the number of mappings. This method should be used
2171      * instead of {@link #size} because a ConcurrentHashMap may
2172      * contain more mappings than can be represented as an int. The
2173      * value returned is an estimate; the actual count may differ if
2174      * there are concurrent insertions or removals.
2175      *
2176      * @return the number of mappings
2177      * @since 1.8
2178      */
mappingCount()2179     public long mappingCount() {
2180         long n = sumCount();
2181         return (n < 0L) ? 0L : n; // ignore transient negative values
2182     }
2183 
2184     /**
2185      * Creates a new {@link Set} backed by a ConcurrentHashMap
2186      * from the given type to {@code Boolean.TRUE}.
2187      *
2188      * @param <K> the element type of the returned set
2189      * @return the new set
2190      * @since 1.8
2191      */
newKeySet()2192     public static <K> KeySetView<K,Boolean> newKeySet() {
2193         return new KeySetView<K,Boolean>
2194             (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2195     }
2196 
2197     /**
2198      * Creates a new {@link Set} backed by a ConcurrentHashMap
2199      * from the given type to {@code Boolean.TRUE}.
2200      *
2201      * @param initialCapacity The implementation performs internal
2202      * sizing to accommodate this many elements.
2203      * @param <K> the element type of the returned set
2204      * @return the new set
2205      * @throws IllegalArgumentException if the initial capacity of
2206      * elements is negative
2207      * @since 1.8
2208      */
newKeySet(int initialCapacity)2209     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2210         return new KeySetView<K,Boolean>
2211             (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2212     }
2213 
2214     /**
2215      * Returns a {@link Set} view of the keys in this map, using the
2216      * given common mapped value for any additions (i.e., {@link
2217      * Collection#add} and {@link Collection#addAll(Collection)}).
2218      * This is of course only appropriate if it is acceptable to use
2219      * the same value for all additions from this view.
2220      *
2221      * @param mappedValue the mapped value to use for any additions
2222      * @return the set view
2223      * @throws NullPointerException if the mappedValue is null
2224      */
keySet(V mappedValue)2225     public KeySetView<K,V> keySet(V mappedValue) {
2226         if (mappedValue == null)
2227             throw new NullPointerException();
2228         return new KeySetView<K,V>(this, mappedValue);
2229     }
2230 
2231     /* ---------------- Special Nodes -------------- */
2232 
2233     /**
2234      * A node inserted at head of bins during transfer operations.
2235      */
2236     static final class ForwardingNode<K,V> extends Node<K,V> {
2237         final Node<K,V>[] nextTable;
ForwardingNode(Node<K,V>[] tab)2238         ForwardingNode(Node<K,V>[] tab) {
2239             super(MOVED, null, null);
2240             this.nextTable = tab;
2241         }
2242 
find(int h, Object k)2243         Node<K,V> find(int h, Object k) {
2244             // loop to avoid arbitrarily deep recursion on forwarding nodes
2245             outer: for (Node<K,V>[] tab = nextTable;;) {
2246                 Node<K,V> e; int n;
2247                 if (k == null || tab == null || (n = tab.length) == 0 ||
2248                     (e = tabAt(tab, (n - 1) & h)) == null)
2249                     return null;
2250                 for (;;) {
2251                     int eh; K ek;
2252                     if ((eh = e.hash) == h &&
2253                         ((ek = e.key) == k || (ek != null && k.equals(ek))))
2254                         return e;
2255                     if (eh < 0) {
2256                         if (e instanceof ForwardingNode) {
2257                             tab = ((ForwardingNode<K,V>)e).nextTable;
2258                             continue outer;
2259                         }
2260                         else
2261                             return e.find(h, k);
2262                     }
2263                     if ((e = e.next) == null)
2264                         return null;
2265                 }
2266             }
2267         }
2268     }
2269 
2270     /**
2271      * A place-holder node used in computeIfAbsent and compute.
2272      */
2273     static final class ReservationNode<K,V> extends Node<K,V> {
ReservationNode()2274         ReservationNode() {
2275             super(RESERVED, null, null);
2276         }
2277 
find(int h, Object k)2278         Node<K,V> find(int h, Object k) {
2279             return null;
2280         }
2281     }
2282 
2283     /* ---------------- Table Initialization and Resizing -------------- */
2284 
2285     /**
2286      * Returns the stamp bits for resizing a table of size n.
2287      * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2288      */
resizeStamp(int n)2289     static final int resizeStamp(int n) {
2290         return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2291     }
2292 
2293     /**
2294      * Initializes table, using the size recorded in sizeCtl.
2295      */
initTable()2296     private final Node<K,V>[] initTable() {
2297         Node<K,V>[] tab; int sc;
2298         while ((tab = table) == null || tab.length == 0) {
2299             if ((sc = sizeCtl) < 0)
2300                 Thread.yield(); // lost initialization race; just spin
2301             else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2302                 try {
2303                     if ((tab = table) == null || tab.length == 0) {
2304                         int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2305                         @SuppressWarnings("unchecked")
2306                         Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2307                         table = tab = nt;
2308                         sc = n - (n >>> 2);
2309                     }
2310                 } finally {
2311                     sizeCtl = sc;
2312                 }
2313                 break;
2314             }
2315         }
2316         return tab;
2317     }
2318 
2319     /**
2320      * Adds to count, and if table is too small and not already
2321      * resizing, initiates transfer. If already resizing, helps
2322      * perform transfer if work is available.  Rechecks occupancy
2323      * after a transfer to see if another resize is already needed
2324      * because resizings are lagging additions.
2325      *
2326      * @param x the count to add
2327      * @param check if <0, don't check resize, if <= 1 only check if uncontended
2328      */
addCount(long x, int check)2329     private final void addCount(long x, int check) {
2330         CounterCell[] cs; long b, s;
2331         if ((cs = counterCells) != null ||
2332             !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2333             CounterCell c; long v; int m;
2334             boolean uncontended = true;
2335             if (cs == null || (m = cs.length - 1) < 0 ||
2336                 (c = cs[ThreadLocalRandom.getProbe() & m]) == null ||
2337                 !(uncontended =
2338                   U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))) {
2339                 fullAddCount(x, uncontended);
2340                 return;
2341             }
2342             if (check <= 1)
2343                 return;
2344             s = sumCount();
2345         }
2346         if (check >= 0) {
2347             Node<K,V>[] tab, nt; int n, sc;
2348             while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2349                    (n = tab.length) < MAXIMUM_CAPACITY) {
2350                 int rs = resizeStamp(n) << RESIZE_STAMP_SHIFT;
2351                 if (sc < 0) {
2352                     if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
2353                         (nt = nextTable) == null || transferIndex <= 0)
2354                         break;
2355                     if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1))
2356                         transfer(tab, nt);
2357                 }
2358                 else if (U.compareAndSetInt(this, SIZECTL, sc, rs + 2))
2359                     transfer(tab, null);
2360                 s = sumCount();
2361             }
2362         }
2363     }
2364 
2365     /**
2366      * Helps transfer if a resize is in progress.
2367      */
helpTransfer(Node<K,V>[] tab, Node<K,V> f)2368     final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2369         Node<K,V>[] nextTab; int sc;
2370         if (tab != null && (f instanceof ForwardingNode) &&
2371             (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2372             int rs = resizeStamp(tab.length) << RESIZE_STAMP_SHIFT;
2373             while (nextTab == nextTable && table == tab &&
2374                    (sc = sizeCtl) < 0) {
2375                 if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
2376                     transferIndex <= 0)
2377                     break;
2378                 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) {
2379                     transfer(tab, nextTab);
2380                     break;
2381                 }
2382             }
2383             return nextTab;
2384         }
2385         return table;
2386     }
2387 
2388     /**
2389      * Tries to presize table to accommodate the given number of elements.
2390      *
2391      * @param size number of elements (doesn't need to be perfectly accurate)
2392      */
tryPresize(int size)2393     private final void tryPresize(int size) {
2394         int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2395             tableSizeFor(size + (size >>> 1) + 1);
2396         int sc;
2397         while ((sc = sizeCtl) >= 0) {
2398             Node<K,V>[] tab = table; int n;
2399             if (tab == null || (n = tab.length) == 0) {
2400                 n = (sc > c) ? sc : c;
2401                 if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2402                     try {
2403                         if (table == tab) {
2404                             @SuppressWarnings("unchecked")
2405                             Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2406                             table = nt;
2407                             sc = n - (n >>> 2);
2408                         }
2409                     } finally {
2410                         sizeCtl = sc;
2411                     }
2412                 }
2413             }
2414             else if (c <= sc || n >= MAXIMUM_CAPACITY)
2415                 break;
2416             else if (tab == table) {
2417                 int rs = resizeStamp(n);
2418                 if (U.compareAndSetInt(this, SIZECTL, sc,
2419                                         (rs << RESIZE_STAMP_SHIFT) + 2))
2420                     transfer(tab, null);
2421             }
2422         }
2423     }
2424 
2425     /**
2426      * Moves and/or copies the nodes in each bin to new table. See
2427      * above for explanation.
2428      */
transfer(Node<K,V>[] tab, Node<K,V>[] nextTab)2429     private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2430         int n = tab.length, stride;
2431         if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2432             stride = MIN_TRANSFER_STRIDE; // subdivide range
2433         if (nextTab == null) {            // initiating
2434             try {
2435                 @SuppressWarnings("unchecked")
2436                 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2437                 nextTab = nt;
2438             } catch (Throwable ex) {      // try to cope with OOME
2439                 sizeCtl = Integer.MAX_VALUE;
2440                 return;
2441             }
2442             nextTable = nextTab;
2443             transferIndex = n;
2444         }
2445         int nextn = nextTab.length;
2446         ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2447         boolean advance = true;
2448         boolean finishing = false; // to ensure sweep before committing nextTab
2449         for (int i = 0, bound = 0;;) {
2450             Node<K,V> f; int fh;
2451             while (advance) {
2452                 int nextIndex, nextBound;
2453                 if (--i >= bound || finishing)
2454                     advance = false;
2455                 else if ((nextIndex = transferIndex) <= 0) {
2456                     i = -1;
2457                     advance = false;
2458                 }
2459                 else if (U.compareAndSetInt
2460                          (this, TRANSFERINDEX, nextIndex,
2461                           nextBound = (nextIndex > stride ?
2462                                        nextIndex - stride : 0))) {
2463                     bound = nextBound;
2464                     i = nextIndex - 1;
2465                     advance = false;
2466                 }
2467             }
2468             if (i < 0 || i >= n || i + n >= nextn) {
2469                 int sc;
2470                 if (finishing) {
2471                     nextTable = null;
2472                     table = nextTab;
2473                     sizeCtl = (n << 1) - (n >>> 1);
2474                     return;
2475                 }
2476                 if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2477                     if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2478                         return;
2479                     finishing = advance = true;
2480                     i = n; // recheck before commit
2481                 }
2482             }
2483             else if ((f = tabAt(tab, i)) == null)
2484                 advance = casTabAt(tab, i, null, fwd);
2485             else if ((fh = f.hash) == MOVED)
2486                 advance = true; // already processed
2487             else {
2488                 synchronized (f) {
2489                     if (tabAt(tab, i) == f) {
2490                         Node<K,V> ln, hn;
2491                         if (fh >= 0) {
2492                             int runBit = fh & n;
2493                             Node<K,V> lastRun = f;
2494                             for (Node<K,V> p = f.next; p != null; p = p.next) {
2495                                 int b = p.hash & n;
2496                                 if (b != runBit) {
2497                                     runBit = b;
2498                                     lastRun = p;
2499                                 }
2500                             }
2501                             if (runBit == 0) {
2502                                 ln = lastRun;
2503                                 hn = null;
2504                             }
2505                             else {
2506                                 hn = lastRun;
2507                                 ln = null;
2508                             }
2509                             for (Node<K,V> p = f; p != lastRun; p = p.next) {
2510                                 int ph = p.hash; K pk = p.key; V pv = p.val;
2511                                 if ((ph & n) == 0)
2512                                     ln = new Node<K,V>(ph, pk, pv, ln);
2513                                 else
2514                                     hn = new Node<K,V>(ph, pk, pv, hn);
2515                             }
2516                             setTabAt(nextTab, i, ln);
2517                             setTabAt(nextTab, i + n, hn);
2518                             setTabAt(tab, i, fwd);
2519                             advance = true;
2520                         }
2521                         else if (f instanceof TreeBin) {
2522                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2523                             TreeNode<K,V> lo = null, loTail = null;
2524                             TreeNode<K,V> hi = null, hiTail = null;
2525                             int lc = 0, hc = 0;
2526                             for (Node<K,V> e = t.first; e != null; e = e.next) {
2527                                 int h = e.hash;
2528                                 TreeNode<K,V> p = new TreeNode<K,V>
2529                                     (h, e.key, e.val, null, null);
2530                                 if ((h & n) == 0) {
2531                                     if ((p.prev = loTail) == null)
2532                                         lo = p;
2533                                     else
2534                                         loTail.next = p;
2535                                     loTail = p;
2536                                     ++lc;
2537                                 }
2538                                 else {
2539                                     if ((p.prev = hiTail) == null)
2540                                         hi = p;
2541                                     else
2542                                         hiTail.next = p;
2543                                     hiTail = p;
2544                                     ++hc;
2545                                 }
2546                             }
2547                             ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2548                                 (hc != 0) ? new TreeBin<K,V>(lo) : t;
2549                             hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2550                                 (lc != 0) ? new TreeBin<K,V>(hi) : t;
2551                             setTabAt(nextTab, i, ln);
2552                             setTabAt(nextTab, i + n, hn);
2553                             setTabAt(tab, i, fwd);
2554                             advance = true;
2555                         }
2556                         else if (f instanceof ReservationNode)
2557                             throw new IllegalStateException("Recursive update");
2558                     }
2559                 }
2560             }
2561         }
2562     }
2563 
2564     /* ---------------- Counter support -------------- */
2565 
2566     /**
2567      * A padded cell for distributing counts.  Adapted from LongAdder
2568      * and Striped64.  See their internal docs for explanation.
2569      */
2570     @jdk.internal.vm.annotation.Contended
2571     static final class CounterCell {
2572         volatile long value;
CounterCell(long x)2573         CounterCell(long x) { value = x; }
2574     }
2575 
sumCount()2576     final long sumCount() {
2577         CounterCell[] cs = counterCells;
2578         long sum = baseCount;
2579         if (cs != null) {
2580             for (CounterCell c : cs)
2581                 if (c != null)
2582                     sum += c.value;
2583         }
2584         return sum;
2585     }
2586 
2587     // See LongAdder version for explanation
fullAddCount(long x, boolean wasUncontended)2588     private final void fullAddCount(long x, boolean wasUncontended) {
2589         int h;
2590         if ((h = ThreadLocalRandom.getProbe()) == 0) {
2591             ThreadLocalRandom.localInit();      // force initialization
2592             h = ThreadLocalRandom.getProbe();
2593             wasUncontended = true;
2594         }
2595         boolean collide = false;                // True if last slot nonempty
2596         for (;;) {
2597             CounterCell[] cs; CounterCell c; int n; long v;
2598             if ((cs = counterCells) != null && (n = cs.length) > 0) {
2599                 if ((c = cs[(n - 1) & h]) == null) {
2600                     if (cellsBusy == 0) {            // Try to attach new Cell
2601                         CounterCell r = new CounterCell(x); // Optimistic create
2602                         if (cellsBusy == 0 &&
2603                             U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
2604                             boolean created = false;
2605                             try {               // Recheck under lock
2606                                 CounterCell[] rs; int m, j;
2607                                 if ((rs = counterCells) != null &&
2608                                     (m = rs.length) > 0 &&
2609                                     rs[j = (m - 1) & h] == null) {
2610                                     rs[j] = r;
2611                                     created = true;
2612                                 }
2613                             } finally {
2614                                 cellsBusy = 0;
2615                             }
2616                             if (created)
2617                                 break;
2618                             continue;           // Slot is now non-empty
2619                         }
2620                     }
2621                     collide = false;
2622                 }
2623                 else if (!wasUncontended)       // CAS already known to fail
2624                     wasUncontended = true;      // Continue after rehash
2625                 else if (U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))
2626                     break;
2627                 else if (counterCells != cs || n >= NCPU)
2628                     collide = false;            // At max size or stale
2629                 else if (!collide)
2630                     collide = true;
2631                 else if (cellsBusy == 0 &&
2632                          U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
2633                     try {
2634                         if (counterCells == cs) // Expand table unless stale
2635                             counterCells = Arrays.copyOf(cs, n << 1);
2636                     } finally {
2637                         cellsBusy = 0;
2638                     }
2639                     collide = false;
2640                     continue;                   // Retry with expanded table
2641                 }
2642                 h = ThreadLocalRandom.advanceProbe(h);
2643             }
2644             else if (cellsBusy == 0 && counterCells == cs &&
2645                      U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
2646                 boolean init = false;
2647                 try {                           // Initialize table
2648                     if (counterCells == cs) {
2649                         CounterCell[] rs = new CounterCell[2];
2650                         rs[h & 1] = new CounterCell(x);
2651                         counterCells = rs;
2652                         init = true;
2653                     }
2654                 } finally {
2655                     cellsBusy = 0;
2656                 }
2657                 if (init)
2658                     break;
2659             }
2660             else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
2661                 break;                          // Fall back on using base
2662         }
2663     }
2664 
2665     /* ---------------- Conversion from/to TreeBins -------------- */
2666 
2667     /**
2668      * Replaces all linked nodes in bin at given index unless table is
2669      * too small, in which case resizes instead.
2670      */
treeifyBin(Node<K,V>[] tab, int index)2671     private final void treeifyBin(Node<K,V>[] tab, int index) {
2672         Node<K,V> b; int n;
2673         if (tab != null) {
2674             if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2675                 tryPresize(n << 1);
2676             else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2677                 synchronized (b) {
2678                     if (tabAt(tab, index) == b) {
2679                         TreeNode<K,V> hd = null, tl = null;
2680                         for (Node<K,V> e = b; e != null; e = e.next) {
2681                             TreeNode<K,V> p =
2682                                 new TreeNode<K,V>(e.hash, e.key, e.val,
2683                                                   null, null);
2684                             if ((p.prev = tl) == null)
2685                                 hd = p;
2686                             else
2687                                 tl.next = p;
2688                             tl = p;
2689                         }
2690                         setTabAt(tab, index, new TreeBin<K,V>(hd));
2691                     }
2692                 }
2693             }
2694         }
2695     }
2696 
2697     /**
2698      * Returns a list of non-TreeNodes replacing those in given list.
2699      */
untreeify(Node<K,V> b)2700     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2701         Node<K,V> hd = null, tl = null;
2702         for (Node<K,V> q = b; q != null; q = q.next) {
2703             Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
2704             if (tl == null)
2705                 hd = p;
2706             else
2707                 tl.next = p;
2708             tl = p;
2709         }
2710         return hd;
2711     }
2712 
2713     /* ---------------- TreeNodes -------------- */
2714 
2715     /**
2716      * Nodes for use in TreeBins.
2717      */
2718     static final class TreeNode<K,V> extends Node<K,V> {
2719         TreeNode<K,V> parent;  // red-black tree links
2720         TreeNode<K,V> left;
2721         TreeNode<K,V> right;
2722         TreeNode<K,V> prev;    // needed to unlink next upon deletion
2723         boolean red;
2724 
TreeNode(int hash, K key, V val, Node<K,V> next, TreeNode<K,V> parent)2725         TreeNode(int hash, K key, V val, Node<K,V> next,
2726                  TreeNode<K,V> parent) {
2727             super(hash, key, val, next);
2728             this.parent = parent;
2729         }
2730 
find(int h, Object k)2731         Node<K,V> find(int h, Object k) {
2732             return findTreeNode(h, k, null);
2733         }
2734 
2735         /**
2736          * Returns the TreeNode (or null if not found) for the given key
2737          * starting at given root.
2738          */
findTreeNode(int h, Object k, Class<?> kc)2739         final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2740             if (k != null) {
2741                 TreeNode<K,V> p = this;
2742                 do {
2743                     int ph, dir; K pk; TreeNode<K,V> q;
2744                     TreeNode<K,V> pl = p.left, pr = p.right;
2745                     if ((ph = p.hash) > h)
2746                         p = pl;
2747                     else if (ph < h)
2748                         p = pr;
2749                     else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2750                         return p;
2751                     else if (pl == null)
2752                         p = pr;
2753                     else if (pr == null)
2754                         p = pl;
2755                     else if ((kc != null ||
2756                               (kc = comparableClassFor(k)) != null) &&
2757                              (dir = compareComparables(kc, k, pk)) != 0)
2758                         p = (dir < 0) ? pl : pr;
2759                     else if ((q = pr.findTreeNode(h, k, kc)) != null)
2760                         return q;
2761                     else
2762                         p = pl;
2763                 } while (p != null);
2764             }
2765             return null;
2766         }
2767     }
2768 
2769     /* ---------------- TreeBins -------------- */
2770 
2771     /**
2772      * TreeNodes used at the heads of bins. TreeBins do not hold user
2773      * keys or values, but instead point to list of TreeNodes and
2774      * their root. They also maintain a parasitic read-write lock
2775      * forcing writers (who hold bin lock) to wait for readers (who do
2776      * not) to complete before tree restructuring operations.
2777      */
2778     static final class TreeBin<K,V> extends Node<K,V> {
2779         TreeNode<K,V> root;
2780         volatile TreeNode<K,V> first;
2781         volatile Thread waiter;
2782         volatile int lockState;
2783         // values for lockState
2784         static final int WRITER = 1; // set while holding write lock
2785         static final int WAITER = 2; // set when waiting for write lock
2786         static final int READER = 4; // increment value for setting read lock
2787 
2788         /**
2789          * Tie-breaking utility for ordering insertions when equal
2790          * hashCodes and non-comparable. We don't require a total
2791          * order, just a consistent insertion rule to maintain
2792          * equivalence across rebalancings. Tie-breaking further than
2793          * necessary simplifies testing a bit.
2794          */
tieBreakOrder(Object a, Object b)2795         static int tieBreakOrder(Object a, Object b) {
2796             int d;
2797             if (a == null || b == null ||
2798                 (d = a.getClass().getName().
2799                  compareTo(b.getClass().getName())) == 0)
2800                 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2801                      -1 : 1);
2802             return d;
2803         }
2804 
2805         /**
2806          * Creates bin with initial set of nodes headed by b.
2807          */
TreeBin(TreeNode<K,V> b)2808         TreeBin(TreeNode<K,V> b) {
2809             super(TREEBIN, null, null);
2810             this.first = b;
2811             TreeNode<K,V> r = null;
2812             for (TreeNode<K,V> x = b, next; x != null; x = next) {
2813                 next = (TreeNode<K,V>)x.next;
2814                 x.left = x.right = null;
2815                 if (r == null) {
2816                     x.parent = null;
2817                     x.red = false;
2818                     r = x;
2819                 }
2820                 else {
2821                     K k = x.key;
2822                     int h = x.hash;
2823                     Class<?> kc = null;
2824                     for (TreeNode<K,V> p = r;;) {
2825                         int dir, ph;
2826                         K pk = p.key;
2827                         if ((ph = p.hash) > h)
2828                             dir = -1;
2829                         else if (ph < h)
2830                             dir = 1;
2831                         else if ((kc == null &&
2832                                   (kc = comparableClassFor(k)) == null) ||
2833                                  (dir = compareComparables(kc, k, pk)) == 0)
2834                             dir = tieBreakOrder(k, pk);
2835                         TreeNode<K,V> xp = p;
2836                         if ((p = (dir <= 0) ? p.left : p.right) == null) {
2837                             x.parent = xp;
2838                             if (dir <= 0)
2839                                 xp.left = x;
2840                             else
2841                                 xp.right = x;
2842                             r = balanceInsertion(r, x);
2843                             break;
2844                         }
2845                     }
2846                 }
2847             }
2848             this.root = r;
2849             assert checkInvariants(root);
2850         }
2851 
2852         /**
2853          * Acquires write lock for tree restructuring.
2854          */
lockRoot()2855         private final void lockRoot() {
2856             if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
2857                 contendedLock(); // offload to separate method
2858         }
2859 
2860         /**
2861          * Releases write lock for tree restructuring.
2862          */
unlockRoot()2863         private final void unlockRoot() {
2864             lockState = 0;
2865         }
2866 
2867         /**
2868          * Possibly blocks awaiting root lock.
2869          */
contendedLock()2870         private final void contendedLock() {
2871             Thread current = Thread.currentThread(), w;
2872             for (int s;;) {
2873                 if (((s = lockState) & ~WAITER) == 0) {
2874                     if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
2875                         if (waiter == current)
2876                             U.compareAndSetReference(this, WAITERTHREAD, current, null);
2877                         return;
2878                     }
2879                 }
2880                 else if ((s & WAITER) == 0)
2881                     U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER);
2882                 else if ((w = waiter) == null)
2883                     U.compareAndSetReference(this, WAITERTHREAD, null, current);
2884                 else if (w == current)
2885                     LockSupport.park(this);
2886             }
2887         }
2888 
2889         /**
2890          * Returns matching node or null if none. Tries to search
2891          * using tree comparisons from root, but continues linear
2892          * search when lock not available.
2893          */
find(int h, Object k)2894         final Node<K,V> find(int h, Object k) {
2895             if (k != null) {
2896                 for (Node<K,V> e = first; e != null; ) {
2897                     int s; K ek;
2898                     if (((s = lockState) & (WAITER|WRITER)) != 0) {
2899                         if (e.hash == h &&
2900                             ((ek = e.key) == k || (ek != null && k.equals(ek))))
2901                             return e;
2902                         e = e.next;
2903                     }
2904                     else if (U.compareAndSetInt(this, LOCKSTATE, s,
2905                                                  s + READER)) {
2906                         TreeNode<K,V> r, p;
2907                         try {
2908                             p = ((r = root) == null ? null :
2909                                  r.findTreeNode(h, k, null));
2910                         } finally {
2911                             Thread w;
2912                             if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2913                                 (READER|WAITER) && (w = waiter) != null)
2914                                 LockSupport.unpark(w);
2915                         }
2916                         return p;
2917                     }
2918                 }
2919             }
2920             return null;
2921         }
2922 
2923         /**
2924          * Finds or adds a node.
2925          * @return null if added
2926          */
putTreeVal(int h, K k, V v)2927         final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2928             Class<?> kc = null;
2929             boolean searched = false;
2930             for (TreeNode<K,V> p = root;;) {
2931                 int dir, ph; K pk;
2932                 if (p == null) {
2933                     first = root = new TreeNode<K,V>(h, k, v, null, null);
2934                     break;
2935                 }
2936                 else if ((ph = p.hash) > h)
2937                     dir = -1;
2938                 else if (ph < h)
2939                     dir = 1;
2940                 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2941                     return p;
2942                 else if ((kc == null &&
2943                           (kc = comparableClassFor(k)) == null) ||
2944                          (dir = compareComparables(kc, k, pk)) == 0) {
2945                     if (!searched) {
2946                         TreeNode<K,V> q, ch;
2947                         searched = true;
2948                         if (((ch = p.left) != null &&
2949                              (q = ch.findTreeNode(h, k, kc)) != null) ||
2950                             ((ch = p.right) != null &&
2951                              (q = ch.findTreeNode(h, k, kc)) != null))
2952                             return q;
2953                     }
2954                     dir = tieBreakOrder(k, pk);
2955                 }
2956 
2957                 TreeNode<K,V> xp = p;
2958                 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2959                     TreeNode<K,V> x, f = first;
2960                     first = x = new TreeNode<K,V>(h, k, v, f, xp);
2961                     if (f != null)
2962                         f.prev = x;
2963                     if (dir <= 0)
2964                         xp.left = x;
2965                     else
2966                         xp.right = x;
2967                     if (!xp.red)
2968                         x.red = true;
2969                     else {
2970                         lockRoot();
2971                         try {
2972                             root = balanceInsertion(root, x);
2973                         } finally {
2974                             unlockRoot();
2975                         }
2976                     }
2977                     break;
2978                 }
2979             }
2980             assert checkInvariants(root);
2981             return null;
2982         }
2983 
2984         /**
2985          * Removes the given node, that must be present before this
2986          * call.  This is messier than typical red-black deletion code
2987          * because we cannot swap the contents of an interior node
2988          * with a leaf successor that is pinned by "next" pointers
2989          * that are accessible independently of lock. So instead we
2990          * swap the tree linkages.
2991          *
2992          * @return true if now too small, so should be untreeified
2993          */
removeTreeNode(TreeNode<K,V> p)2994         final boolean removeTreeNode(TreeNode<K,V> p) {
2995             TreeNode<K,V> next = (TreeNode<K,V>)p.next;
2996             TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
2997             TreeNode<K,V> r, rl;
2998             if (pred == null)
2999                 first = next;
3000             else
3001                 pred.next = next;
3002             if (next != null)
3003                 next.prev = pred;
3004             if (first == null) {
3005                 root = null;
3006                 return true;
3007             }
3008             if ((r = root) == null || r.right == null || // too small
3009                 (rl = r.left) == null || rl.left == null)
3010                 return true;
3011             lockRoot();
3012             try {
3013                 TreeNode<K,V> replacement;
3014                 TreeNode<K,V> pl = p.left;
3015                 TreeNode<K,V> pr = p.right;
3016                 if (pl != null && pr != null) {
3017                     TreeNode<K,V> s = pr, sl;
3018                     while ((sl = s.left) != null) // find successor
3019                         s = sl;
3020                     boolean c = s.red; s.red = p.red; p.red = c; // swap colors
3021                     TreeNode<K,V> sr = s.right;
3022                     TreeNode<K,V> pp = p.parent;
3023                     if (s == pr) { // p was s's direct parent
3024                         p.parent = s;
3025                         s.right = p;
3026                     }
3027                     else {
3028                         TreeNode<K,V> sp = s.parent;
3029                         if ((p.parent = sp) != null) {
3030                             if (s == sp.left)
3031                                 sp.left = p;
3032                             else
3033                                 sp.right = p;
3034                         }
3035                         if ((s.right = pr) != null)
3036                             pr.parent = s;
3037                     }
3038                     p.left = null;
3039                     if ((p.right = sr) != null)
3040                         sr.parent = p;
3041                     if ((s.left = pl) != null)
3042                         pl.parent = s;
3043                     if ((s.parent = pp) == null)
3044                         r = s;
3045                     else if (p == pp.left)
3046                         pp.left = s;
3047                     else
3048                         pp.right = s;
3049                     if (sr != null)
3050                         replacement = sr;
3051                     else
3052                         replacement = p;
3053                 }
3054                 else if (pl != null)
3055                     replacement = pl;
3056                 else if (pr != null)
3057                     replacement = pr;
3058                 else
3059                     replacement = p;
3060                 if (replacement != p) {
3061                     TreeNode<K,V> pp = replacement.parent = p.parent;
3062                     if (pp == null)
3063                         r = replacement;
3064                     else if (p == pp.left)
3065                         pp.left = replacement;
3066                     else
3067                         pp.right = replacement;
3068                     p.left = p.right = p.parent = null;
3069                 }
3070 
3071                 root = (p.red) ? r : balanceDeletion(r, replacement);
3072 
3073                 if (p == replacement) {  // detach pointers
3074                     TreeNode<K,V> pp;
3075                     if ((pp = p.parent) != null) {
3076                         if (p == pp.left)
3077                             pp.left = null;
3078                         else if (p == pp.right)
3079                             pp.right = null;
3080                         p.parent = null;
3081                     }
3082                 }
3083             } finally {
3084                 unlockRoot();
3085             }
3086             assert checkInvariants(root);
3087             return false;
3088         }
3089 
3090         /* ------------------------------------------------------------ */
3091         // Red-black tree methods, all adapted from CLR
3092 
rotateLeft(TreeNode<K,V> root, TreeNode<K,V> p)3093         static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3094                                               TreeNode<K,V> p) {
3095             TreeNode<K,V> r, pp, rl;
3096             if (p != null && (r = p.right) != null) {
3097                 if ((rl = p.right = r.left) != null)
3098                     rl.parent = p;
3099                 if ((pp = r.parent = p.parent) == null)
3100                     (root = r).red = false;
3101                 else if (pp.left == p)
3102                     pp.left = r;
3103                 else
3104                     pp.right = r;
3105                 r.left = p;
3106                 p.parent = r;
3107             }
3108             return root;
3109         }
3110 
rotateRight(TreeNode<K,V> root, TreeNode<K,V> p)3111         static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3112                                                TreeNode<K,V> p) {
3113             TreeNode<K,V> l, pp, lr;
3114             if (p != null && (l = p.left) != null) {
3115                 if ((lr = p.left = l.right) != null)
3116                     lr.parent = p;
3117                 if ((pp = l.parent = p.parent) == null)
3118                     (root = l).red = false;
3119                 else if (pp.right == p)
3120                     pp.right = l;
3121                 else
3122                     pp.left = l;
3123                 l.right = p;
3124                 p.parent = l;
3125             }
3126             return root;
3127         }
3128 
balanceInsertion(TreeNode<K,V> root, TreeNode<K,V> x)3129         static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3130                                                     TreeNode<K,V> x) {
3131             x.red = true;
3132             for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3133                 if ((xp = x.parent) == null) {
3134                     x.red = false;
3135                     return x;
3136                 }
3137                 else if (!xp.red || (xpp = xp.parent) == null)
3138                     return root;
3139                 if (xp == (xppl = xpp.left)) {
3140                     if ((xppr = xpp.right) != null && xppr.red) {
3141                         xppr.red = false;
3142                         xp.red = false;
3143                         xpp.red = true;
3144                         x = xpp;
3145                     }
3146                     else {
3147                         if (x == xp.right) {
3148                             root = rotateLeft(root, x = xp);
3149                             xpp = (xp = x.parent) == null ? null : xp.parent;
3150                         }
3151                         if (xp != null) {
3152                             xp.red = false;
3153                             if (xpp != null) {
3154                                 xpp.red = true;
3155                                 root = rotateRight(root, xpp);
3156                             }
3157                         }
3158                     }
3159                 }
3160                 else {
3161                     if (xppl != null && xppl.red) {
3162                         xppl.red = false;
3163                         xp.red = false;
3164                         xpp.red = true;
3165                         x = xpp;
3166                     }
3167                     else {
3168                         if (x == xp.left) {
3169                             root = rotateRight(root, x = xp);
3170                             xpp = (xp = x.parent) == null ? null : xp.parent;
3171                         }
3172                         if (xp != null) {
3173                             xp.red = false;
3174                             if (xpp != null) {
3175                                 xpp.red = true;
3176                                 root = rotateLeft(root, xpp);
3177                             }
3178                         }
3179                     }
3180                 }
3181             }
3182         }
3183 
balanceDeletion(TreeNode<K,V> root, TreeNode<K,V> x)3184         static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3185                                                    TreeNode<K,V> x) {
3186             for (TreeNode<K,V> xp, xpl, xpr;;) {
3187                 if (x == null || x == root)
3188                     return root;
3189                 else if ((xp = x.parent) == null) {
3190                     x.red = false;
3191                     return x;
3192                 }
3193                 else if (x.red) {
3194                     x.red = false;
3195                     return root;
3196                 }
3197                 else if ((xpl = xp.left) == x) {
3198                     if ((xpr = xp.right) != null && xpr.red) {
3199                         xpr.red = false;
3200                         xp.red = true;
3201                         root = rotateLeft(root, xp);
3202                         xpr = (xp = x.parent) == null ? null : xp.right;
3203                     }
3204                     if (xpr == null)
3205                         x = xp;
3206                     else {
3207                         TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3208                         if ((sr == null || !sr.red) &&
3209                             (sl == null || !sl.red)) {
3210                             xpr.red = true;
3211                             x = xp;
3212                         }
3213                         else {
3214                             if (sr == null || !sr.red) {
3215                                 if (sl != null)
3216                                     sl.red = false;
3217                                 xpr.red = true;
3218                                 root = rotateRight(root, xpr);
3219                                 xpr = (xp = x.parent) == null ?
3220                                     null : xp.right;
3221                             }
3222                             if (xpr != null) {
3223                                 xpr.red = (xp == null) ? false : xp.red;
3224                                 if ((sr = xpr.right) != null)
3225                                     sr.red = false;
3226                             }
3227                             if (xp != null) {
3228                                 xp.red = false;
3229                                 root = rotateLeft(root, xp);
3230                             }
3231                             x = root;
3232                         }
3233                     }
3234                 }
3235                 else { // symmetric
3236                     if (xpl != null && xpl.red) {
3237                         xpl.red = false;
3238                         xp.red = true;
3239                         root = rotateRight(root, xp);
3240                         xpl = (xp = x.parent) == null ? null : xp.left;
3241                     }
3242                     if (xpl == null)
3243                         x = xp;
3244                     else {
3245                         TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3246                         if ((sl == null || !sl.red) &&
3247                             (sr == null || !sr.red)) {
3248                             xpl.red = true;
3249                             x = xp;
3250                         }
3251                         else {
3252                             if (sl == null || !sl.red) {
3253                                 if (sr != null)
3254                                     sr.red = false;
3255                                 xpl.red = true;
3256                                 root = rotateLeft(root, xpl);
3257                                 xpl = (xp = x.parent) == null ?
3258                                     null : xp.left;
3259                             }
3260                             if (xpl != null) {
3261                                 xpl.red = (xp == null) ? false : xp.red;
3262                                 if ((sl = xpl.left) != null)
3263                                     sl.red = false;
3264                             }
3265                             if (xp != null) {
3266                                 xp.red = false;
3267                                 root = rotateRight(root, xp);
3268                             }
3269                             x = root;
3270                         }
3271                     }
3272                 }
3273             }
3274         }
3275 
3276         /**
3277          * Checks invariants recursively for the tree of Nodes rooted at t.
3278          */
checkInvariants(TreeNode<K,V> t)3279         static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3280             TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3281                 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3282             if (tb != null && tb.next != t)
3283                 return false;
3284             if (tn != null && tn.prev != t)
3285                 return false;
3286             if (tp != null && t != tp.left && t != tp.right)
3287                 return false;
3288             if (tl != null && (tl.parent != t || tl.hash > t.hash))
3289                 return false;
3290             if (tr != null && (tr.parent != t || tr.hash < t.hash))
3291                 return false;
3292             if (t.red && tl != null && tl.red && tr != null && tr.red)
3293                 return false;
3294             if (tl != null && !checkInvariants(tl))
3295                 return false;
3296             if (tr != null && !checkInvariants(tr))
3297                 return false;
3298             return true;
3299         }
3300 
3301         private static final long LOCKSTATE
3302             = U.objectFieldOffset(TreeBin.class, "lockState");
3303         private static final long WAITERTHREAD
3304             = U.objectFieldOffset(TreeBin.class, "waiter");
3305     }
3306 
3307     /* ----------------Table Traversal -------------- */
3308 
3309     /**
3310      * Records the table, its length, and current traversal index for a
3311      * traverser that must process a region of a forwarded table before
3312      * proceeding with current table.
3313      */
3314     static final class TableStack<K,V> {
3315         int length;
3316         int index;
3317         Node<K,V>[] tab;
3318         TableStack<K,V> next;
3319     }
3320 
3321     /**
3322      * Encapsulates traversal for methods such as containsValue; also
3323      * serves as a base class for other iterators and spliterators.
3324      *
3325      * Method advance visits once each still-valid node that was
3326      * reachable upon iterator construction. It might miss some that
3327      * were added to a bin after the bin was visited, which is OK wrt
3328      * consistency guarantees. Maintaining this property in the face
3329      * of possible ongoing resizes requires a fair amount of
3330      * bookkeeping state that is difficult to optimize away amidst
3331      * volatile accesses.  Even so, traversal maintains reasonable
3332      * throughput.
3333      *
3334      * Normally, iteration proceeds bin-by-bin traversing lists.
3335      * However, if the table has been resized, then all future steps
3336      * must traverse both the bin at the current index as well as at
3337      * (index + baseSize); and so on for further resizings. To
3338      * paranoically cope with potential sharing by users of iterators
3339      * across threads, iteration terminates if a bounds checks fails
3340      * for a table read.
3341      */
3342     static class Traverser<K,V> {
3343         Node<K,V>[] tab;        // current table; updated if resized
3344         Node<K,V> next;         // the next entry to use
3345         TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3346         int index;              // index of bin to use next
3347         int baseIndex;          // current index of initial table
3348         int baseLimit;          // index bound for initial table
3349         final int baseSize;     // initial table size
3350 
Traverser(Node<K,V>[] tab, int size, int index, int limit)3351         Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3352             this.tab = tab;
3353             this.baseSize = size;
3354             this.baseIndex = this.index = index;
3355             this.baseLimit = limit;
3356             this.next = null;
3357         }
3358 
3359         /**
3360          * Advances if possible, returning next valid node, or null if none.
3361          */
advance()3362         final Node<K,V> advance() {
3363             Node<K,V> e;
3364             if ((e = next) != null)
3365                 e = e.next;
3366             for (;;) {
3367                 Node<K,V>[] t; int i, n;  // must use locals in checks
3368                 if (e != null)
3369                     return next = e;
3370                 if (baseIndex >= baseLimit || (t = tab) == null ||
3371                     (n = t.length) <= (i = index) || i < 0)
3372                     return next = null;
3373                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
3374                     if (e instanceof ForwardingNode) {
3375                         tab = ((ForwardingNode<K,V>)e).nextTable;
3376                         e = null;
3377                         pushState(t, i, n);
3378                         continue;
3379                     }
3380                     else if (e instanceof TreeBin)
3381                         e = ((TreeBin<K,V>)e).first;
3382                     else
3383                         e = null;
3384                 }
3385                 if (stack != null)
3386                     recoverState(n);
3387                 else if ((index = i + baseSize) >= n)
3388                     index = ++baseIndex; // visit upper slots if present
3389             }
3390         }
3391 
3392         /**
3393          * Saves traversal state upon encountering a forwarding node.
3394          */
pushState(Node<K,V>[] t, int i, int n)3395         private void pushState(Node<K,V>[] t, int i, int n) {
3396             TableStack<K,V> s = spare;  // reuse if possible
3397             if (s != null)
3398                 spare = s.next;
3399             else
3400                 s = new TableStack<K,V>();
3401             s.tab = t;
3402             s.length = n;
3403             s.index = i;
3404             s.next = stack;
3405             stack = s;
3406         }
3407 
3408         /**
3409          * Possibly pops traversal state.
3410          *
3411          * @param n length of current table
3412          */
recoverState(int n)3413         private void recoverState(int n) {
3414             TableStack<K,V> s; int len;
3415             while ((s = stack) != null && (index += (len = s.length)) >= n) {
3416                 n = len;
3417                 index = s.index;
3418                 tab = s.tab;
3419                 s.tab = null;
3420                 TableStack<K,V> next = s.next;
3421                 s.next = spare; // save for reuse
3422                 stack = next;
3423                 spare = s;
3424             }
3425             if (s == null && (index += baseSize) >= n)
3426                 index = ++baseIndex;
3427         }
3428     }
3429 
3430     /**
3431      * Base of key, value, and entry Iterators. Adds fields to
3432      * Traverser to support iterator.remove.
3433      */
3434     static class BaseIterator<K,V> extends Traverser<K,V> {
3435         final ConcurrentHashMap<K,V> map;
3436         Node<K,V> lastReturned;
BaseIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMap<K,V> map)3437         BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3438                     ConcurrentHashMap<K,V> map) {
3439             super(tab, size, index, limit);
3440             this.map = map;
3441             advance();
3442         }
3443 
hasNext()3444         public final boolean hasNext() { return next != null; }
hasMoreElements()3445         public final boolean hasMoreElements() { return next != null; }
3446 
remove()3447         public final void remove() {
3448             Node<K,V> p;
3449             if ((p = lastReturned) == null)
3450                 throw new IllegalStateException();
3451             lastReturned = null;
3452             map.replaceNode(p.key, null, null);
3453         }
3454     }
3455 
3456     static final class KeyIterator<K,V> extends BaseIterator<K,V>
3457         implements Iterator<K>, Enumeration<K> {
KeyIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMap<K,V> map)3458         KeyIterator(Node<K,V>[] tab, int size, int index, int limit,
3459                     ConcurrentHashMap<K,V> map) {
3460             super(tab, size, index, limit, map);
3461         }
3462 
next()3463         public final K next() {
3464             Node<K,V> p;
3465             if ((p = next) == null)
3466                 throw new NoSuchElementException();
3467             K k = p.key;
3468             lastReturned = p;
3469             advance();
3470             return k;
3471         }
3472 
nextElement()3473         public final K nextElement() { return next(); }
3474     }
3475 
3476     static final class ValueIterator<K,V> extends BaseIterator<K,V>
3477         implements Iterator<V>, Enumeration<V> {
ValueIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMap<K,V> map)3478         ValueIterator(Node<K,V>[] tab, int size, int index, int limit,
3479                       ConcurrentHashMap<K,V> map) {
3480             super(tab, size, index, limit, map);
3481         }
3482 
next()3483         public final V next() {
3484             Node<K,V> p;
3485             if ((p = next) == null)
3486                 throw new NoSuchElementException();
3487             V v = p.val;
3488             lastReturned = p;
3489             advance();
3490             return v;
3491         }
3492 
nextElement()3493         public final V nextElement() { return next(); }
3494     }
3495 
3496     static final class EntryIterator<K,V> extends BaseIterator<K,V>
3497         implements Iterator<Map.Entry<K,V>> {
EntryIterator(Node<K,V>[] tab, int size, int index, int limit, ConcurrentHashMap<K,V> map)3498         EntryIterator(Node<K,V>[] tab, int size, int index, int limit,
3499                       ConcurrentHashMap<K,V> map) {
3500             super(tab, size, index, limit, map);
3501         }
3502 
next()3503         public final Map.Entry<K,V> next() {
3504             Node<K,V> p;
3505             if ((p = next) == null)
3506                 throw new NoSuchElementException();
3507             K k = p.key;
3508             V v = p.val;
3509             lastReturned = p;
3510             advance();
3511             return new MapEntry<K,V>(k, v, map);
3512         }
3513     }
3514 
3515     /**
3516      * Exported Entry for EntryIterator.
3517      */
3518     static final class MapEntry<K,V> implements Map.Entry<K,V> {
3519         final K key; // non-null
3520         V val;       // non-null
3521         final ConcurrentHashMap<K,V> map;
MapEntry(K key, V val, ConcurrentHashMap<K,V> map)3522         MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3523             this.key = key;
3524             this.val = val;
3525             this.map = map;
3526         }
getKey()3527         public K getKey()        { return key; }
getValue()3528         public V getValue()      { return val; }
hashCode()3529         public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
toString()3530         public String toString() {
3531             return Helpers.mapEntryToString(key, val);
3532         }
3533 
equals(Object o)3534         public boolean equals(Object o) {
3535             Object k, v; Map.Entry<?,?> e;
3536             return ((o instanceof Map.Entry) &&
3537                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3538                     (v = e.getValue()) != null &&
3539                     (k == key || k.equals(key)) &&
3540                     (v == val || v.equals(val)));
3541         }
3542 
3543         /**
3544          * Sets our entry's value and writes through to the map. The
3545          * value to return is somewhat arbitrary here. Since we do not
3546          * necessarily track asynchronous changes, the most recent
3547          * "previous" value could be different from what we return (or
3548          * could even have been removed, in which case the put will
3549          * re-establish). We do not and cannot guarantee more.
3550          */
setValue(V value)3551         public V setValue(V value) {
3552             if (value == null) throw new NullPointerException();
3553             V v = val;
3554             val = value;
3555             map.put(key, value);
3556             return v;
3557         }
3558     }
3559 
3560     static final class KeySpliterator<K,V> extends Traverser<K,V>
3561         implements Spliterator<K> {
3562         long est;               // size estimate
KeySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est)3563         KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3564                        long est) {
3565             super(tab, size, index, limit);
3566             this.est = est;
3567         }
3568 
trySplit()3569         public KeySpliterator<K,V> trySplit() {
3570             int i, f, h;
3571             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3572                 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3573                                         f, est >>>= 1);
3574         }
3575 
forEachRemaining(Consumer<? super K> action)3576         public void forEachRemaining(Consumer<? super K> action) {
3577             if (action == null) throw new NullPointerException();
3578             for (Node<K,V> p; (p = advance()) != null;)
3579                 action.accept(p.key);
3580         }
3581 
tryAdvance(Consumer<? super K> action)3582         public boolean tryAdvance(Consumer<? super K> action) {
3583             if (action == null) throw new NullPointerException();
3584             Node<K,V> p;
3585             if ((p = advance()) == null)
3586                 return false;
3587             action.accept(p.key);
3588             return true;
3589         }
3590 
estimateSize()3591         public long estimateSize() { return est; }
3592 
characteristics()3593         public int characteristics() {
3594             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3595                 Spliterator.NONNULL;
3596         }
3597     }
3598 
3599     static final class ValueSpliterator<K,V> extends Traverser<K,V>
3600         implements Spliterator<V> {
3601         long est;               // size estimate
ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit, long est)3602         ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3603                          long est) {
3604             super(tab, size, index, limit);
3605             this.est = est;
3606         }
3607 
trySplit()3608         public ValueSpliterator<K,V> trySplit() {
3609             int i, f, h;
3610             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3611                 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3612                                           f, est >>>= 1);
3613         }
3614 
forEachRemaining(Consumer<? super V> action)3615         public void forEachRemaining(Consumer<? super V> action) {
3616             if (action == null) throw new NullPointerException();
3617             for (Node<K,V> p; (p = advance()) != null;)
3618                 action.accept(p.val);
3619         }
3620 
tryAdvance(Consumer<? super V> action)3621         public boolean tryAdvance(Consumer<? super V> action) {
3622             if (action == null) throw new NullPointerException();
3623             Node<K,V> p;
3624             if ((p = advance()) == null)
3625                 return false;
3626             action.accept(p.val);
3627             return true;
3628         }
3629 
estimateSize()3630         public long estimateSize() { return est; }
3631 
characteristics()3632         public int characteristics() {
3633             return Spliterator.CONCURRENT | Spliterator.NONNULL;
3634         }
3635     }
3636 
3637     static final class EntrySpliterator<K,V> extends Traverser<K,V>
3638         implements Spliterator<Map.Entry<K,V>> {
3639         final ConcurrentHashMap<K,V> map; // To export MapEntry
3640         long est;               // size estimate
EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit, long est, ConcurrentHashMap<K,V> map)3641         EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3642                          long est, ConcurrentHashMap<K,V> map) {
3643             super(tab, size, index, limit);
3644             this.map = map;
3645             this.est = est;
3646         }
3647 
trySplit()3648         public EntrySpliterator<K,V> trySplit() {
3649             int i, f, h;
3650             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3651                 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3652                                           f, est >>>= 1, map);
3653         }
3654 
forEachRemaining(Consumer<? super Map.Entry<K,V>> action)3655         public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3656             if (action == null) throw new NullPointerException();
3657             for (Node<K,V> p; (p = advance()) != null; )
3658                 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3659         }
3660 
tryAdvance(Consumer<? super Map.Entry<K,V>> action)3661         public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3662             if (action == null) throw new NullPointerException();
3663             Node<K,V> p;
3664             if ((p = advance()) == null)
3665                 return false;
3666             action.accept(new MapEntry<K,V>(p.key, p.val, map));
3667             return true;
3668         }
3669 
estimateSize()3670         public long estimateSize() { return est; }
3671 
characteristics()3672         public int characteristics() {
3673             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3674                 Spliterator.NONNULL;
3675         }
3676     }
3677 
3678     // Parallel bulk operations
3679 
3680     /**
3681      * Computes initial batch value for bulk tasks. The returned value
3682      * is approximately exp2 of the number of times (minus one) to
3683      * split task by two before executing leaf action. This value is
3684      * faster to compute and more convenient to use as a guide to
3685      * splitting than is the depth, since it is used while dividing by
3686      * two anyway.
3687      */
batchFor(long b)3688     final int batchFor(long b) {
3689         long n;
3690         if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3691             return 0;
3692         int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3693         return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3694     }
3695 
3696     /**
3697      * Performs the given action for each (key, value).
3698      *
3699      * @param parallelismThreshold the (estimated) number of elements
3700      * needed for this operation to be executed in parallel
3701      * @param action the action
3702      * @since 1.8
3703      */
forEach(long parallelismThreshold, BiConsumer<? super K,? super V> action)3704     public void forEach(long parallelismThreshold,
3705                         BiConsumer<? super K,? super V> action) {
3706         if (action == null) throw new NullPointerException();
3707         new ForEachMappingTask<K,V>
3708             (null, batchFor(parallelismThreshold), 0, 0, table,
3709              action).invoke();
3710     }
3711 
3712     /**
3713      * Performs the given action for each non-null transformation
3714      * of each (key, value).
3715      *
3716      * @param parallelismThreshold the (estimated) number of elements
3717      * needed for this operation to be executed in parallel
3718      * @param transformer a function returning the transformation
3719      * for an element, or null if there is no transformation (in
3720      * which case the action is not applied)
3721      * @param action the action
3722      * @param <U> the return type of the transformer
3723      * @since 1.8
3724      */
forEach(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> transformer, Consumer<? super U> action)3725     public <U> void forEach(long parallelismThreshold,
3726                             BiFunction<? super K, ? super V, ? extends U> transformer,
3727                             Consumer<? super U> action) {
3728         if (transformer == null || action == null)
3729             throw new NullPointerException();
3730         new ForEachTransformedMappingTask<K,V,U>
3731             (null, batchFor(parallelismThreshold), 0, 0, table,
3732              transformer, action).invoke();
3733     }
3734 
3735     /**
3736      * Returns a non-null result from applying the given search
3737      * function on each (key, value), or null if none.  Upon
3738      * success, further element processing is suppressed and the
3739      * results of any other parallel invocations of the search
3740      * function are ignored.
3741      *
3742      * @param parallelismThreshold the (estimated) number of elements
3743      * needed for this operation to be executed in parallel
3744      * @param searchFunction a function returning a non-null
3745      * result on success, else null
3746      * @param <U> the return type of the search function
3747      * @return a non-null result from applying the given search
3748      * function on each (key, value), or null if none
3749      * @since 1.8
3750      */
search(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> searchFunction)3751     public <U> U search(long parallelismThreshold,
3752                         BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3753         if (searchFunction == null) throw new NullPointerException();
3754         return new SearchMappingsTask<K,V,U>
3755             (null, batchFor(parallelismThreshold), 0, 0, table,
3756              searchFunction, new AtomicReference<U>()).invoke();
3757     }
3758 
3759     /**
3760      * Returns the result of accumulating the given transformation
3761      * of all (key, value) pairs using the given reducer to
3762      * combine values, or null if none.
3763      *
3764      * @param parallelismThreshold the (estimated) number of elements
3765      * needed for this operation to be executed in parallel
3766      * @param transformer a function returning the transformation
3767      * for an element, or null if there is no transformation (in
3768      * which case it is not combined)
3769      * @param reducer a commutative associative combining function
3770      * @param <U> the return type of the transformer
3771      * @return the result of accumulating the given transformation
3772      * of all (key, value) pairs
3773      * @since 1.8
3774      */
reduce(long parallelismThreshold, BiFunction<? super K, ? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)3775     public <U> U reduce(long parallelismThreshold,
3776                         BiFunction<? super K, ? super V, ? extends U> transformer,
3777                         BiFunction<? super U, ? super U, ? extends U> reducer) {
3778         if (transformer == null || reducer == null)
3779             throw new NullPointerException();
3780         return new MapReduceMappingsTask<K,V,U>
3781             (null, batchFor(parallelismThreshold), 0, 0, table,
3782              null, transformer, reducer).invoke();
3783     }
3784 
3785     /**
3786      * Returns the result of accumulating the given transformation
3787      * of all (key, value) pairs using the given reducer to
3788      * combine values, and the given basis as an identity value.
3789      *
3790      * @param parallelismThreshold the (estimated) number of elements
3791      * needed for this operation to be executed in parallel
3792      * @param transformer a function returning the transformation
3793      * for an element
3794      * @param basis the identity (initial default value) for the reduction
3795      * @param reducer a commutative associative combining function
3796      * @return the result of accumulating the given transformation
3797      * of all (key, value) pairs
3798      * @since 1.8
3799      */
reduceToDouble(long parallelismThreshold, ToDoubleBiFunction<? super K, ? super V> transformer, double basis, DoubleBinaryOperator reducer)3800     public double reduceToDouble(long parallelismThreshold,
3801                                  ToDoubleBiFunction<? super K, ? super V> transformer,
3802                                  double basis,
3803                                  DoubleBinaryOperator reducer) {
3804         if (transformer == null || reducer == null)
3805             throw new NullPointerException();
3806         return new MapReduceMappingsToDoubleTask<K,V>
3807             (null, batchFor(parallelismThreshold), 0, 0, table,
3808              null, transformer, basis, reducer).invoke();
3809     }
3810 
3811     /**
3812      * Returns the result of accumulating the given transformation
3813      * of all (key, value) pairs using the given reducer to
3814      * combine values, and the given basis as an identity value.
3815      *
3816      * @param parallelismThreshold the (estimated) number of elements
3817      * needed for this operation to be executed in parallel
3818      * @param transformer a function returning the transformation
3819      * for an element
3820      * @param basis the identity (initial default value) for the reduction
3821      * @param reducer a commutative associative combining function
3822      * @return the result of accumulating the given transformation
3823      * of all (key, value) pairs
3824      * @since 1.8
3825      */
reduceToLong(long parallelismThreshold, ToLongBiFunction<? super K, ? super V> transformer, long basis, LongBinaryOperator reducer)3826     public long reduceToLong(long parallelismThreshold,
3827                              ToLongBiFunction<? super K, ? super V> transformer,
3828                              long basis,
3829                              LongBinaryOperator reducer) {
3830         if (transformer == null || reducer == null)
3831             throw new NullPointerException();
3832         return new MapReduceMappingsToLongTask<K,V>
3833             (null, batchFor(parallelismThreshold), 0, 0, table,
3834              null, transformer, basis, reducer).invoke();
3835     }
3836 
3837     /**
3838      * Returns the result of accumulating the given transformation
3839      * of all (key, value) pairs using the given reducer to
3840      * combine values, and the given basis as an identity value.
3841      *
3842      * @param parallelismThreshold the (estimated) number of elements
3843      * needed for this operation to be executed in parallel
3844      * @param transformer a function returning the transformation
3845      * for an element
3846      * @param basis the identity (initial default value) for the reduction
3847      * @param reducer a commutative associative combining function
3848      * @return the result of accumulating the given transformation
3849      * of all (key, value) pairs
3850      * @since 1.8
3851      */
reduceToInt(long parallelismThreshold, ToIntBiFunction<? super K, ? super V> transformer, int basis, IntBinaryOperator reducer)3852     public int reduceToInt(long parallelismThreshold,
3853                            ToIntBiFunction<? super K, ? super V> transformer,
3854                            int basis,
3855                            IntBinaryOperator reducer) {
3856         if (transformer == null || reducer == null)
3857             throw new NullPointerException();
3858         return new MapReduceMappingsToIntTask<K,V>
3859             (null, batchFor(parallelismThreshold), 0, 0, table,
3860              null, transformer, basis, reducer).invoke();
3861     }
3862 
3863     /**
3864      * Performs the given action for each key.
3865      *
3866      * @param parallelismThreshold the (estimated) number of elements
3867      * needed for this operation to be executed in parallel
3868      * @param action the action
3869      * @since 1.8
3870      */
forEachKey(long parallelismThreshold, Consumer<? super K> action)3871     public void forEachKey(long parallelismThreshold,
3872                            Consumer<? super K> action) {
3873         if (action == null) throw new NullPointerException();
3874         new ForEachKeyTask<K,V>
3875             (null, batchFor(parallelismThreshold), 0, 0, table,
3876              action).invoke();
3877     }
3878 
3879     /**
3880      * Performs the given action for each non-null transformation
3881      * of each key.
3882      *
3883      * @param parallelismThreshold the (estimated) number of elements
3884      * needed for this operation to be executed in parallel
3885      * @param transformer a function returning the transformation
3886      * for an element, or null if there is no transformation (in
3887      * which case the action is not applied)
3888      * @param action the action
3889      * @param <U> the return type of the transformer
3890      * @since 1.8
3891      */
forEachKey(long parallelismThreshold, Function<? super K, ? extends U> transformer, Consumer<? super U> action)3892     public <U> void forEachKey(long parallelismThreshold,
3893                                Function<? super K, ? extends U> transformer,
3894                                Consumer<? super U> action) {
3895         if (transformer == null || action == null)
3896             throw new NullPointerException();
3897         new ForEachTransformedKeyTask<K,V,U>
3898             (null, batchFor(parallelismThreshold), 0, 0, table,
3899              transformer, action).invoke();
3900     }
3901 
3902     /**
3903      * Returns a non-null result from applying the given search
3904      * function on each key, or null if none. Upon success,
3905      * further element processing is suppressed and the results of
3906      * any other parallel invocations of the search function are
3907      * ignored.
3908      *
3909      * @param parallelismThreshold the (estimated) number of elements
3910      * needed for this operation to be executed in parallel
3911      * @param searchFunction a function returning a non-null
3912      * result on success, else null
3913      * @param <U> the return type of the search function
3914      * @return a non-null result from applying the given search
3915      * function on each key, or null if none
3916      * @since 1.8
3917      */
searchKeys(long parallelismThreshold, Function<? super K, ? extends U> searchFunction)3918     public <U> U searchKeys(long parallelismThreshold,
3919                             Function<? super K, ? extends U> searchFunction) {
3920         if (searchFunction == null) throw new NullPointerException();
3921         return new SearchKeysTask<K,V,U>
3922             (null, batchFor(parallelismThreshold), 0, 0, table,
3923              searchFunction, new AtomicReference<U>()).invoke();
3924     }
3925 
3926     /**
3927      * Returns the result of accumulating all keys using the given
3928      * reducer to combine values, or null if none.
3929      *
3930      * @param parallelismThreshold the (estimated) number of elements
3931      * needed for this operation to be executed in parallel
3932      * @param reducer a commutative associative combining function
3933      * @return the result of accumulating all keys using the given
3934      * reducer to combine values, or null if none
3935      * @since 1.8
3936      */
reduceKeys(long parallelismThreshold, BiFunction<? super K, ? super K, ? extends K> reducer)3937     public K reduceKeys(long parallelismThreshold,
3938                         BiFunction<? super K, ? super K, ? extends K> reducer) {
3939         if (reducer == null) throw new NullPointerException();
3940         return new ReduceKeysTask<K,V>
3941             (null, batchFor(parallelismThreshold), 0, 0, table,
3942              null, reducer).invoke();
3943     }
3944 
3945     /**
3946      * Returns the result of accumulating the given transformation
3947      * of all keys using the given reducer to combine values, or
3948      * null if none.
3949      *
3950      * @param parallelismThreshold the (estimated) number of elements
3951      * needed for this operation to be executed in parallel
3952      * @param transformer a function returning the transformation
3953      * for an element, or null if there is no transformation (in
3954      * which case it is not combined)
3955      * @param reducer a commutative associative combining function
3956      * @param <U> the return type of the transformer
3957      * @return the result of accumulating the given transformation
3958      * of all keys
3959      * @since 1.8
3960      */
reduceKeys(long parallelismThreshold, Function<? super K, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)3961     public <U> U reduceKeys(long parallelismThreshold,
3962                             Function<? super K, ? extends U> transformer,
3963          BiFunction<? super U, ? super U, ? extends U> reducer) {
3964         if (transformer == null || reducer == null)
3965             throw new NullPointerException();
3966         return new MapReduceKeysTask<K,V,U>
3967             (null, batchFor(parallelismThreshold), 0, 0, table,
3968              null, transformer, reducer).invoke();
3969     }
3970 
3971     /**
3972      * Returns the result of accumulating the given transformation
3973      * of all keys using the given reducer to combine values, and
3974      * the given basis as an identity value.
3975      *
3976      * @param parallelismThreshold the (estimated) number of elements
3977      * needed for this operation to be executed in parallel
3978      * @param transformer a function returning the transformation
3979      * for an element
3980      * @param basis the identity (initial default value) for the reduction
3981      * @param reducer a commutative associative combining function
3982      * @return the result of accumulating the given transformation
3983      * of all keys
3984      * @since 1.8
3985      */
reduceKeysToDouble(long parallelismThreshold, ToDoubleFunction<? super K> transformer, double basis, DoubleBinaryOperator reducer)3986     public double reduceKeysToDouble(long parallelismThreshold,
3987                                      ToDoubleFunction<? super K> transformer,
3988                                      double basis,
3989                                      DoubleBinaryOperator reducer) {
3990         if (transformer == null || reducer == null)
3991             throw new NullPointerException();
3992         return new MapReduceKeysToDoubleTask<K,V>
3993             (null, batchFor(parallelismThreshold), 0, 0, table,
3994              null, transformer, basis, reducer).invoke();
3995     }
3996 
3997     /**
3998      * Returns the result of accumulating the given transformation
3999      * of all keys using the given reducer to combine values, and
4000      * the given basis as an identity value.
4001      *
4002      * @param parallelismThreshold the (estimated) number of elements
4003      * needed for this operation to be executed in parallel
4004      * @param transformer a function returning the transformation
4005      * for an element
4006      * @param basis the identity (initial default value) for the reduction
4007      * @param reducer a commutative associative combining function
4008      * @return the result of accumulating the given transformation
4009      * of all keys
4010      * @since 1.8
4011      */
reduceKeysToLong(long parallelismThreshold, ToLongFunction<? super K> transformer, long basis, LongBinaryOperator reducer)4012     public long reduceKeysToLong(long parallelismThreshold,
4013                                  ToLongFunction<? super K> transformer,
4014                                  long basis,
4015                                  LongBinaryOperator reducer) {
4016         if (transformer == null || reducer == null)
4017             throw new NullPointerException();
4018         return new MapReduceKeysToLongTask<K,V>
4019             (null, batchFor(parallelismThreshold), 0, 0, table,
4020              null, transformer, basis, reducer).invoke();
4021     }
4022 
4023     /**
4024      * Returns the result of accumulating the given transformation
4025      * of all keys using the given reducer to combine values, and
4026      * the given basis as an identity value.
4027      *
4028      * @param parallelismThreshold the (estimated) number of elements
4029      * needed for this operation to be executed in parallel
4030      * @param transformer a function returning the transformation
4031      * for an element
4032      * @param basis the identity (initial default value) for the reduction
4033      * @param reducer a commutative associative combining function
4034      * @return the result of accumulating the given transformation
4035      * of all keys
4036      * @since 1.8
4037      */
reduceKeysToInt(long parallelismThreshold, ToIntFunction<? super K> transformer, int basis, IntBinaryOperator reducer)4038     public int reduceKeysToInt(long parallelismThreshold,
4039                                ToIntFunction<? super K> transformer,
4040                                int basis,
4041                                IntBinaryOperator reducer) {
4042         if (transformer == null || reducer == null)
4043             throw new NullPointerException();
4044         return new MapReduceKeysToIntTask<K,V>
4045             (null, batchFor(parallelismThreshold), 0, 0, table,
4046              null, transformer, basis, reducer).invoke();
4047     }
4048 
4049     /**
4050      * Performs the given action for each value.
4051      *
4052      * @param parallelismThreshold the (estimated) number of elements
4053      * needed for this operation to be executed in parallel
4054      * @param action the action
4055      * @since 1.8
4056      */
forEachValue(long parallelismThreshold, Consumer<? super V> action)4057     public void forEachValue(long parallelismThreshold,
4058                              Consumer<? super V> action) {
4059         if (action == null)
4060             throw new NullPointerException();
4061         new ForEachValueTask<K,V>
4062             (null, batchFor(parallelismThreshold), 0, 0, table,
4063              action).invoke();
4064     }
4065 
4066     /**
4067      * Performs the given action for each non-null transformation
4068      * of each value.
4069      *
4070      * @param parallelismThreshold the (estimated) number of elements
4071      * needed for this operation to be executed in parallel
4072      * @param transformer a function returning the transformation
4073      * for an element, or null if there is no transformation (in
4074      * which case the action is not applied)
4075      * @param action the action
4076      * @param <U> the return type of the transformer
4077      * @since 1.8
4078      */
forEachValue(long parallelismThreshold, Function<? super V, ? extends U> transformer, Consumer<? super U> action)4079     public <U> void forEachValue(long parallelismThreshold,
4080                                  Function<? super V, ? extends U> transformer,
4081                                  Consumer<? super U> action) {
4082         if (transformer == null || action == null)
4083             throw new NullPointerException();
4084         new ForEachTransformedValueTask<K,V,U>
4085             (null, batchFor(parallelismThreshold), 0, 0, table,
4086              transformer, action).invoke();
4087     }
4088 
4089     /**
4090      * Returns a non-null result from applying the given search
4091      * function on each value, or null if none.  Upon success,
4092      * further element processing is suppressed and the results of
4093      * any other parallel invocations of the search function are
4094      * ignored.
4095      *
4096      * @param parallelismThreshold the (estimated) number of elements
4097      * needed for this operation to be executed in parallel
4098      * @param searchFunction a function returning a non-null
4099      * result on success, else null
4100      * @param <U> the return type of the search function
4101      * @return a non-null result from applying the given search
4102      * function on each value, or null if none
4103      * @since 1.8
4104      */
searchValues(long parallelismThreshold, Function<? super V, ? extends U> searchFunction)4105     public <U> U searchValues(long parallelismThreshold,
4106                               Function<? super V, ? extends U> searchFunction) {
4107         if (searchFunction == null) throw new NullPointerException();
4108         return new SearchValuesTask<K,V,U>
4109             (null, batchFor(parallelismThreshold), 0, 0, table,
4110              searchFunction, new AtomicReference<U>()).invoke();
4111     }
4112 
4113     /**
4114      * Returns the result of accumulating all values using the
4115      * given reducer to combine values, or null if none.
4116      *
4117      * @param parallelismThreshold the (estimated) number of elements
4118      * needed for this operation to be executed in parallel
4119      * @param reducer a commutative associative combining function
4120      * @return the result of accumulating all values
4121      * @since 1.8
4122      */
reduceValues(long parallelismThreshold, BiFunction<? super V, ? super V, ? extends V> reducer)4123     public V reduceValues(long parallelismThreshold,
4124                           BiFunction<? super V, ? super V, ? extends V> reducer) {
4125         if (reducer == null) throw new NullPointerException();
4126         return new ReduceValuesTask<K,V>
4127             (null, batchFor(parallelismThreshold), 0, 0, table,
4128              null, reducer).invoke();
4129     }
4130 
4131     /**
4132      * Returns the result of accumulating the given transformation
4133      * of all values using the given reducer to combine values, or
4134      * null if none.
4135      *
4136      * @param parallelismThreshold the (estimated) number of elements
4137      * needed for this operation to be executed in parallel
4138      * @param transformer a function returning the transformation
4139      * for an element, or null if there is no transformation (in
4140      * which case it is not combined)
4141      * @param reducer a commutative associative combining function
4142      * @param <U> the return type of the transformer
4143      * @return the result of accumulating the given transformation
4144      * of all values
4145      * @since 1.8
4146      */
reduceValues(long parallelismThreshold, Function<? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)4147     public <U> U reduceValues(long parallelismThreshold,
4148                               Function<? super V, ? extends U> transformer,
4149                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4150         if (transformer == null || reducer == null)
4151             throw new NullPointerException();
4152         return new MapReduceValuesTask<K,V,U>
4153             (null, batchFor(parallelismThreshold), 0, 0, table,
4154              null, transformer, reducer).invoke();
4155     }
4156 
4157     /**
4158      * Returns the result of accumulating the given transformation
4159      * of all values using the given reducer to combine values,
4160      * and the given basis as an identity value.
4161      *
4162      * @param parallelismThreshold the (estimated) number of elements
4163      * needed for this operation to be executed in parallel
4164      * @param transformer a function returning the transformation
4165      * for an element
4166      * @param basis the identity (initial default value) for the reduction
4167      * @param reducer a commutative associative combining function
4168      * @return the result of accumulating the given transformation
4169      * of all values
4170      * @since 1.8
4171      */
reduceValuesToDouble(long parallelismThreshold, ToDoubleFunction<? super V> transformer, double basis, DoubleBinaryOperator reducer)4172     public double reduceValuesToDouble(long parallelismThreshold,
4173                                        ToDoubleFunction<? super V> transformer,
4174                                        double basis,
4175                                        DoubleBinaryOperator reducer) {
4176         if (transformer == null || reducer == null)
4177             throw new NullPointerException();
4178         return new MapReduceValuesToDoubleTask<K,V>
4179             (null, batchFor(parallelismThreshold), 0, 0, table,
4180              null, transformer, basis, reducer).invoke();
4181     }
4182 
4183     /**
4184      * Returns the result of accumulating the given transformation
4185      * of all values using the given reducer to combine values,
4186      * and the given basis as an identity value.
4187      *
4188      * @param parallelismThreshold the (estimated) number of elements
4189      * needed for this operation to be executed in parallel
4190      * @param transformer a function returning the transformation
4191      * for an element
4192      * @param basis the identity (initial default value) for the reduction
4193      * @param reducer a commutative associative combining function
4194      * @return the result of accumulating the given transformation
4195      * of all values
4196      * @since 1.8
4197      */
reduceValuesToLong(long parallelismThreshold, ToLongFunction<? super V> transformer, long basis, LongBinaryOperator reducer)4198     public long reduceValuesToLong(long parallelismThreshold,
4199                                    ToLongFunction<? super V> transformer,
4200                                    long basis,
4201                                    LongBinaryOperator reducer) {
4202         if (transformer == null || reducer == null)
4203             throw new NullPointerException();
4204         return new MapReduceValuesToLongTask<K,V>
4205             (null, batchFor(parallelismThreshold), 0, 0, table,
4206              null, transformer, basis, reducer).invoke();
4207     }
4208 
4209     /**
4210      * Returns the result of accumulating the given transformation
4211      * of all values using the given reducer to combine values,
4212      * and the given basis as an identity value.
4213      *
4214      * @param parallelismThreshold the (estimated) number of elements
4215      * needed for this operation to be executed in parallel
4216      * @param transformer a function returning the transformation
4217      * for an element
4218      * @param basis the identity (initial default value) for the reduction
4219      * @param reducer a commutative associative combining function
4220      * @return the result of accumulating the given transformation
4221      * of all values
4222      * @since 1.8
4223      */
reduceValuesToInt(long parallelismThreshold, ToIntFunction<? super V> transformer, int basis, IntBinaryOperator reducer)4224     public int reduceValuesToInt(long parallelismThreshold,
4225                                  ToIntFunction<? super V> transformer,
4226                                  int basis,
4227                                  IntBinaryOperator reducer) {
4228         if (transformer == null || reducer == null)
4229             throw new NullPointerException();
4230         return new MapReduceValuesToIntTask<K,V>
4231             (null, batchFor(parallelismThreshold), 0, 0, table,
4232              null, transformer, basis, reducer).invoke();
4233     }
4234 
4235     /**
4236      * Performs the given action for each entry.
4237      *
4238      * @param parallelismThreshold the (estimated) number of elements
4239      * needed for this operation to be executed in parallel
4240      * @param action the action
4241      * @since 1.8
4242      */
forEachEntry(long parallelismThreshold, Consumer<? super Map.Entry<K,V>> action)4243     public void forEachEntry(long parallelismThreshold,
4244                              Consumer<? super Map.Entry<K,V>> action) {
4245         if (action == null) throw new NullPointerException();
4246         new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4247                                   action).invoke();
4248     }
4249 
4250     /**
4251      * Performs the given action for each non-null transformation
4252      * of each entry.
4253      *
4254      * @param parallelismThreshold the (estimated) number of elements
4255      * needed for this operation to be executed in parallel
4256      * @param transformer a function returning the transformation
4257      * for an element, or null if there is no transformation (in
4258      * which case the action is not applied)
4259      * @param action the action
4260      * @param <U> the return type of the transformer
4261      * @since 1.8
4262      */
forEachEntry(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action)4263     public <U> void forEachEntry(long parallelismThreshold,
4264                                  Function<Map.Entry<K,V>, ? extends U> transformer,
4265                                  Consumer<? super U> action) {
4266         if (transformer == null || action == null)
4267             throw new NullPointerException();
4268         new ForEachTransformedEntryTask<K,V,U>
4269             (null, batchFor(parallelismThreshold), 0, 0, table,
4270              transformer, action).invoke();
4271     }
4272 
4273     /**
4274      * Returns a non-null result from applying the given search
4275      * function on each entry, or null if none.  Upon success,
4276      * further element processing is suppressed and the results of
4277      * any other parallel invocations of the search function are
4278      * ignored.
4279      *
4280      * @param parallelismThreshold the (estimated) number of elements
4281      * needed for this operation to be executed in parallel
4282      * @param searchFunction a function returning a non-null
4283      * result on success, else null
4284      * @param <U> the return type of the search function
4285      * @return a non-null result from applying the given search
4286      * function on each entry, or null if none
4287      * @since 1.8
4288      */
searchEntries(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> searchFunction)4289     public <U> U searchEntries(long parallelismThreshold,
4290                                Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4291         if (searchFunction == null) throw new NullPointerException();
4292         return new SearchEntriesTask<K,V,U>
4293             (null, batchFor(parallelismThreshold), 0, 0, table,
4294              searchFunction, new AtomicReference<U>()).invoke();
4295     }
4296 
4297     /**
4298      * Returns the result of accumulating all entries using the
4299      * given reducer to combine values, or null if none.
4300      *
4301      * @param parallelismThreshold the (estimated) number of elements
4302      * needed for this operation to be executed in parallel
4303      * @param reducer a commutative associative combining function
4304      * @return the result of accumulating all entries
4305      * @since 1.8
4306      */
reduceEntries(long parallelismThreshold, BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer)4307     public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4308                                         BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4309         if (reducer == null) throw new NullPointerException();
4310         return new ReduceEntriesTask<K,V>
4311             (null, batchFor(parallelismThreshold), 0, 0, table,
4312              null, reducer).invoke();
4313     }
4314 
4315     /**
4316      * Returns the result of accumulating the given transformation
4317      * of all entries using the given reducer to combine values,
4318      * or null if none.
4319      *
4320      * @param parallelismThreshold the (estimated) number of elements
4321      * needed for this operation to be executed in parallel
4322      * @param transformer a function returning the transformation
4323      * for an element, or null if there is no transformation (in
4324      * which case it is not combined)
4325      * @param reducer a commutative associative combining function
4326      * @param <U> the return type of the transformer
4327      * @return the result of accumulating the given transformation
4328      * of all entries
4329      * @since 1.8
4330      */
reduceEntries(long parallelismThreshold, Function<Map.Entry<K,V>, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)4331     public <U> U reduceEntries(long parallelismThreshold,
4332                                Function<Map.Entry<K,V>, ? extends U> transformer,
4333                                BiFunction<? super U, ? super U, ? extends U> reducer) {
4334         if (transformer == null || reducer == null)
4335             throw new NullPointerException();
4336         return new MapReduceEntriesTask<K,V,U>
4337             (null, batchFor(parallelismThreshold), 0, 0, table,
4338              null, transformer, reducer).invoke();
4339     }
4340 
4341     /**
4342      * Returns the result of accumulating the given transformation
4343      * of all entries using the given reducer to combine values,
4344      * and the given basis as an identity value.
4345      *
4346      * @param parallelismThreshold the (estimated) number of elements
4347      * needed for this operation to be executed in parallel
4348      * @param transformer a function returning the transformation
4349      * for an element
4350      * @param basis the identity (initial default value) for the reduction
4351      * @param reducer a commutative associative combining function
4352      * @return the result of accumulating the given transformation
4353      * of all entries
4354      * @since 1.8
4355      */
reduceEntriesToDouble(long parallelismThreshold, ToDoubleFunction<Map.Entry<K,V>> transformer, double basis, DoubleBinaryOperator reducer)4356     public double reduceEntriesToDouble(long parallelismThreshold,
4357                                         ToDoubleFunction<Map.Entry<K,V>> transformer,
4358                                         double basis,
4359                                         DoubleBinaryOperator reducer) {
4360         if (transformer == null || reducer == null)
4361             throw new NullPointerException();
4362         return new MapReduceEntriesToDoubleTask<K,V>
4363             (null, batchFor(parallelismThreshold), 0, 0, table,
4364              null, transformer, basis, reducer).invoke();
4365     }
4366 
4367     /**
4368      * Returns the result of accumulating the given transformation
4369      * of all entries using the given reducer to combine values,
4370      * and the given basis as an identity value.
4371      *
4372      * @param parallelismThreshold the (estimated) number of elements
4373      * needed for this operation to be executed in parallel
4374      * @param transformer a function returning the transformation
4375      * for an element
4376      * @param basis the identity (initial default value) for the reduction
4377      * @param reducer a commutative associative combining function
4378      * @return the result of accumulating the given transformation
4379      * of all entries
4380      * @since 1.8
4381      */
reduceEntriesToLong(long parallelismThreshold, ToLongFunction<Map.Entry<K,V>> transformer, long basis, LongBinaryOperator reducer)4382     public long reduceEntriesToLong(long parallelismThreshold,
4383                                     ToLongFunction<Map.Entry<K,V>> transformer,
4384                                     long basis,
4385                                     LongBinaryOperator reducer) {
4386         if (transformer == null || reducer == null)
4387             throw new NullPointerException();
4388         return new MapReduceEntriesToLongTask<K,V>
4389             (null, batchFor(parallelismThreshold), 0, 0, table,
4390              null, transformer, basis, reducer).invoke();
4391     }
4392 
4393     /**
4394      * Returns the result of accumulating the given transformation
4395      * of all entries using the given reducer to combine values,
4396      * and the given basis as an identity value.
4397      *
4398      * @param parallelismThreshold the (estimated) number of elements
4399      * needed for this operation to be executed in parallel
4400      * @param transformer a function returning the transformation
4401      * for an element
4402      * @param basis the identity (initial default value) for the reduction
4403      * @param reducer a commutative associative combining function
4404      * @return the result of accumulating the given transformation
4405      * of all entries
4406      * @since 1.8
4407      */
reduceEntriesToInt(long parallelismThreshold, ToIntFunction<Map.Entry<K,V>> transformer, int basis, IntBinaryOperator reducer)4408     public int reduceEntriesToInt(long parallelismThreshold,
4409                                   ToIntFunction<Map.Entry<K,V>> transformer,
4410                                   int basis,
4411                                   IntBinaryOperator reducer) {
4412         if (transformer == null || reducer == null)
4413             throw new NullPointerException();
4414         return new MapReduceEntriesToIntTask<K,V>
4415             (null, batchFor(parallelismThreshold), 0, 0, table,
4416              null, transformer, basis, reducer).invoke();
4417     }
4418 
4419 
4420     /* ----------------Views -------------- */
4421 
4422     /**
4423      * Base class for views.
4424      */
4425     // Android-changed: Remove sealed keyword due to Metalava support. (b/355648520)
4426     // abstract static sealed class CollectionView<K,V,E>
4427     //     implements Collection<E>, java.io.Serializable permits EntrySetView, KeySetView, ValuesView {
4428     abstract static class CollectionView<K,V,E>
4429         implements Collection<E>, java.io.Serializable {
4430         private static final long serialVersionUID = 7249069246763182397L;
4431         final ConcurrentHashMap<K,V> map;
CollectionView(ConcurrentHashMap<K,V> map)4432         CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }
4433 
4434         /**
4435          * Returns the map backing this view.
4436          *
4437          * @return the map backing this view
4438          */
getMap()4439         public ConcurrentHashMap<K,V> getMap() { return map; }
4440 
4441         /**
4442          * Removes all of the elements from this view, by removing all
4443          * the mappings from the map backing this view.
4444          */
clear()4445         public final void clear()      { map.clear(); }
size()4446         public final int size()        { return map.size(); }
isEmpty()4447         public final boolean isEmpty() { return map.isEmpty(); }
4448 
4449         // implementations below rely on concrete classes supplying these
4450         // abstract methods
4451         /**
4452          * Returns an iterator over the elements in this collection.
4453          *
4454          * <p>The returned iterator is
4455          * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4456          *
4457          * @return an iterator over the elements in this collection
4458          */
iterator()4459         public abstract Iterator<E> iterator();
contains(Object o)4460         public abstract boolean contains(Object o);
remove(Object o)4461         public abstract boolean remove(Object o);
4462 
4463         private static final String OOME_MSG = "Required array size too large";
4464 
toArray()4465         public final Object[] toArray() {
4466             long sz = map.mappingCount();
4467             if (sz > MAX_ARRAY_SIZE)
4468                 throw new OutOfMemoryError(OOME_MSG);
4469             int n = (int)sz;
4470             Object[] r = new Object[n];
4471             int i = 0;
4472             for (E e : this) {
4473                 if (i == n) {
4474                     if (n >= MAX_ARRAY_SIZE)
4475                         throw new OutOfMemoryError(OOME_MSG);
4476                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4477                         n = MAX_ARRAY_SIZE;
4478                     else
4479                         n += (n >>> 1) + 1;
4480                     r = Arrays.copyOf(r, n);
4481                 }
4482                 r[i++] = e;
4483             }
4484             return (i == n) ? r : Arrays.copyOf(r, i);
4485         }
4486 
4487         @SuppressWarnings("unchecked")
toArray(T[] a)4488         public final <T> T[] toArray(T[] a) {
4489             long sz = map.mappingCount();
4490             if (sz > MAX_ARRAY_SIZE)
4491                 throw new OutOfMemoryError(OOME_MSG);
4492             int m = (int)sz;
4493             T[] r = (a.length >= m) ? a :
4494                 (T[])java.lang.reflect.Array
4495                 .newInstance(a.getClass().getComponentType(), m);
4496             int n = r.length;
4497             int i = 0;
4498             for (E e : this) {
4499                 if (i == n) {
4500                     if (n >= MAX_ARRAY_SIZE)
4501                         throw new OutOfMemoryError(OOME_MSG);
4502                     if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4503                         n = MAX_ARRAY_SIZE;
4504                     else
4505                         n += (n >>> 1) + 1;
4506                     r = Arrays.copyOf(r, n);
4507                 }
4508                 r[i++] = (T)e;
4509             }
4510             if (a == r && i < n) {
4511                 r[i] = null; // null-terminate
4512                 return r;
4513             }
4514             return (i == n) ? r : Arrays.copyOf(r, i);
4515         }
4516 
4517         /**
4518          * Returns a string representation of this collection.
4519          * The string representation consists of the string representations
4520          * of the collection's elements in the order they are returned by
4521          * its iterator, enclosed in square brackets ({@code "[]"}).
4522          * Adjacent elements are separated by the characters {@code ", "}
4523          * (comma and space).  Elements are converted to strings as by
4524          * {@link String#valueOf(Object)}.
4525          *
4526          * @return a string representation of this collection
4527          */
toString()4528         public final String toString() {
4529             StringBuilder sb = new StringBuilder();
4530             sb.append('[');
4531             Iterator<E> it = iterator();
4532             if (it.hasNext()) {
4533                 for (;;) {
4534                     Object e = it.next();
4535                     sb.append(e == this ? "(this Collection)" : e);
4536                     if (!it.hasNext())
4537                         break;
4538                     sb.append(',').append(' ');
4539                 }
4540             }
4541             return sb.append(']').toString();
4542         }
4543 
containsAll(Collection<?> c)4544         public final boolean containsAll(Collection<?> c) {
4545             if (c != this) {
4546                 for (Object e : c) {
4547                     if (e == null || !contains(e))
4548                         return false;
4549                 }
4550             }
4551             return true;
4552         }
4553 
removeAll(Collection<?> c)4554         public boolean removeAll(Collection<?> c) {
4555             if (c == null) throw new NullPointerException();
4556             boolean modified = false;
4557             // Use (c instanceof Set) as a hint that lookup in c is as
4558             // efficient as this view
4559             Node<K,V>[] t;
4560             if ((t = map.table) == null) {
4561                 return false;
4562             } else if (c instanceof Set<?> && c.size() > t.length) {
4563                 for (Iterator<?> it = iterator(); it.hasNext(); ) {
4564                     if (c.contains(it.next())) {
4565                         it.remove();
4566                         modified = true;
4567                     }
4568                 }
4569             } else {
4570                 for (Object e : c)
4571                     modified |= remove(e);
4572             }
4573             return modified;
4574         }
4575 
retainAll(Collection<?> c)4576         public final boolean retainAll(Collection<?> c) {
4577             if (c == null) throw new NullPointerException();
4578             boolean modified = false;
4579             for (Iterator<E> it = iterator(); it.hasNext();) {
4580                 if (!c.contains(it.next())) {
4581                     it.remove();
4582                     modified = true;
4583                 }
4584             }
4585             return modified;
4586         }
4587 
4588     }
4589 
4590     /**
4591      * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4592      * which additions may optionally be enabled by mapping to a
4593      * common value.  This class cannot be directly instantiated.
4594      * See {@link #keySet(Object) keySet(V)},
4595      * {@link #newKeySet() newKeySet()},
4596      * {@link #newKeySet(int) newKeySet(int)}.
4597      *
4598      * @param <K> the type of keys
4599      * @param <V> the type of values in the backing map
4600      *
4601      * @since 1.8
4602      */
4603     public static final class KeySetView<K,V> extends CollectionView<K,V,K>
4604         implements Set<K>, java.io.Serializable {
4605         private static final long serialVersionUID = 7249069246763182397L;
4606         @SuppressWarnings("serial") // Conditionally serializable
4607         private final V value;
KeySetView(ConcurrentHashMap<K,V> map, V value)4608         KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
4609             super(map);
4610             this.value = value;
4611         }
4612 
4613         /**
4614          * Returns the default mapped value for additions,
4615          * or {@code null} if additions are not supported.
4616          *
4617          * @return the default mapped value for additions, or {@code null}
4618          * if not supported
4619          */
getMappedValue()4620         public V getMappedValue() { return value; }
4621 
4622         /**
4623          * {@inheritDoc}
4624          * @throws NullPointerException if the specified key is null
4625          */
contains(Object o)4626         public boolean contains(Object o) { return map.containsKey(o); }
4627 
4628         /**
4629          * Removes the key from this map view, by removing the key (and its
4630          * corresponding value) from the backing map.  This method does
4631          * nothing if the key is not in the map.
4632          *
4633          * @param  o the key to be removed from the backing map
4634          * @return {@code true} if the backing map contained the specified key
4635          * @throws NullPointerException if the specified key is null
4636          */
remove(Object o)4637         public boolean remove(Object o) { return map.remove(o) != null; }
4638 
4639         /**
4640          * @return an iterator over the keys of the backing map
4641          */
iterator()4642         public Iterator<K> iterator() {
4643             Node<K,V>[] t;
4644             ConcurrentHashMap<K,V> m = map;
4645             int f = (t = m.table) == null ? 0 : t.length;
4646             return new KeyIterator<K,V>(t, f, 0, f, m);
4647         }
4648 
4649         /**
4650          * Adds the specified key to this set view by mapping the key to
4651          * the default mapped value in the backing map, if defined.
4652          *
4653          * @param e key to be added
4654          * @return {@code true} if this set changed as a result of the call
4655          * @throws NullPointerException if the specified key is null
4656          * @throws UnsupportedOperationException if no default mapped value
4657          * for additions was provided
4658          */
add(K e)4659         public boolean add(K e) {
4660             V v;
4661             if ((v = value) == null)
4662                 throw new UnsupportedOperationException();
4663             return map.putVal(e, v, true) == null;
4664         }
4665 
4666         /**
4667          * Adds all of the elements in the specified collection to this set,
4668          * as if by calling {@link #add} on each one.
4669          *
4670          * @param c the elements to be inserted into this set
4671          * @return {@code true} if this set changed as a result of the call
4672          * @throws NullPointerException if the collection or any of its
4673          * elements are {@code null}
4674          * @throws UnsupportedOperationException if no default mapped value
4675          * for additions was provided
4676          */
addAll(Collection<? extends K> c)4677         public boolean addAll(Collection<? extends K> c) {
4678             boolean added = false;
4679             V v;
4680             if ((v = value) == null)
4681                 throw new UnsupportedOperationException();
4682             for (K e : c) {
4683                 if (map.putVal(e, v, true) == null)
4684                     added = true;
4685             }
4686             return added;
4687         }
4688 
hashCode()4689         public int hashCode() {
4690             int h = 0;
4691             for (K e : this)
4692                 h += e.hashCode();
4693             return h;
4694         }
4695 
equals(Object o)4696         public boolean equals(Object o) {
4697             Set<?> c;
4698             return ((o instanceof Set) &&
4699                     ((c = (Set<?>)o) == this ||
4700                      (containsAll(c) && c.containsAll(this))));
4701         }
4702 
spliterator()4703         public Spliterator<K> spliterator() {
4704             Node<K,V>[] t;
4705             ConcurrentHashMap<K,V> m = map;
4706             long n = m.sumCount();
4707             int f = (t = m.table) == null ? 0 : t.length;
4708             return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4709         }
4710 
forEach(Consumer<? super K> action)4711         public void forEach(Consumer<? super K> action) {
4712             if (action == null) throw new NullPointerException();
4713             Node<K,V>[] t;
4714             if ((t = map.table) != null) {
4715                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4716                 for (Node<K,V> p; (p = it.advance()) != null; )
4717                     action.accept(p.key);
4718             }
4719         }
4720     }
4721 
4722     /**
4723      * A view of a ConcurrentHashMap as a {@link Collection} of
4724      * values, in which additions are disabled. This class cannot be
4725      * directly instantiated. See {@link #values()}.
4726      */
4727     static final class ValuesView<K,V> extends CollectionView<K,V,V>
4728         implements Collection<V>, java.io.Serializable {
4729         private static final long serialVersionUID = 2249069246763182397L;
ValuesView(ConcurrentHashMap<K,V> map)4730         ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
contains(Object o)4731         public final boolean contains(Object o) {
4732             return map.containsValue(o);
4733         }
4734 
remove(Object o)4735         public final boolean remove(Object o) {
4736             if (o != null) {
4737                 for (Iterator<V> it = iterator(); it.hasNext();) {
4738                     if (o.equals(it.next())) {
4739                         it.remove();
4740                         return true;
4741                     }
4742                 }
4743             }
4744             return false;
4745         }
4746 
iterator()4747         public final Iterator<V> iterator() {
4748             ConcurrentHashMap<K,V> m = map;
4749             Node<K,V>[] t;
4750             int f = (t = m.table) == null ? 0 : t.length;
4751             return new ValueIterator<K,V>(t, f, 0, f, m);
4752         }
4753 
add(V e)4754         public final boolean add(V e) {
4755             throw new UnsupportedOperationException();
4756         }
addAll(Collection<? extends V> c)4757         public final boolean addAll(Collection<? extends V> c) {
4758             throw new UnsupportedOperationException();
4759         }
4760 
removeAll(Collection<?> c)4761         @Override public boolean removeAll(Collection<?> c) {
4762             if (c == null) throw new NullPointerException();
4763             boolean modified = false;
4764             for (Iterator<V> it = iterator(); it.hasNext();) {
4765                 if (c.contains(it.next())) {
4766                     it.remove();
4767                     modified = true;
4768                 }
4769             }
4770             return modified;
4771         }
4772 
removeIf(Predicate<? super V> filter)4773         public boolean removeIf(Predicate<? super V> filter) {
4774             return map.removeValueIf(filter);
4775         }
4776 
spliterator()4777         public Spliterator<V> spliterator() {
4778             Node<K,V>[] t;
4779             ConcurrentHashMap<K,V> m = map;
4780             long n = m.sumCount();
4781             int f = (t = m.table) == null ? 0 : t.length;
4782             return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4783         }
4784 
forEach(Consumer<? super V> action)4785         public void forEach(Consumer<? super V> action) {
4786             if (action == null) throw new NullPointerException();
4787             Node<K,V>[] t;
4788             if ((t = map.table) != null) {
4789                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4790                 for (Node<K,V> p; (p = it.advance()) != null; )
4791                     action.accept(p.val);
4792             }
4793         }
4794     }
4795 
4796     /**
4797      * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4798      * entries.  This class cannot be directly instantiated. See
4799      * {@link #entrySet()}.
4800      */
4801     static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4802         implements Set<Map.Entry<K,V>>, java.io.Serializable {
4803         private static final long serialVersionUID = 2249069246763182397L;
EntrySetView(ConcurrentHashMap<K,V> map)4804         EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4805 
contains(Object o)4806         public boolean contains(Object o) {
4807             Object k, v, r; Map.Entry<?,?> e;
4808             return ((o instanceof Map.Entry) &&
4809                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4810                     (r = map.get(k)) != null &&
4811                     (v = e.getValue()) != null &&
4812                     (v == r || v.equals(r)));
4813         }
4814 
remove(Object o)4815         public boolean remove(Object o) {
4816             Object k, v; Map.Entry<?,?> e;
4817             return ((o instanceof Map.Entry) &&
4818                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4819                     (v = e.getValue()) != null &&
4820                     map.remove(k, v));
4821         }
4822 
4823         /**
4824          * @return an iterator over the entries of the backing map
4825          */
iterator()4826         public Iterator<Map.Entry<K,V>> iterator() {
4827             ConcurrentHashMap<K,V> m = map;
4828             Node<K,V>[] t;
4829             int f = (t = m.table) == null ? 0 : t.length;
4830             return new EntryIterator<K,V>(t, f, 0, f, m);
4831         }
4832 
add(Entry<K,V> e)4833         public boolean add(Entry<K,V> e) {
4834             return map.putVal(e.getKey(), e.getValue(), false) == null;
4835         }
4836 
addAll(Collection<? extends Entry<K,V>> c)4837         public boolean addAll(Collection<? extends Entry<K,V>> c) {
4838             boolean added = false;
4839             for (Entry<K,V> e : c) {
4840                 if (add(e))
4841                     added = true;
4842             }
4843             return added;
4844         }
4845 
removeIf(Predicate<? super Entry<K,V>> filter)4846         public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
4847             return map.removeEntryIf(filter);
4848         }
4849 
hashCode()4850         public final int hashCode() {
4851             int h = 0;
4852             Node<K,V>[] t;
4853             if ((t = map.table) != null) {
4854                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4855                 for (Node<K,V> p; (p = it.advance()) != null; ) {
4856                     h += p.hashCode();
4857                 }
4858             }
4859             return h;
4860         }
4861 
equals(Object o)4862         public final boolean equals(Object o) {
4863             Set<?> c;
4864             return ((o instanceof Set) &&
4865                     ((c = (Set<?>)o) == this ||
4866                      (containsAll(c) && c.containsAll(this))));
4867         }
4868 
spliterator()4869         public Spliterator<Map.Entry<K,V>> spliterator() {
4870             Node<K,V>[] t;
4871             ConcurrentHashMap<K,V> m = map;
4872             long n = m.sumCount();
4873             int f = (t = m.table) == null ? 0 : t.length;
4874             return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4875         }
4876 
forEach(Consumer<? super Map.Entry<K,V>> action)4877         public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4878             if (action == null) throw new NullPointerException();
4879             Node<K,V>[] t;
4880             if ((t = map.table) != null) {
4881                 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4882                 for (Node<K,V> p; (p = it.advance()) != null; )
4883                     action.accept(new MapEntry<K,V>(p.key, p.val, map));
4884             }
4885         }
4886 
4887     }
4888 
4889     // -------------------------------------------------------
4890 
4891     /**
4892      * Base class for bulk tasks. Repeats some fields and code from
4893      * class Traverser, because we need to subclass CountedCompleter.
4894      */
4895     @SuppressWarnings("serial")
4896     abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4897         Node<K,V>[] tab;        // same as Traverser
4898         Node<K,V> next;
4899         TableStack<K,V> stack, spare;
4900         int index;
4901         int baseIndex;
4902         int baseLimit;
4903         final int baseSize;
4904         int batch;              // split control
4905 
BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t)4906         BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4907             super(par);
4908             this.batch = b;
4909             this.index = this.baseIndex = i;
4910             if ((this.tab = t) == null)
4911                 this.baseSize = this.baseLimit = 0;
4912             else if (par == null)
4913                 this.baseSize = this.baseLimit = t.length;
4914             else {
4915                 this.baseLimit = f;
4916                 this.baseSize = par.baseSize;
4917             }
4918         }
4919 
4920         /**
4921          * Same as Traverser version.
4922          */
advance()4923         final Node<K,V> advance() {
4924             Node<K,V> e;
4925             if ((e = next) != null)
4926                 e = e.next;
4927             for (;;) {
4928                 Node<K,V>[] t; int i, n;
4929                 if (e != null)
4930                     return next = e;
4931                 if (baseIndex >= baseLimit || (t = tab) == null ||
4932                     (n = t.length) <= (i = index) || i < 0)
4933                     return next = null;
4934                 if ((e = tabAt(t, i)) != null && e.hash < 0) {
4935                     if (e instanceof ForwardingNode) {
4936                         tab = ((ForwardingNode<K,V>)e).nextTable;
4937                         e = null;
4938                         pushState(t, i, n);
4939                         continue;
4940                     }
4941                     else if (e instanceof TreeBin)
4942                         e = ((TreeBin<K,V>)e).first;
4943                     else
4944                         e = null;
4945                 }
4946                 if (stack != null)
4947                     recoverState(n);
4948                 else if ((index = i + baseSize) >= n)
4949                     index = ++baseIndex;
4950             }
4951         }
4952 
pushState(Node<K,V>[] t, int i, int n)4953         private void pushState(Node<K,V>[] t, int i, int n) {
4954             TableStack<K,V> s = spare;
4955             if (s != null)
4956                 spare = s.next;
4957             else
4958                 s = new TableStack<K,V>();
4959             s.tab = t;
4960             s.length = n;
4961             s.index = i;
4962             s.next = stack;
4963             stack = s;
4964         }
4965 
recoverState(int n)4966         private void recoverState(int n) {
4967             TableStack<K,V> s; int len;
4968             while ((s = stack) != null && (index += (len = s.length)) >= n) {
4969                 n = len;
4970                 index = s.index;
4971                 tab = s.tab;
4972                 s.tab = null;
4973                 TableStack<K,V> next = s.next;
4974                 s.next = spare; // save for reuse
4975                 stack = next;
4976                 spare = s;
4977             }
4978             if (s == null && (index += baseSize) >= n)
4979                 index = ++baseIndex;
4980         }
4981     }
4982 
4983     /*
4984      * Task classes. Coded in a regular but ugly format/style to
4985      * simplify checks that each variant differs in the right way from
4986      * others. The null screenings exist because compilers cannot tell
4987      * that we've already null-checked task arguments, so we force
4988      * simplest hoisted bypass to help avoid convoluted traps.
4989      */
4990     @SuppressWarnings("serial")
4991     static final class ForEachKeyTask<K,V>
4992         extends BulkTask<K,V,Void> {
4993         final Consumer<? super K> action;
ForEachKeyTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super K> action)4994         ForEachKeyTask
4995             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
4996              Consumer<? super K> action) {
4997             super(p, b, i, f, t);
4998             this.action = action;
4999         }
compute()5000         public final void compute() {
5001             final Consumer<? super K> action;
5002             if ((action = this.action) != null) {
5003                 for (int i = baseIndex, f, h; batch > 0 &&
5004                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5005                     addToPendingCount(1);
5006                     new ForEachKeyTask<K,V>
5007                         (this, batch >>>= 1, baseLimit = h, f, tab,
5008                          action).fork();
5009                 }
5010                 for (Node<K,V> p; (p = advance()) != null;)
5011                     action.accept(p.key);
5012                 propagateCompletion();
5013             }
5014         }
5015     }
5016 
5017     @SuppressWarnings("serial")
5018     static final class ForEachValueTask<K,V>
5019         extends BulkTask<K,V,Void> {
5020         final Consumer<? super V> action;
ForEachValueTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super V> action)5021         ForEachValueTask
5022             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5023              Consumer<? super V> action) {
5024             super(p, b, i, f, t);
5025             this.action = action;
5026         }
compute()5027         public final void compute() {
5028             final Consumer<? super V> action;
5029             if ((action = this.action) != null) {
5030                 for (int i = baseIndex, f, h; batch > 0 &&
5031                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5032                     addToPendingCount(1);
5033                     new ForEachValueTask<K,V>
5034                         (this, batch >>>= 1, baseLimit = h, f, tab,
5035                          action).fork();
5036                 }
5037                 for (Node<K,V> p; (p = advance()) != null;)
5038                     action.accept(p.val);
5039                 propagateCompletion();
5040             }
5041         }
5042     }
5043 
5044     @SuppressWarnings("serial")
5045     static final class ForEachEntryTask<K,V>
5046         extends BulkTask<K,V,Void> {
5047         final Consumer<? super Entry<K,V>> action;
ForEachEntryTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Consumer<? super Entry<K,V>> action)5048         ForEachEntryTask
5049             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5050              Consumer<? super Entry<K,V>> action) {
5051             super(p, b, i, f, t);
5052             this.action = action;
5053         }
compute()5054         public final void compute() {
5055             final Consumer<? super Entry<K,V>> action;
5056             if ((action = this.action) != null) {
5057                 for (int i = baseIndex, f, h; batch > 0 &&
5058                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5059                     addToPendingCount(1);
5060                     new ForEachEntryTask<K,V>
5061                         (this, batch >>>= 1, baseLimit = h, f, tab,
5062                          action).fork();
5063                 }
5064                 for (Node<K,V> p; (p = advance()) != null; )
5065                     action.accept(p);
5066                 propagateCompletion();
5067             }
5068         }
5069     }
5070 
5071     @SuppressWarnings("serial")
5072     static final class ForEachMappingTask<K,V>
5073         extends BulkTask<K,V,Void> {
5074         final BiConsumer<? super K, ? super V> action;
ForEachMappingTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiConsumer<? super K,? super V> action)5075         ForEachMappingTask
5076             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5077              BiConsumer<? super K,? super V> action) {
5078             super(p, b, i, f, t);
5079             this.action = action;
5080         }
compute()5081         public final void compute() {
5082             final BiConsumer<? super K, ? super V> action;
5083             if ((action = this.action) != null) {
5084                 for (int i = baseIndex, f, h; batch > 0 &&
5085                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5086                     addToPendingCount(1);
5087                     new ForEachMappingTask<K,V>
5088                         (this, batch >>>= 1, baseLimit = h, f, tab,
5089                          action).fork();
5090                 }
5091                 for (Node<K,V> p; (p = advance()) != null; )
5092                     action.accept(p.key, p.val);
5093                 propagateCompletion();
5094             }
5095         }
5096     }
5097 
5098     @SuppressWarnings("serial")
5099     static final class ForEachTransformedKeyTask<K,V,U>
5100         extends BulkTask<K,V,Void> {
5101         final Function<? super K, ? extends U> transformer;
5102         final Consumer<? super U> action;
ForEachTransformedKeyTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super K, ? extends U> transformer, Consumer<? super U> action)5103         ForEachTransformedKeyTask
5104             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5105              Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
5106             super(p, b, i, f, t);
5107             this.transformer = transformer; this.action = action;
5108         }
compute()5109         public final void compute() {
5110             final Function<? super K, ? extends U> transformer;
5111             final Consumer<? super U> action;
5112             if ((transformer = this.transformer) != null &&
5113                 (action = this.action) != null) {
5114                 for (int i = baseIndex, f, h; batch > 0 &&
5115                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5116                     addToPendingCount(1);
5117                     new ForEachTransformedKeyTask<K,V,U>
5118                         (this, batch >>>= 1, baseLimit = h, f, tab,
5119                          transformer, action).fork();
5120                 }
5121                 for (Node<K,V> p; (p = advance()) != null; ) {
5122                     U u;
5123                     if ((u = transformer.apply(p.key)) != null)
5124                         action.accept(u);
5125                 }
5126                 propagateCompletion();
5127             }
5128         }
5129     }
5130 
5131     @SuppressWarnings("serial")
5132     static final class ForEachTransformedValueTask<K,V,U>
5133         extends BulkTask<K,V,Void> {
5134         final Function<? super V, ? extends U> transformer;
5135         final Consumer<? super U> action;
ForEachTransformedValueTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super V, ? extends U> transformer, Consumer<? super U> action)5136         ForEachTransformedValueTask
5137             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5138              Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
5139             super(p, b, i, f, t);
5140             this.transformer = transformer; this.action = action;
5141         }
compute()5142         public final void compute() {
5143             final Function<? super V, ? extends U> transformer;
5144             final Consumer<? super U> action;
5145             if ((transformer = this.transformer) != null &&
5146                 (action = this.action) != null) {
5147                 for (int i = baseIndex, f, h; batch > 0 &&
5148                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5149                     addToPendingCount(1);
5150                     new ForEachTransformedValueTask<K,V,U>
5151                         (this, batch >>>= 1, baseLimit = h, f, tab,
5152                          transformer, action).fork();
5153                 }
5154                 for (Node<K,V> p; (p = advance()) != null; ) {
5155                     U u;
5156                     if ((u = transformer.apply(p.val)) != null)
5157                         action.accept(u);
5158                 }
5159                 propagateCompletion();
5160             }
5161         }
5162     }
5163 
5164     @SuppressWarnings("serial")
5165     static final class ForEachTransformedEntryTask<K,V,U>
5166         extends BulkTask<K,V,Void> {
5167         final Function<Map.Entry<K,V>, ? extends U> transformer;
5168         final Consumer<? super U> action;
ForEachTransformedEntryTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action)5169         ForEachTransformedEntryTask
5170             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5171              Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
5172             super(p, b, i, f, t);
5173             this.transformer = transformer; this.action = action;
5174         }
compute()5175         public final void compute() {
5176             final Function<Map.Entry<K,V>, ? extends U> transformer;
5177             final Consumer<? super U> action;
5178             if ((transformer = this.transformer) != null &&
5179                 (action = this.action) != null) {
5180                 for (int i = baseIndex, f, h; batch > 0 &&
5181                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5182                     addToPendingCount(1);
5183                     new ForEachTransformedEntryTask<K,V,U>
5184                         (this, batch >>>= 1, baseLimit = h, f, tab,
5185                          transformer, action).fork();
5186                 }
5187                 for (Node<K,V> p; (p = advance()) != null; ) {
5188                     U u;
5189                     if ((u = transformer.apply(p)) != null)
5190                         action.accept(u);
5191                 }
5192                 propagateCompletion();
5193             }
5194         }
5195     }
5196 
5197     @SuppressWarnings("serial")
5198     static final class ForEachTransformedMappingTask<K,V,U>
5199         extends BulkTask<K,V,Void> {
5200         final BiFunction<? super K, ? super V, ? extends U> transformer;
5201         final Consumer<? super U> action;
ForEachTransformedMappingTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiFunction<? super K, ? super V, ? extends U> transformer, Consumer<? super U> action)5202         ForEachTransformedMappingTask
5203             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5204              BiFunction<? super K, ? super V, ? extends U> transformer,
5205              Consumer<? super U> action) {
5206             super(p, b, i, f, t);
5207             this.transformer = transformer; this.action = action;
5208         }
compute()5209         public final void compute() {
5210             final BiFunction<? super K, ? super V, ? extends U> transformer;
5211             final Consumer<? super U> action;
5212             if ((transformer = this.transformer) != null &&
5213                 (action = this.action) != null) {
5214                 for (int i = baseIndex, f, h; batch > 0 &&
5215                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5216                     addToPendingCount(1);
5217                     new ForEachTransformedMappingTask<K,V,U>
5218                         (this, batch >>>= 1, baseLimit = h, f, tab,
5219                          transformer, action).fork();
5220                 }
5221                 for (Node<K,V> p; (p = advance()) != null; ) {
5222                     U u;
5223                     if ((u = transformer.apply(p.key, p.val)) != null)
5224                         action.accept(u);
5225                 }
5226                 propagateCompletion();
5227             }
5228         }
5229     }
5230 
5231     @SuppressWarnings("serial")
5232     static final class SearchKeysTask<K,V,U>
5233         extends BulkTask<K,V,U> {
5234         final Function<? super K, ? extends U> searchFunction;
5235         final AtomicReference<U> result;
SearchKeysTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super K, ? extends U> searchFunction, AtomicReference<U> result)5236         SearchKeysTask
5237             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5238              Function<? super K, ? extends U> searchFunction,
5239              AtomicReference<U> result) {
5240             super(p, b, i, f, t);
5241             this.searchFunction = searchFunction; this.result = result;
5242         }
getRawResult()5243         public final U getRawResult() { return result.get(); }
compute()5244         public final void compute() {
5245             final Function<? super K, ? extends U> searchFunction;
5246             final AtomicReference<U> result;
5247             if ((searchFunction = this.searchFunction) != null &&
5248                 (result = this.result) != null) {
5249                 for (int i = baseIndex, f, h; batch > 0 &&
5250                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5251                     if (result.get() != null)
5252                         return;
5253                     addToPendingCount(1);
5254                     new SearchKeysTask<K,V,U>
5255                         (this, batch >>>= 1, baseLimit = h, f, tab,
5256                          searchFunction, result).fork();
5257                 }
5258                 while (result.get() == null) {
5259                     U u;
5260                     Node<K,V> p;
5261                     if ((p = advance()) == null) {
5262                         propagateCompletion();
5263                         break;
5264                     }
5265                     if ((u = searchFunction.apply(p.key)) != null) {
5266                         if (result.compareAndSet(null, u))
5267                             quietlyCompleteRoot();
5268                         break;
5269                     }
5270                 }
5271             }
5272         }
5273     }
5274 
5275     @SuppressWarnings("serial")
5276     static final class SearchValuesTask<K,V,U>
5277         extends BulkTask<K,V,U> {
5278         final Function<? super V, ? extends U> searchFunction;
5279         final AtomicReference<U> result;
SearchValuesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<? super V, ? extends U> searchFunction, AtomicReference<U> result)5280         SearchValuesTask
5281             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5282              Function<? super V, ? extends U> searchFunction,
5283              AtomicReference<U> result) {
5284             super(p, b, i, f, t);
5285             this.searchFunction = searchFunction; this.result = result;
5286         }
getRawResult()5287         public final U getRawResult() { return result.get(); }
compute()5288         public final void compute() {
5289             final Function<? super V, ? extends U> searchFunction;
5290             final AtomicReference<U> result;
5291             if ((searchFunction = this.searchFunction) != null &&
5292                 (result = this.result) != null) {
5293                 for (int i = baseIndex, f, h; batch > 0 &&
5294                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5295                     if (result.get() != null)
5296                         return;
5297                     addToPendingCount(1);
5298                     new SearchValuesTask<K,V,U>
5299                         (this, batch >>>= 1, baseLimit = h, f, tab,
5300                          searchFunction, result).fork();
5301                 }
5302                 while (result.get() == null) {
5303                     U u;
5304                     Node<K,V> p;
5305                     if ((p = advance()) == null) {
5306                         propagateCompletion();
5307                         break;
5308                     }
5309                     if ((u = searchFunction.apply(p.val)) != null) {
5310                         if (result.compareAndSet(null, u))
5311                             quietlyCompleteRoot();
5312                         break;
5313                     }
5314                 }
5315             }
5316         }
5317     }
5318 
5319     @SuppressWarnings("serial")
5320     static final class SearchEntriesTask<K,V,U>
5321         extends BulkTask<K,V,U> {
5322         final Function<Entry<K,V>, ? extends U> searchFunction;
5323         final AtomicReference<U> result;
SearchEntriesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, Function<Entry<K,V>, ? extends U> searchFunction, AtomicReference<U> result)5324         SearchEntriesTask
5325             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5326              Function<Entry<K,V>, ? extends U> searchFunction,
5327              AtomicReference<U> result) {
5328             super(p, b, i, f, t);
5329             this.searchFunction = searchFunction; this.result = result;
5330         }
getRawResult()5331         public final U getRawResult() { return result.get(); }
compute()5332         public final void compute() {
5333             final Function<Entry<K,V>, ? extends U> searchFunction;
5334             final AtomicReference<U> result;
5335             if ((searchFunction = this.searchFunction) != null &&
5336                 (result = this.result) != null) {
5337                 for (int i = baseIndex, f, h; batch > 0 &&
5338                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5339                     if (result.get() != null)
5340                         return;
5341                     addToPendingCount(1);
5342                     new SearchEntriesTask<K,V,U>
5343                         (this, batch >>>= 1, baseLimit = h, f, tab,
5344                          searchFunction, result).fork();
5345                 }
5346                 while (result.get() == null) {
5347                     U u;
5348                     Node<K,V> p;
5349                     if ((p = advance()) == null) {
5350                         propagateCompletion();
5351                         break;
5352                     }
5353                     if ((u = searchFunction.apply(p)) != null) {
5354                         if (result.compareAndSet(null, u))
5355                             quietlyCompleteRoot();
5356                         return;
5357                     }
5358                 }
5359             }
5360         }
5361     }
5362 
5363     @SuppressWarnings("serial")
5364     static final class SearchMappingsTask<K,V,U>
5365         extends BulkTask<K,V,U> {
5366         final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5367         final AtomicReference<U> result;
SearchMappingsTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, BiFunction<? super K, ? super V, ? extends U> searchFunction, AtomicReference<U> result)5368         SearchMappingsTask
5369             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5370              BiFunction<? super K, ? super V, ? extends U> searchFunction,
5371              AtomicReference<U> result) {
5372             super(p, b, i, f, t);
5373             this.searchFunction = searchFunction; this.result = result;
5374         }
getRawResult()5375         public final U getRawResult() { return result.get(); }
compute()5376         public final void compute() {
5377             final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5378             final AtomicReference<U> result;
5379             if ((searchFunction = this.searchFunction) != null &&
5380                 (result = this.result) != null) {
5381                 for (int i = baseIndex, f, h; batch > 0 &&
5382                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5383                     if (result.get() != null)
5384                         return;
5385                     addToPendingCount(1);
5386                     new SearchMappingsTask<K,V,U>
5387                         (this, batch >>>= 1, baseLimit = h, f, tab,
5388                          searchFunction, result).fork();
5389                 }
5390                 while (result.get() == null) {
5391                     U u;
5392                     Node<K,V> p;
5393                     if ((p = advance()) == null) {
5394                         propagateCompletion();
5395                         break;
5396                     }
5397                     if ((u = searchFunction.apply(p.key, p.val)) != null) {
5398                         if (result.compareAndSet(null, u))
5399                             quietlyCompleteRoot();
5400                         break;
5401                     }
5402                 }
5403             }
5404         }
5405     }
5406 
5407     @SuppressWarnings("serial")
5408     static final class ReduceKeysTask<K,V>
5409         extends BulkTask<K,V,K> {
5410         final BiFunction<? super K, ? super K, ? extends K> reducer;
5411         K result;
5412         ReduceKeysTask<K,V> rights, nextRight;
ReduceKeysTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceKeysTask<K,V> nextRight, BiFunction<? super K, ? super K, ? extends K> reducer)5413         ReduceKeysTask
5414             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5415              ReduceKeysTask<K,V> nextRight,
5416              BiFunction<? super K, ? super K, ? extends K> reducer) {
5417             super(p, b, i, f, t); this.nextRight = nextRight;
5418             this.reducer = reducer;
5419         }
getRawResult()5420         public final K getRawResult() { return result; }
compute()5421         public final void compute() {
5422             final BiFunction<? super K, ? super K, ? extends K> reducer;
5423             if ((reducer = this.reducer) != null) {
5424                 for (int i = baseIndex, f, h; batch > 0 &&
5425                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5426                     addToPendingCount(1);
5427                     (rights = new ReduceKeysTask<K,V>
5428                      (this, batch >>>= 1, baseLimit = h, f, tab,
5429                       rights, reducer)).fork();
5430                 }
5431                 K r = null;
5432                 for (Node<K,V> p; (p = advance()) != null; ) {
5433                     K u = p.key;
5434                     r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
5435                 }
5436                 result = r;
5437                 CountedCompleter<?> c;
5438                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5439                     @SuppressWarnings("unchecked")
5440                     ReduceKeysTask<K,V>
5441                         t = (ReduceKeysTask<K,V>)c,
5442                         s = t.rights;
5443                     while (s != null) {
5444                         K tr, sr;
5445                         if ((sr = s.result) != null)
5446                             t.result = (((tr = t.result) == null) ? sr :
5447                                         reducer.apply(tr, sr));
5448                         s = t.rights = s.nextRight;
5449                     }
5450                 }
5451             }
5452         }
5453     }
5454 
5455     @SuppressWarnings("serial")
5456     static final class ReduceValuesTask<K,V>
5457         extends BulkTask<K,V,V> {
5458         final BiFunction<? super V, ? super V, ? extends V> reducer;
5459         V result;
5460         ReduceValuesTask<K,V> rights, nextRight;
ReduceValuesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceValuesTask<K,V> nextRight, BiFunction<? super V, ? super V, ? extends V> reducer)5461         ReduceValuesTask
5462             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5463              ReduceValuesTask<K,V> nextRight,
5464              BiFunction<? super V, ? super V, ? extends V> reducer) {
5465             super(p, b, i, f, t); this.nextRight = nextRight;
5466             this.reducer = reducer;
5467         }
getRawResult()5468         public final V getRawResult() { return result; }
compute()5469         public final void compute() {
5470             final BiFunction<? super V, ? super V, ? extends V> reducer;
5471             if ((reducer = this.reducer) != null) {
5472                 for (int i = baseIndex, f, h; batch > 0 &&
5473                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5474                     addToPendingCount(1);
5475                     (rights = new ReduceValuesTask<K,V>
5476                      (this, batch >>>= 1, baseLimit = h, f, tab,
5477                       rights, reducer)).fork();
5478                 }
5479                 V r = null;
5480                 for (Node<K,V> p; (p = advance()) != null; ) {
5481                     V v = p.val;
5482                     r = (r == null) ? v : reducer.apply(r, v);
5483                 }
5484                 result = r;
5485                 CountedCompleter<?> c;
5486                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5487                     @SuppressWarnings("unchecked")
5488                     ReduceValuesTask<K,V>
5489                         t = (ReduceValuesTask<K,V>)c,
5490                         s = t.rights;
5491                     while (s != null) {
5492                         V tr, sr;
5493                         if ((sr = s.result) != null)
5494                             t.result = (((tr = t.result) == null) ? sr :
5495                                         reducer.apply(tr, sr));
5496                         s = t.rights = s.nextRight;
5497                     }
5498                 }
5499             }
5500         }
5501     }
5502 
5503     @SuppressWarnings("serial")
5504     static final class ReduceEntriesTask<K,V>
5505         extends BulkTask<K,V,Map.Entry<K,V>> {
5506         final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5507         Map.Entry<K,V> result;
5508         ReduceEntriesTask<K,V> rights, nextRight;
ReduceEntriesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, ReduceEntriesTask<K,V> nextRight, BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer)5509         ReduceEntriesTask
5510             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5511              ReduceEntriesTask<K,V> nextRight,
5512              BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5513             super(p, b, i, f, t); this.nextRight = nextRight;
5514             this.reducer = reducer;
5515         }
getRawResult()5516         public final Map.Entry<K,V> getRawResult() { return result; }
compute()5517         public final void compute() {
5518             final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5519             if ((reducer = this.reducer) != null) {
5520                 for (int i = baseIndex, f, h; batch > 0 &&
5521                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5522                     addToPendingCount(1);
5523                     (rights = new ReduceEntriesTask<K,V>
5524                      (this, batch >>>= 1, baseLimit = h, f, tab,
5525                       rights, reducer)).fork();
5526                 }
5527                 Map.Entry<K,V> r = null;
5528                 for (Node<K,V> p; (p = advance()) != null; )
5529                     r = (r == null) ? p : reducer.apply(r, p);
5530                 result = r;
5531                 CountedCompleter<?> c;
5532                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5533                     @SuppressWarnings("unchecked")
5534                     ReduceEntriesTask<K,V>
5535                         t = (ReduceEntriesTask<K,V>)c,
5536                         s = t.rights;
5537                     while (s != null) {
5538                         Map.Entry<K,V> tr, sr;
5539                         if ((sr = s.result) != null)
5540                             t.result = (((tr = t.result) == null) ? sr :
5541                                         reducer.apply(tr, sr));
5542                         s = t.rights = s.nextRight;
5543                     }
5544                 }
5545             }
5546         }
5547     }
5548 
5549     @SuppressWarnings("serial")
5550     static final class MapReduceKeysTask<K,V,U>
5551         extends BulkTask<K,V,U> {
5552         final Function<? super K, ? extends U> transformer;
5553         final BiFunction<? super U, ? super U, ? extends U> reducer;
5554         U result;
5555         MapReduceKeysTask<K,V,U> rights, nextRight;
MapReduceKeysTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysTask<K,V,U> nextRight, Function<? super K, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5556         MapReduceKeysTask
5557             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5558              MapReduceKeysTask<K,V,U> nextRight,
5559              Function<? super K, ? extends U> transformer,
5560              BiFunction<? super U, ? super U, ? extends U> reducer) {
5561             super(p, b, i, f, t); this.nextRight = nextRight;
5562             this.transformer = transformer;
5563             this.reducer = reducer;
5564         }
getRawResult()5565         public final U getRawResult() { return result; }
compute()5566         public final void compute() {
5567             final Function<? super K, ? extends U> transformer;
5568             final BiFunction<? super U, ? super U, ? extends U> reducer;
5569             if ((transformer = this.transformer) != null &&
5570                 (reducer = this.reducer) != null) {
5571                 for (int i = baseIndex, f, h; batch > 0 &&
5572                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5573                     addToPendingCount(1);
5574                     (rights = new MapReduceKeysTask<K,V,U>
5575                      (this, batch >>>= 1, baseLimit = h, f, tab,
5576                       rights, transformer, reducer)).fork();
5577                 }
5578                 U r = null;
5579                 for (Node<K,V> p; (p = advance()) != null; ) {
5580                     U u;
5581                     if ((u = transformer.apply(p.key)) != null)
5582                         r = (r == null) ? u : reducer.apply(r, u);
5583                 }
5584                 result = r;
5585                 CountedCompleter<?> c;
5586                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5587                     @SuppressWarnings("unchecked")
5588                     MapReduceKeysTask<K,V,U>
5589                         t = (MapReduceKeysTask<K,V,U>)c,
5590                         s = t.rights;
5591                     while (s != null) {
5592                         U tr, sr;
5593                         if ((sr = s.result) != null)
5594                             t.result = (((tr = t.result) == null) ? sr :
5595                                         reducer.apply(tr, sr));
5596                         s = t.rights = s.nextRight;
5597                     }
5598                 }
5599             }
5600         }
5601     }
5602 
5603     @SuppressWarnings("serial")
5604     static final class MapReduceValuesTask<K,V,U>
5605         extends BulkTask<K,V,U> {
5606         final Function<? super V, ? extends U> transformer;
5607         final BiFunction<? super U, ? super U, ? extends U> reducer;
5608         U result;
5609         MapReduceValuesTask<K,V,U> rights, nextRight;
MapReduceValuesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesTask<K,V,U> nextRight, Function<? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5610         MapReduceValuesTask
5611             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5612              MapReduceValuesTask<K,V,U> nextRight,
5613              Function<? super V, ? extends U> transformer,
5614              BiFunction<? super U, ? super U, ? extends U> reducer) {
5615             super(p, b, i, f, t); this.nextRight = nextRight;
5616             this.transformer = transformer;
5617             this.reducer = reducer;
5618         }
getRawResult()5619         public final U getRawResult() { return result; }
compute()5620         public final void compute() {
5621             final Function<? super V, ? extends U> transformer;
5622             final BiFunction<? super U, ? super U, ? extends U> reducer;
5623             if ((transformer = this.transformer) != null &&
5624                 (reducer = this.reducer) != null) {
5625                 for (int i = baseIndex, f, h; batch > 0 &&
5626                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5627                     addToPendingCount(1);
5628                     (rights = new MapReduceValuesTask<K,V,U>
5629                      (this, batch >>>= 1, baseLimit = h, f, tab,
5630                       rights, transformer, reducer)).fork();
5631                 }
5632                 U r = null;
5633                 for (Node<K,V> p; (p = advance()) != null; ) {
5634                     U u;
5635                     if ((u = transformer.apply(p.val)) != null)
5636                         r = (r == null) ? u : reducer.apply(r, u);
5637                 }
5638                 result = r;
5639                 CountedCompleter<?> c;
5640                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5641                     @SuppressWarnings("unchecked")
5642                     MapReduceValuesTask<K,V,U>
5643                         t = (MapReduceValuesTask<K,V,U>)c,
5644                         s = t.rights;
5645                     while (s != null) {
5646                         U tr, sr;
5647                         if ((sr = s.result) != null)
5648                             t.result = (((tr = t.result) == null) ? sr :
5649                                         reducer.apply(tr, sr));
5650                         s = t.rights = s.nextRight;
5651                     }
5652                 }
5653             }
5654         }
5655     }
5656 
5657     @SuppressWarnings("serial")
5658     static final class MapReduceEntriesTask<K,V,U>
5659         extends BulkTask<K,V,U> {
5660         final Function<Map.Entry<K,V>, ? extends U> transformer;
5661         final BiFunction<? super U, ? super U, ? extends U> reducer;
5662         U result;
5663         MapReduceEntriesTask<K,V,U> rights, nextRight;
MapReduceEntriesTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesTask<K,V,U> nextRight, Function<Map.Entry<K,V>, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5664         MapReduceEntriesTask
5665             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5666              MapReduceEntriesTask<K,V,U> nextRight,
5667              Function<Map.Entry<K,V>, ? extends U> transformer,
5668              BiFunction<? super U, ? super U, ? extends U> reducer) {
5669             super(p, b, i, f, t); this.nextRight = nextRight;
5670             this.transformer = transformer;
5671             this.reducer = reducer;
5672         }
getRawResult()5673         public final U getRawResult() { return result; }
compute()5674         public final void compute() {
5675             final Function<Map.Entry<K,V>, ? extends U> transformer;
5676             final BiFunction<? super U, ? super U, ? extends U> reducer;
5677             if ((transformer = this.transformer) != null &&
5678                 (reducer = this.reducer) != null) {
5679                 for (int i = baseIndex, f, h; batch > 0 &&
5680                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5681                     addToPendingCount(1);
5682                     (rights = new MapReduceEntriesTask<K,V,U>
5683                      (this, batch >>>= 1, baseLimit = h, f, tab,
5684                       rights, transformer, reducer)).fork();
5685                 }
5686                 U r = null;
5687                 for (Node<K,V> p; (p = advance()) != null; ) {
5688                     U u;
5689                     if ((u = transformer.apply(p)) != null)
5690                         r = (r == null) ? u : reducer.apply(r, u);
5691                 }
5692                 result = r;
5693                 CountedCompleter<?> c;
5694                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5695                     @SuppressWarnings("unchecked")
5696                     MapReduceEntriesTask<K,V,U>
5697                         t = (MapReduceEntriesTask<K,V,U>)c,
5698                         s = t.rights;
5699                     while (s != null) {
5700                         U tr, sr;
5701                         if ((sr = s.result) != null)
5702                             t.result = (((tr = t.result) == null) ? sr :
5703                                         reducer.apply(tr, sr));
5704                         s = t.rights = s.nextRight;
5705                     }
5706                 }
5707             }
5708         }
5709     }
5710 
5711     @SuppressWarnings("serial")
5712     static final class MapReduceMappingsTask<K,V,U>
5713         extends BulkTask<K,V,U> {
5714         final BiFunction<? super K, ? super V, ? extends U> transformer;
5715         final BiFunction<? super U, ? super U, ? extends U> reducer;
5716         U result;
5717         MapReduceMappingsTask<K,V,U> rights, nextRight;
MapReduceMappingsTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsTask<K,V,U> nextRight, BiFunction<? super K, ? super V, ? extends U> transformer, BiFunction<? super U, ? super U, ? extends U> reducer)5718         MapReduceMappingsTask
5719             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5720              MapReduceMappingsTask<K,V,U> nextRight,
5721              BiFunction<? super K, ? super V, ? extends U> transformer,
5722              BiFunction<? super U, ? super U, ? extends U> reducer) {
5723             super(p, b, i, f, t); this.nextRight = nextRight;
5724             this.transformer = transformer;
5725             this.reducer = reducer;
5726         }
getRawResult()5727         public final U getRawResult() { return result; }
compute()5728         public final void compute() {
5729             final BiFunction<? super K, ? super V, ? extends U> transformer;
5730             final BiFunction<? super U, ? super U, ? extends U> reducer;
5731             if ((transformer = this.transformer) != null &&
5732                 (reducer = this.reducer) != null) {
5733                 for (int i = baseIndex, f, h; batch > 0 &&
5734                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5735                     addToPendingCount(1);
5736                     (rights = new MapReduceMappingsTask<K,V,U>
5737                      (this, batch >>>= 1, baseLimit = h, f, tab,
5738                       rights, transformer, reducer)).fork();
5739                 }
5740                 U r = null;
5741                 for (Node<K,V> p; (p = advance()) != null; ) {
5742                     U u;
5743                     if ((u = transformer.apply(p.key, p.val)) != null)
5744                         r = (r == null) ? u : reducer.apply(r, u);
5745                 }
5746                 result = r;
5747                 CountedCompleter<?> c;
5748                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5749                     @SuppressWarnings("unchecked")
5750                     MapReduceMappingsTask<K,V,U>
5751                         t = (MapReduceMappingsTask<K,V,U>)c,
5752                         s = t.rights;
5753                     while (s != null) {
5754                         U tr, sr;
5755                         if ((sr = s.result) != null)
5756                             t.result = (((tr = t.result) == null) ? sr :
5757                                         reducer.apply(tr, sr));
5758                         s = t.rights = s.nextRight;
5759                     }
5760                 }
5761             }
5762         }
5763     }
5764 
5765     @SuppressWarnings("serial")
5766     static final class MapReduceKeysToDoubleTask<K,V>
5767         extends BulkTask<K,V,Double> {
5768         final ToDoubleFunction<? super K> transformer;
5769         final DoubleBinaryOperator reducer;
5770         final double basis;
5771         double result;
5772         MapReduceKeysToDoubleTask<K,V> rights, nextRight;
MapReduceKeysToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToDoubleTask<K,V> nextRight, ToDoubleFunction<? super K> transformer, double basis, DoubleBinaryOperator reducer)5773         MapReduceKeysToDoubleTask
5774             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5775              MapReduceKeysToDoubleTask<K,V> nextRight,
5776              ToDoubleFunction<? super K> transformer,
5777              double basis,
5778              DoubleBinaryOperator reducer) {
5779             super(p, b, i, f, t); this.nextRight = nextRight;
5780             this.transformer = transformer;
5781             this.basis = basis; this.reducer = reducer;
5782         }
getRawResult()5783         public final Double getRawResult() { return result; }
compute()5784         public final void compute() {
5785             final ToDoubleFunction<? super K> transformer;
5786             final DoubleBinaryOperator reducer;
5787             if ((transformer = this.transformer) != null &&
5788                 (reducer = this.reducer) != null) {
5789                 double r = this.basis;
5790                 for (int i = baseIndex, f, h; batch > 0 &&
5791                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5792                     addToPendingCount(1);
5793                     (rights = new MapReduceKeysToDoubleTask<K,V>
5794                      (this, batch >>>= 1, baseLimit = h, f, tab,
5795                       rights, transformer, r, reducer)).fork();
5796                 }
5797                 for (Node<K,V> p; (p = advance()) != null; )
5798                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
5799                 result = r;
5800                 CountedCompleter<?> c;
5801                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5802                     @SuppressWarnings("unchecked")
5803                     MapReduceKeysToDoubleTask<K,V>
5804                         t = (MapReduceKeysToDoubleTask<K,V>)c,
5805                         s = t.rights;
5806                     while (s != null) {
5807                         t.result = reducer.applyAsDouble(t.result, s.result);
5808                         s = t.rights = s.nextRight;
5809                     }
5810                 }
5811             }
5812         }
5813     }
5814 
5815     @SuppressWarnings("serial")
5816     static final class MapReduceValuesToDoubleTask<K,V>
5817         extends BulkTask<K,V,Double> {
5818         final ToDoubleFunction<? super V> transformer;
5819         final DoubleBinaryOperator reducer;
5820         final double basis;
5821         double result;
5822         MapReduceValuesToDoubleTask<K,V> rights, nextRight;
MapReduceValuesToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToDoubleTask<K,V> nextRight, ToDoubleFunction<? super V> transformer, double basis, DoubleBinaryOperator reducer)5823         MapReduceValuesToDoubleTask
5824             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5825              MapReduceValuesToDoubleTask<K,V> nextRight,
5826              ToDoubleFunction<? super V> transformer,
5827              double basis,
5828              DoubleBinaryOperator reducer) {
5829             super(p, b, i, f, t); this.nextRight = nextRight;
5830             this.transformer = transformer;
5831             this.basis = basis; this.reducer = reducer;
5832         }
getRawResult()5833         public final Double getRawResult() { return result; }
compute()5834         public final void compute() {
5835             final ToDoubleFunction<? super V> transformer;
5836             final DoubleBinaryOperator reducer;
5837             if ((transformer = this.transformer) != null &&
5838                 (reducer = this.reducer) != null) {
5839                 double r = this.basis;
5840                 for (int i = baseIndex, f, h; batch > 0 &&
5841                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5842                     addToPendingCount(1);
5843                     (rights = new MapReduceValuesToDoubleTask<K,V>
5844                      (this, batch >>>= 1, baseLimit = h, f, tab,
5845                       rights, transformer, r, reducer)).fork();
5846                 }
5847                 for (Node<K,V> p; (p = advance()) != null; )
5848                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
5849                 result = r;
5850                 CountedCompleter<?> c;
5851                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5852                     @SuppressWarnings("unchecked")
5853                     MapReduceValuesToDoubleTask<K,V>
5854                         t = (MapReduceValuesToDoubleTask<K,V>)c,
5855                         s = t.rights;
5856                     while (s != null) {
5857                         t.result = reducer.applyAsDouble(t.result, s.result);
5858                         s = t.rights = s.nextRight;
5859                     }
5860                 }
5861             }
5862         }
5863     }
5864 
5865     @SuppressWarnings("serial")
5866     static final class MapReduceEntriesToDoubleTask<K,V>
5867         extends BulkTask<K,V,Double> {
5868         final ToDoubleFunction<Map.Entry<K,V>> transformer;
5869         final DoubleBinaryOperator reducer;
5870         final double basis;
5871         double result;
5872         MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
MapReduceEntriesToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToDoubleTask<K,V> nextRight, ToDoubleFunction<Map.Entry<K,V>> transformer, double basis, DoubleBinaryOperator reducer)5873         MapReduceEntriesToDoubleTask
5874             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5875              MapReduceEntriesToDoubleTask<K,V> nextRight,
5876              ToDoubleFunction<Map.Entry<K,V>> transformer,
5877              double basis,
5878              DoubleBinaryOperator reducer) {
5879             super(p, b, i, f, t); this.nextRight = nextRight;
5880             this.transformer = transformer;
5881             this.basis = basis; this.reducer = reducer;
5882         }
getRawResult()5883         public final Double getRawResult() { return result; }
compute()5884         public final void compute() {
5885             final ToDoubleFunction<Map.Entry<K,V>> transformer;
5886             final DoubleBinaryOperator reducer;
5887             if ((transformer = this.transformer) != null &&
5888                 (reducer = this.reducer) != null) {
5889                 double r = this.basis;
5890                 for (int i = baseIndex, f, h; batch > 0 &&
5891                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5892                     addToPendingCount(1);
5893                     (rights = new MapReduceEntriesToDoubleTask<K,V>
5894                      (this, batch >>>= 1, baseLimit = h, f, tab,
5895                       rights, transformer, r, reducer)).fork();
5896                 }
5897                 for (Node<K,V> p; (p = advance()) != null; )
5898                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
5899                 result = r;
5900                 CountedCompleter<?> c;
5901                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5902                     @SuppressWarnings("unchecked")
5903                     MapReduceEntriesToDoubleTask<K,V>
5904                         t = (MapReduceEntriesToDoubleTask<K,V>)c,
5905                         s = t.rights;
5906                     while (s != null) {
5907                         t.result = reducer.applyAsDouble(t.result, s.result);
5908                         s = t.rights = s.nextRight;
5909                     }
5910                 }
5911             }
5912         }
5913     }
5914 
5915     @SuppressWarnings("serial")
5916     static final class MapReduceMappingsToDoubleTask<K,V>
5917         extends BulkTask<K,V,Double> {
5918         final ToDoubleBiFunction<? super K, ? super V> transformer;
5919         final DoubleBinaryOperator reducer;
5920         final double basis;
5921         double result;
5922         MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
MapReduceMappingsToDoubleTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToDoubleTask<K,V> nextRight, ToDoubleBiFunction<? super K, ? super V> transformer, double basis, DoubleBinaryOperator reducer)5923         MapReduceMappingsToDoubleTask
5924             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5925              MapReduceMappingsToDoubleTask<K,V> nextRight,
5926              ToDoubleBiFunction<? super K, ? super V> transformer,
5927              double basis,
5928              DoubleBinaryOperator reducer) {
5929             super(p, b, i, f, t); this.nextRight = nextRight;
5930             this.transformer = transformer;
5931             this.basis = basis; this.reducer = reducer;
5932         }
getRawResult()5933         public final Double getRawResult() { return result; }
compute()5934         public final void compute() {
5935             final ToDoubleBiFunction<? super K, ? super V> transformer;
5936             final DoubleBinaryOperator reducer;
5937             if ((transformer = this.transformer) != null &&
5938                 (reducer = this.reducer) != null) {
5939                 double r = this.basis;
5940                 for (int i = baseIndex, f, h; batch > 0 &&
5941                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5942                     addToPendingCount(1);
5943                     (rights = new MapReduceMappingsToDoubleTask<K,V>
5944                      (this, batch >>>= 1, baseLimit = h, f, tab,
5945                       rights, transformer, r, reducer)).fork();
5946                 }
5947                 for (Node<K,V> p; (p = advance()) != null; )
5948                     r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
5949                 result = r;
5950                 CountedCompleter<?> c;
5951                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5952                     @SuppressWarnings("unchecked")
5953                     MapReduceMappingsToDoubleTask<K,V>
5954                         t = (MapReduceMappingsToDoubleTask<K,V>)c,
5955                         s = t.rights;
5956                     while (s != null) {
5957                         t.result = reducer.applyAsDouble(t.result, s.result);
5958                         s = t.rights = s.nextRight;
5959                     }
5960                 }
5961             }
5962         }
5963     }
5964 
5965     @SuppressWarnings("serial")
5966     static final class MapReduceKeysToLongTask<K,V>
5967         extends BulkTask<K,V,Long> {
5968         final ToLongFunction<? super K> transformer;
5969         final LongBinaryOperator reducer;
5970         final long basis;
5971         long result;
5972         MapReduceKeysToLongTask<K,V> rights, nextRight;
MapReduceKeysToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToLongTask<K,V> nextRight, ToLongFunction<? super K> transformer, long basis, LongBinaryOperator reducer)5973         MapReduceKeysToLongTask
5974             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5975              MapReduceKeysToLongTask<K,V> nextRight,
5976              ToLongFunction<? super K> transformer,
5977              long basis,
5978              LongBinaryOperator reducer) {
5979             super(p, b, i, f, t); this.nextRight = nextRight;
5980             this.transformer = transformer;
5981             this.basis = basis; this.reducer = reducer;
5982         }
getRawResult()5983         public final Long getRawResult() { return result; }
compute()5984         public final void compute() {
5985             final ToLongFunction<? super K> transformer;
5986             final LongBinaryOperator reducer;
5987             if ((transformer = this.transformer) != null &&
5988                 (reducer = this.reducer) != null) {
5989                 long r = this.basis;
5990                 for (int i = baseIndex, f, h; batch > 0 &&
5991                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
5992                     addToPendingCount(1);
5993                     (rights = new MapReduceKeysToLongTask<K,V>
5994                      (this, batch >>>= 1, baseLimit = h, f, tab,
5995                       rights, transformer, r, reducer)).fork();
5996                 }
5997                 for (Node<K,V> p; (p = advance()) != null; )
5998                     r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
5999                 result = r;
6000                 CountedCompleter<?> c;
6001                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6002                     @SuppressWarnings("unchecked")
6003                     MapReduceKeysToLongTask<K,V>
6004                         t = (MapReduceKeysToLongTask<K,V>)c,
6005                         s = t.rights;
6006                     while (s != null) {
6007                         t.result = reducer.applyAsLong(t.result, s.result);
6008                         s = t.rights = s.nextRight;
6009                     }
6010                 }
6011             }
6012         }
6013     }
6014 
6015     @SuppressWarnings("serial")
6016     static final class MapReduceValuesToLongTask<K,V>
6017         extends BulkTask<K,V,Long> {
6018         final ToLongFunction<? super V> transformer;
6019         final LongBinaryOperator reducer;
6020         final long basis;
6021         long result;
6022         MapReduceValuesToLongTask<K,V> rights, nextRight;
MapReduceValuesToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToLongTask<K,V> nextRight, ToLongFunction<? super V> transformer, long basis, LongBinaryOperator reducer)6023         MapReduceValuesToLongTask
6024             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6025              MapReduceValuesToLongTask<K,V> nextRight,
6026              ToLongFunction<? super V> transformer,
6027              long basis,
6028              LongBinaryOperator reducer) {
6029             super(p, b, i, f, t); this.nextRight = nextRight;
6030             this.transformer = transformer;
6031             this.basis = basis; this.reducer = reducer;
6032         }
getRawResult()6033         public final Long getRawResult() { return result; }
compute()6034         public final void compute() {
6035             final ToLongFunction<? super V> transformer;
6036             final LongBinaryOperator reducer;
6037             if ((transformer = this.transformer) != null &&
6038                 (reducer = this.reducer) != null) {
6039                 long r = this.basis;
6040                 for (int i = baseIndex, f, h; batch > 0 &&
6041                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6042                     addToPendingCount(1);
6043                     (rights = new MapReduceValuesToLongTask<K,V>
6044                      (this, batch >>>= 1, baseLimit = h, f, tab,
6045                       rights, transformer, r, reducer)).fork();
6046                 }
6047                 for (Node<K,V> p; (p = advance()) != null; )
6048                     r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
6049                 result = r;
6050                 CountedCompleter<?> c;
6051                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6052                     @SuppressWarnings("unchecked")
6053                     MapReduceValuesToLongTask<K,V>
6054                         t = (MapReduceValuesToLongTask<K,V>)c,
6055                         s = t.rights;
6056                     while (s != null) {
6057                         t.result = reducer.applyAsLong(t.result, s.result);
6058                         s = t.rights = s.nextRight;
6059                     }
6060                 }
6061             }
6062         }
6063     }
6064 
6065     @SuppressWarnings("serial")
6066     static final class MapReduceEntriesToLongTask<K,V>
6067         extends BulkTask<K,V,Long> {
6068         final ToLongFunction<Map.Entry<K,V>> transformer;
6069         final LongBinaryOperator reducer;
6070         final long basis;
6071         long result;
6072         MapReduceEntriesToLongTask<K,V> rights, nextRight;
MapReduceEntriesToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToLongTask<K,V> nextRight, ToLongFunction<Map.Entry<K,V>> transformer, long basis, LongBinaryOperator reducer)6073         MapReduceEntriesToLongTask
6074             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6075              MapReduceEntriesToLongTask<K,V> nextRight,
6076              ToLongFunction<Map.Entry<K,V>> transformer,
6077              long basis,
6078              LongBinaryOperator reducer) {
6079             super(p, b, i, f, t); this.nextRight = nextRight;
6080             this.transformer = transformer;
6081             this.basis = basis; this.reducer = reducer;
6082         }
getRawResult()6083         public final Long getRawResult() { return result; }
compute()6084         public final void compute() {
6085             final ToLongFunction<Map.Entry<K,V>> transformer;
6086             final LongBinaryOperator reducer;
6087             if ((transformer = this.transformer) != null &&
6088                 (reducer = this.reducer) != null) {
6089                 long r = this.basis;
6090                 for (int i = baseIndex, f, h; batch > 0 &&
6091                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6092                     addToPendingCount(1);
6093                     (rights = new MapReduceEntriesToLongTask<K,V>
6094                      (this, batch >>>= 1, baseLimit = h, f, tab,
6095                       rights, transformer, r, reducer)).fork();
6096                 }
6097                 for (Node<K,V> p; (p = advance()) != null; )
6098                     r = reducer.applyAsLong(r, transformer.applyAsLong(p));
6099                 result = r;
6100                 CountedCompleter<?> c;
6101                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6102                     @SuppressWarnings("unchecked")
6103                     MapReduceEntriesToLongTask<K,V>
6104                         t = (MapReduceEntriesToLongTask<K,V>)c,
6105                         s = t.rights;
6106                     while (s != null) {
6107                         t.result = reducer.applyAsLong(t.result, s.result);
6108                         s = t.rights = s.nextRight;
6109                     }
6110                 }
6111             }
6112         }
6113     }
6114 
6115     @SuppressWarnings("serial")
6116     static final class MapReduceMappingsToLongTask<K,V>
6117         extends BulkTask<K,V,Long> {
6118         final ToLongBiFunction<? super K, ? super V> transformer;
6119         final LongBinaryOperator reducer;
6120         final long basis;
6121         long result;
6122         MapReduceMappingsToLongTask<K,V> rights, nextRight;
MapReduceMappingsToLongTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToLongTask<K,V> nextRight, ToLongBiFunction<? super K, ? super V> transformer, long basis, LongBinaryOperator reducer)6123         MapReduceMappingsToLongTask
6124             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6125              MapReduceMappingsToLongTask<K,V> nextRight,
6126              ToLongBiFunction<? super K, ? super V> transformer,
6127              long basis,
6128              LongBinaryOperator reducer) {
6129             super(p, b, i, f, t); this.nextRight = nextRight;
6130             this.transformer = transformer;
6131             this.basis = basis; this.reducer = reducer;
6132         }
getRawResult()6133         public final Long getRawResult() { return result; }
compute()6134         public final void compute() {
6135             final ToLongBiFunction<? super K, ? super V> transformer;
6136             final LongBinaryOperator reducer;
6137             if ((transformer = this.transformer) != null &&
6138                 (reducer = this.reducer) != null) {
6139                 long r = this.basis;
6140                 for (int i = baseIndex, f, h; batch > 0 &&
6141                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6142                     addToPendingCount(1);
6143                     (rights = new MapReduceMappingsToLongTask<K,V>
6144                      (this, batch >>>= 1, baseLimit = h, f, tab,
6145                       rights, transformer, r, reducer)).fork();
6146                 }
6147                 for (Node<K,V> p; (p = advance()) != null; )
6148                     r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
6149                 result = r;
6150                 CountedCompleter<?> c;
6151                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6152                     @SuppressWarnings("unchecked")
6153                     MapReduceMappingsToLongTask<K,V>
6154                         t = (MapReduceMappingsToLongTask<K,V>)c,
6155                         s = t.rights;
6156                     while (s != null) {
6157                         t.result = reducer.applyAsLong(t.result, s.result);
6158                         s = t.rights = s.nextRight;
6159                     }
6160                 }
6161             }
6162         }
6163     }
6164 
6165     @SuppressWarnings("serial")
6166     static final class MapReduceKeysToIntTask<K,V>
6167         extends BulkTask<K,V,Integer> {
6168         final ToIntFunction<? super K> transformer;
6169         final IntBinaryOperator reducer;
6170         final int basis;
6171         int result;
6172         MapReduceKeysToIntTask<K,V> rights, nextRight;
MapReduceKeysToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceKeysToIntTask<K,V> nextRight, ToIntFunction<? super K> transformer, int basis, IntBinaryOperator reducer)6173         MapReduceKeysToIntTask
6174             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6175              MapReduceKeysToIntTask<K,V> nextRight,
6176              ToIntFunction<? super K> transformer,
6177              int basis,
6178              IntBinaryOperator reducer) {
6179             super(p, b, i, f, t); this.nextRight = nextRight;
6180             this.transformer = transformer;
6181             this.basis = basis; this.reducer = reducer;
6182         }
getRawResult()6183         public final Integer getRawResult() { return result; }
compute()6184         public final void compute() {
6185             final ToIntFunction<? super K> transformer;
6186             final IntBinaryOperator reducer;
6187             if ((transformer = this.transformer) != null &&
6188                 (reducer = this.reducer) != null) {
6189                 int r = this.basis;
6190                 for (int i = baseIndex, f, h; batch > 0 &&
6191                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6192                     addToPendingCount(1);
6193                     (rights = new MapReduceKeysToIntTask<K,V>
6194                      (this, batch >>>= 1, baseLimit = h, f, tab,
6195                       rights, transformer, r, reducer)).fork();
6196                 }
6197                 for (Node<K,V> p; (p = advance()) != null; )
6198                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
6199                 result = r;
6200                 CountedCompleter<?> c;
6201                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6202                     @SuppressWarnings("unchecked")
6203                     MapReduceKeysToIntTask<K,V>
6204                         t = (MapReduceKeysToIntTask<K,V>)c,
6205                         s = t.rights;
6206                     while (s != null) {
6207                         t.result = reducer.applyAsInt(t.result, s.result);
6208                         s = t.rights = s.nextRight;
6209                     }
6210                 }
6211             }
6212         }
6213     }
6214 
6215     @SuppressWarnings("serial")
6216     static final class MapReduceValuesToIntTask<K,V>
6217         extends BulkTask<K,V,Integer> {
6218         final ToIntFunction<? super V> transformer;
6219         final IntBinaryOperator reducer;
6220         final int basis;
6221         int result;
6222         MapReduceValuesToIntTask<K,V> rights, nextRight;
MapReduceValuesToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceValuesToIntTask<K,V> nextRight, ToIntFunction<? super V> transformer, int basis, IntBinaryOperator reducer)6223         MapReduceValuesToIntTask
6224             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6225              MapReduceValuesToIntTask<K,V> nextRight,
6226              ToIntFunction<? super V> transformer,
6227              int basis,
6228              IntBinaryOperator reducer) {
6229             super(p, b, i, f, t); this.nextRight = nextRight;
6230             this.transformer = transformer;
6231             this.basis = basis; this.reducer = reducer;
6232         }
getRawResult()6233         public final Integer getRawResult() { return result; }
compute()6234         public final void compute() {
6235             final ToIntFunction<? super V> transformer;
6236             final IntBinaryOperator reducer;
6237             if ((transformer = this.transformer) != null &&
6238                 (reducer = this.reducer) != null) {
6239                 int r = this.basis;
6240                 for (int i = baseIndex, f, h; batch > 0 &&
6241                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6242                     addToPendingCount(1);
6243                     (rights = new MapReduceValuesToIntTask<K,V>
6244                      (this, batch >>>= 1, baseLimit = h, f, tab,
6245                       rights, transformer, r, reducer)).fork();
6246                 }
6247                 for (Node<K,V> p; (p = advance()) != null; )
6248                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
6249                 result = r;
6250                 CountedCompleter<?> c;
6251                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6252                     @SuppressWarnings("unchecked")
6253                     MapReduceValuesToIntTask<K,V>
6254                         t = (MapReduceValuesToIntTask<K,V>)c,
6255                         s = t.rights;
6256                     while (s != null) {
6257                         t.result = reducer.applyAsInt(t.result, s.result);
6258                         s = t.rights = s.nextRight;
6259                     }
6260                 }
6261             }
6262         }
6263     }
6264 
6265     @SuppressWarnings("serial")
6266     static final class MapReduceEntriesToIntTask<K,V>
6267         extends BulkTask<K,V,Integer> {
6268         final ToIntFunction<Map.Entry<K,V>> transformer;
6269         final IntBinaryOperator reducer;
6270         final int basis;
6271         int result;
6272         MapReduceEntriesToIntTask<K,V> rights, nextRight;
MapReduceEntriesToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceEntriesToIntTask<K,V> nextRight, ToIntFunction<Map.Entry<K,V>> transformer, int basis, IntBinaryOperator reducer)6273         MapReduceEntriesToIntTask
6274             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6275              MapReduceEntriesToIntTask<K,V> nextRight,
6276              ToIntFunction<Map.Entry<K,V>> transformer,
6277              int basis,
6278              IntBinaryOperator reducer) {
6279             super(p, b, i, f, t); this.nextRight = nextRight;
6280             this.transformer = transformer;
6281             this.basis = basis; this.reducer = reducer;
6282         }
getRawResult()6283         public final Integer getRawResult() { return result; }
compute()6284         public final void compute() {
6285             final ToIntFunction<Map.Entry<K,V>> transformer;
6286             final IntBinaryOperator reducer;
6287             if ((transformer = this.transformer) != null &&
6288                 (reducer = this.reducer) != null) {
6289                 int r = this.basis;
6290                 for (int i = baseIndex, f, h; batch > 0 &&
6291                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6292                     addToPendingCount(1);
6293                     (rights = new MapReduceEntriesToIntTask<K,V>
6294                      (this, batch >>>= 1, baseLimit = h, f, tab,
6295                       rights, transformer, r, reducer)).fork();
6296                 }
6297                 for (Node<K,V> p; (p = advance()) != null; )
6298                     r = reducer.applyAsInt(r, transformer.applyAsInt(p));
6299                 result = r;
6300                 CountedCompleter<?> c;
6301                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6302                     @SuppressWarnings("unchecked")
6303                     MapReduceEntriesToIntTask<K,V>
6304                         t = (MapReduceEntriesToIntTask<K,V>)c,
6305                         s = t.rights;
6306                     while (s != null) {
6307                         t.result = reducer.applyAsInt(t.result, s.result);
6308                         s = t.rights = s.nextRight;
6309                     }
6310                 }
6311             }
6312         }
6313     }
6314 
6315     @SuppressWarnings("serial")
6316     static final class MapReduceMappingsToIntTask<K,V>
6317         extends BulkTask<K,V,Integer> {
6318         final ToIntBiFunction<? super K, ? super V> transformer;
6319         final IntBinaryOperator reducer;
6320         final int basis;
6321         int result;
6322         MapReduceMappingsToIntTask<K,V> rights, nextRight;
MapReduceMappingsToIntTask(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, MapReduceMappingsToIntTask<K,V> nextRight, ToIntBiFunction<? super K, ? super V> transformer, int basis, IntBinaryOperator reducer)6323         MapReduceMappingsToIntTask
6324             (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6325              MapReduceMappingsToIntTask<K,V> nextRight,
6326              ToIntBiFunction<? super K, ? super V> transformer,
6327              int basis,
6328              IntBinaryOperator reducer) {
6329             super(p, b, i, f, t); this.nextRight = nextRight;
6330             this.transformer = transformer;
6331             this.basis = basis; this.reducer = reducer;
6332         }
getRawResult()6333         public final Integer getRawResult() { return result; }
compute()6334         public final void compute() {
6335             final ToIntBiFunction<? super K, ? super V> transformer;
6336             final IntBinaryOperator reducer;
6337             if ((transformer = this.transformer) != null &&
6338                 (reducer = this.reducer) != null) {
6339                 int r = this.basis;
6340                 for (int i = baseIndex, f, h; batch > 0 &&
6341                          (h = ((f = baseLimit) + i) >>> 1) > i;) {
6342                     addToPendingCount(1);
6343                     (rights = new MapReduceMappingsToIntTask<K,V>
6344                      (this, batch >>>= 1, baseLimit = h, f, tab,
6345                       rights, transformer, r, reducer)).fork();
6346                 }
6347                 for (Node<K,V> p; (p = advance()) != null; )
6348                     r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
6349                 result = r;
6350                 CountedCompleter<?> c;
6351                 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6352                     @SuppressWarnings("unchecked")
6353                     MapReduceMappingsToIntTask<K,V>
6354                         t = (MapReduceMappingsToIntTask<K,V>)c,
6355                         s = t.rights;
6356                     while (s != null) {
6357                         t.result = reducer.applyAsInt(t.result, s.result);
6358                         s = t.rights = s.nextRight;
6359                     }
6360                 }
6361             }
6362         }
6363     }
6364 
6365     // Unsafe mechanics
6366     private static final Unsafe U = Unsafe.getUnsafe();
6367     private static final long SIZECTL
6368         = U.objectFieldOffset(ConcurrentHashMap.class, "sizeCtl");
6369     private static final long TRANSFERINDEX
6370         = U.objectFieldOffset(ConcurrentHashMap.class, "transferIndex");
6371     private static final long BASECOUNT
6372         = U.objectFieldOffset(ConcurrentHashMap.class, "baseCount");
6373     private static final long CELLSBUSY
6374         = U.objectFieldOffset(ConcurrentHashMap.class, "cellsBusy");
6375     private static final long CELLVALUE
6376         = U.objectFieldOffset(CounterCell.class, "value");
6377     private static final int ABASE = U.arrayBaseOffset(Node[].class);
6378     private static final int ASHIFT;
6379 
6380     static {
6381         int scale = U.arrayIndexScale(Node[].class);
6382         if ((scale & (scale - 1)) != 0)
6383             throw new ExceptionInInitializerError("array index scale not a power of two");
6384         ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6385 
6386         // Reduce the risk of rare disastrous classloading in first call to
6387         // LockSupport.park: https://bugs.openjdk.org/browse/JDK-8074773
6388         Class<?> ensureLoaded = LockSupport.class;
6389 
6390         // Eager class load observed to help JIT during startup
6391         ensureLoaded = ReservationNode.class;
6392     }
6393 }
6394