1 /*
2 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
3 *
4 * This code is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 only, as
6 * published by the Free Software Foundation. Oracle designates this
7 * particular file as subject to the "Classpath" exception as provided
8 * by Oracle in the LICENSE file that accompanied this code.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 */
24
25 /*
26 * This file is available under and governed by the GNU General Public
27 * License version 2 only, as published by the Free Software Foundation.
28 * However, the following notice accompanied the original version of this
29 * file:
30 *
31 * Written by Doug Lea with assistance from members of JCP JSR-166
32 * Expert Group and released to the public domain, as explained at
33 * http://creativecommons.org/publicdomain/zero/1.0/
34 */
35
36 package java.util.concurrent;
37
38 import java.io.ObjectStreamField;
39 import java.io.Serializable;
40 import java.lang.reflect.ParameterizedType;
41 import java.lang.reflect.Type;
42 import java.util.AbstractMap;
43 import java.util.Arrays;
44 import java.util.Collection;
45 import java.util.Enumeration;
46 import java.util.HashMap;
47 import java.util.Hashtable;
48 import java.util.Iterator;
49 import java.util.Map;
50 import java.util.NoSuchElementException;
51 import java.util.Set;
52 import java.util.Spliterator;
53 import java.util.concurrent.atomic.AtomicReference;
54 import java.util.concurrent.locks.LockSupport;
55 import java.util.concurrent.locks.ReentrantLock;
56 import java.util.function.BiConsumer;
57 import java.util.function.BiFunction;
58 import java.util.function.Consumer;
59 import java.util.function.DoubleBinaryOperator;
60 import java.util.function.Function;
61 import java.util.function.IntBinaryOperator;
62 import java.util.function.LongBinaryOperator;
63 import java.util.function.Predicate;
64 import java.util.function.ToDoubleBiFunction;
65 import java.util.function.ToDoubleFunction;
66 import java.util.function.ToIntBiFunction;
67 import java.util.function.ToIntFunction;
68 import java.util.function.ToLongBiFunction;
69 import java.util.function.ToLongFunction;
70 import java.util.stream.Stream;
71 import jdk.internal.misc.Unsafe;
72 import jdk.internal.util.ArraysSupport;
73 import jdk.internal.vm.annotation.AOTRuntimeSetup;
74 import jdk.internal.vm.annotation.AOTSafeClassInitializer;
75 import jdk.internal.vm.annotation.Stable;
76
77 /**
78 * A hash table supporting full concurrency of retrievals and
79 * high expected concurrency for updates. This class obeys the
80 * same functional specification as {@link java.util.Hashtable}, and
81 * includes versions of methods corresponding to each method of
82 * {@code Hashtable}. However, even though all operations are
83 * thread-safe, retrieval operations do <em>not</em> entail locking,
84 * and there is <em>not</em> any support for locking the entire table
85 * in a way that prevents all access. This class is fully
86 * interoperable with {@code Hashtable} in programs that rely on its
87 * thread safety but not on its synchronization details.
88 *
89 * <p>Retrieval operations (including {@code get}) generally do not
90 * block, so may overlap with update operations (including {@code put}
91 * and {@code remove}). Retrievals reflect the results of the most
92 * recently <em>completed</em> update operations holding upon their
93 * onset. (More formally, an update operation for a given key bears a
94 * <em>happens-before</em> relation with any (non-null) retrieval for
95 * that key reporting the updated value.) For aggregate operations
96 * such as {@code putAll} and {@code clear}, concurrent retrievals may
97 * reflect insertion or removal of only some entries. Similarly,
98 * Iterators, Spliterators and Enumerations return elements reflecting the
99 * state of the hash table at some point at or since the creation of the
100 * iterator/enumeration. They do <em>not</em> throw {@link
101 * java.util.ConcurrentModificationException ConcurrentModificationException}.
102 * However, iterators are designed to be used by only one thread at a time.
103 * Bear in mind that the results of aggregate status methods including
104 * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
105 * useful only when a map is not undergoing concurrent updates in other threads.
106 * Otherwise the results of these methods reflect transient states
107 * that may be adequate for monitoring or estimation purposes, but not
108 * for program control.
109 *
110 * <p>The table is dynamically expanded when there are too many
111 * collisions (i.e., keys that have distinct hash codes but fall into
112 * the same slot modulo the table size), with the expected average
113 * effect of maintaining roughly two bins per mapping (corresponding
114 * to a 0.75 load factor threshold for resizing). There may be much
115 * variance around this average as mappings are added and removed, but
116 * overall, this maintains a commonly accepted time/space tradeoff for
117 * hash tables. However, resizing this or any other kind of hash
118 * table may be a relatively slow operation. When possible, it is a
119 * good idea to provide a size estimate as an optional {@code
120 * initialCapacity} constructor argument. An additional optional
121 * {@code loadFactor} constructor argument provides a further means of
122 * customizing initial table capacity by specifying the table density
123 * to be used in calculating the amount of space to allocate for the
124 * given number of elements. Also, for compatibility with previous
125 * versions of this class, constructors may optionally specify an
126 * expected {@code concurrencyLevel} as an additional hint for
127 * internal sizing. Note that using many keys with exactly the same
128 * {@code hashCode()} is a sure way to slow down performance of any
129 * hash table. To ameliorate impact, when keys are {@link Comparable},
130 * this class may use comparison order among keys to help break ties.
131 *
132 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
133 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
134 * (using {@link #keySet(Object)} when only keys are of interest, and the
135 * mapped values are (perhaps transiently) not used or all take the
136 * same mapping value.
137 *
138 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
139 * form of histogram or multiset) by using {@link
140 * java.util.concurrent.atomic.LongAdder} values and initializing via
141 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
142 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
143 * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
144 *
145 * <p>This class and its views and iterators implement all of the
146 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
147 * interfaces.
148 *
149 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
150 * does <em>not</em> allow {@code null} to be used as a key or value.
151 *
152 * <p id="Bulk">ConcurrentHashMaps support a set of sequential and parallel bulk
153 * operations that, unlike most {@link Stream} methods, are designed
154 * to be safely, and often sensibly, applied even with maps that are
155 * being concurrently updated by other threads; for example, when
156 * computing a snapshot summary of the values in a shared registry.
157 * There are three kinds of operation, each with four forms, accepting
158 * functions with keys, values, entries, and (key, value) pairs as
159 * arguments and/or return values. Because the elements of a
160 * ConcurrentHashMap are not ordered in any particular way, and may be
161 * processed in different orders in different parallel executions, the
162 * correctness of supplied functions should not depend on any
163 * ordering, or on any other objects or values that may transiently
164 * change while computation is in progress; and except for forEach
165 * actions, should ideally be side-effect-free. Bulk operations on
166 * {@link Map.Entry} objects do not support method {@code setValue}.
167 *
168 * <ul>
169 * <li>forEach: Performs a given action on each element.
170 * A variant form applies a given transformation on each element
171 * before performing the action.
172 *
173 * <li>search: Returns the first available non-null result of
174 * applying a given function on each element; skipping further
175 * search when a result is found.
176 *
177 * <li>reduce: Accumulates each element. The supplied reduction
178 * function cannot rely on ordering (more formally, it should be
179 * both associative and commutative). There are five variants:
180 *
181 * <ul>
182 *
183 * <li>Plain reductions. (There is not a form of this method for
184 * (key, value) function arguments since there is no corresponding
185 * return type.)
186 *
187 * <li>Mapped reductions that accumulate the results of a given
188 * function applied to each element.
189 *
190 * <li>Reductions to scalar doubles, longs, and ints, using a
191 * given basis value.
192 *
193 * </ul>
194 * </ul>
195 *
196 * <p>These bulk operations accept a {@code parallelismThreshold}
197 * argument. Methods proceed sequentially if the current map size is
198 * estimated to be less than the given threshold. Using a value of
199 * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
200 * of {@code 1} results in maximal parallelism by partitioning into
201 * enough subtasks to fully utilize the {@link
202 * ForkJoinPool#commonPool()} that is used for all parallel
203 * computations. Normally, you would initially choose one of these
204 * extreme values, and then measure performance of using in-between
205 * values that trade off overhead versus throughput.
206 *
207 * <p>The concurrency properties of bulk operations follow
208 * from those of ConcurrentHashMap: Any non-null result returned
209 * from {@code get(key)} and related access methods bears a
210 * happens-before relation with the associated insertion or
211 * update. The result of any bulk operation reflects the
212 * composition of these per-element relations (but is not
213 * necessarily atomic with respect to the map as a whole unless it
214 * is somehow known to be quiescent). Conversely, because keys
215 * and values in the map are never null, null serves as a reliable
216 * atomic indicator of the current lack of any result. To
217 * maintain this property, null serves as an implicit basis for
218 * all non-scalar reduction operations. For the double, long, and
219 * int versions, the basis should be one that, when combined with
220 * any other value, returns that other value (more formally, it
221 * should be the identity element for the reduction). Most common
222 * reductions have these properties; for example, computing a sum
223 * with basis 0 or a minimum with basis MAX_VALUE.
224 *
225 * <p>Search and transformation functions provided as arguments
226 * should similarly return null to indicate the lack of any result
227 * (in which case it is not used). In the case of mapped
228 * reductions, this also enables transformations to serve as
229 * filters, returning null (or, in the case of primitive
230 * specializations, the identity basis) if the element should not
231 * be combined. You can create compound transformations and
232 * filterings by composing them yourself under this "null means
233 * there is nothing there now" rule before using them in search or
234 * reduce operations.
235 *
236 * <p>Methods accepting and/or returning Entry arguments maintain
237 * key-value associations. They may be useful for example when
238 * finding the key for the greatest value. Note that "plain" Entry
239 * arguments can be supplied using {@code new
240 * AbstractMap.SimpleEntry(k,v)}.
241 *
242 * <p>Bulk operations may complete abruptly, throwing an
243 * exception encountered in the application of a supplied
244 * function. Bear in mind when handling such exceptions that other
245 * concurrently executing functions could also have thrown
246 * exceptions, or would have done so if the first exception had
247 * not occurred.
248 *
249 * <p>Speedups for parallel compared to sequential forms are common
250 * but not guaranteed. Parallel operations involving brief functions
251 * on small maps may execute more slowly than sequential forms if the
252 * underlying work to parallelize the computation is more expensive
253 * than the computation itself. Similarly, parallelization may not
254 * lead to much actual parallelism if all processors are busy
255 * performing unrelated tasks.
256 *
257 * <p>All arguments to all task methods must be non-null.
258 *
259 * <p>This class is a member of the
260 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
261 * Java Collections Framework</a>.
262 *
263 * @since 1.5
264 * @author Doug Lea
265 * @param <K> the type of keys maintained by this map
266 * @param <V> the type of mapped values
267 */
268 @AOTSafeClassInitializer
269 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
270 implements ConcurrentMap<K,V>, Serializable {
271 private static final long serialVersionUID = 7249069246763182397L;
272
273 /*
274 * Overview:
275 *
276 * The primary design goal of this hash table is to maintain
277 * concurrent readability (typically method get(), but also
278 * iterators and related methods) while minimizing update
279 * contention. Secondary goals are to keep space consumption about
280 * the same or better than java.util.HashMap, and to support high
281 * initial insertion rates on an empty table by many threads.
282 *
283 * This map usually acts as a binned (bucketed) hash table. Each
284 * key-value mapping is held in a Node. Most nodes are instances
285 * of the basic Node class with hash, key, value, and next
286 * fields. However, various subclasses exist: TreeNodes are
287 * arranged in balanced trees, not lists. TreeBins hold the roots
288 * of sets of TreeNodes. ForwardingNodes are placed at the heads
289 * of bins during resizing. ReservationNodes are used as
290 * placeholders while establishing values in computeIfAbsent and
291 * related methods. The types TreeBin, ForwardingNode, and
292 * ReservationNode do not hold normal user keys, values, or
293 * hashes, and are readily distinguishable during search etc
294 * because they have negative hash fields and null key and value
295 * fields. (These special nodes are either uncommon or transient,
296 * so the impact of carrying around some unused fields is
297 * insignificant.)
298 *
299 * The table is lazily initialized to a power-of-two size upon the
300 * first insertion. Each bin in the table normally contains a
301 * list of Nodes (most often, the list has only zero or one Node).
302 * Table accesses require volatile/atomic reads, writes, and
303 * CASes. Because there is no other way to arrange this without
304 * adding further indirections, we use intrinsics
305 * (jdk.internal.misc.Unsafe) operations.
306 *
307 * We use the top (sign) bit of Node hash fields for control
308 * purposes -- it is available anyway because of addressing
309 * constraints. Nodes with negative hash fields are specially
310 * handled or ignored in map methods.
311 *
312 * Insertion (via put or its variants) of the first node in an
313 * empty bin is performed by just CASing it to the bin. This is
314 * by far the most common case for put operations under most
315 * key/hash distributions. Other update operations (insert,
316 * delete, and replace) require locks. We do not want to waste
317 * the space required to associate a distinct lock object with
318 * each bin, so instead use the first node of a bin list itself as
319 * a lock. Locking support for these locks relies on builtin
320 * "synchronized" monitors.
321 *
322 * Using the first node of a list as a lock does not by itself
323 * suffice though: When a node is locked, any update must first
324 * validate that it is still the first node after locking it, and
325 * retry if not. Because new nodes are always appended to lists,
326 * once a node is first in a bin, it remains first until deleted
327 * or the bin becomes invalidated (upon resizing).
328 *
329 * The main disadvantage of per-bin locks is that other update
330 * operations on other nodes in a bin list protected by the same
331 * lock can stall, for example when user equals() or mapping
332 * functions take a long time. However, statistically, under
333 * random hash codes, this is not a common problem. Ideally, the
334 * frequency of nodes in bins follows a Poisson distribution
335 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
336 * parameter of about 0.5 on average, given the resizing threshold
337 * of 0.75, although with a large variance because of resizing
338 * granularity. Ignoring variance, the expected occurrences of
339 * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
340 * first values are:
341 *
342 * 0: 0.60653066
343 * 1: 0.30326533
344 * 2: 0.07581633
345 * 3: 0.01263606
346 * 4: 0.00157952
347 * 5: 0.00015795
348 * 6: 0.00001316
349 * 7: 0.00000094
350 * 8: 0.00000006
351 * more: less than 1 in ten million
352 *
353 * Lock contention probability for two threads accessing distinct
354 * elements is roughly 1 / (8 * #elements) under random hashes.
355 *
356 * Actual hash code distributions encountered in practice
357 * sometimes deviate significantly from uniform randomness. This
358 * includes the case when N > (1<<30), so some keys MUST collide.
359 * Similarly for dumb or hostile usages in which multiple keys are
360 * designed to have identical hash codes or ones that differs only
361 * in masked-out high bits. So we use a secondary strategy that
362 * applies when the number of nodes in a bin exceeds a
363 * threshold. These TreeBins use a balanced tree to hold nodes (a
364 * specialized form of red-black trees), bounding search time to
365 * O(log N). Each search step in a TreeBin is at least twice as
366 * slow as in a regular list, but given that N cannot exceed
367 * (1<<64) (before running out of addresses) this bounds search
368 * steps, lock hold times, etc, to reasonable constants (roughly
369 * 100 nodes inspected per operation worst case) so long as keys
370 * are Comparable (which is very common -- String, Long, etc).
371 * TreeBin nodes (TreeNodes) also maintain the same "next"
372 * traversal pointers as regular nodes, so can be traversed in
373 * iterators in the same way.
374 *
375 * The table is resized when occupancy exceeds a percentage
376 * threshold (nominally, 0.75, but see below). Any thread
377 * noticing an overfull bin may assist in resizing after the
378 * initiating thread allocates and sets up the replacement array.
379 * However, rather than stalling, these other threads may proceed
380 * with insertions etc. The use of TreeBins shields us from the
381 * worst case effects of overfilling while resizes are in
382 * progress. Resizing proceeds by transferring bins, one by one,
383 * from the table to the next table. However, threads claim small
384 * blocks of indices to transfer (via field transferIndex) before
385 * doing so, reducing contention. A generation stamp in field
386 * sizeCtl ensures that resizings do not overlap. Because we are
387 * using power-of-two expansion, the elements from each bin must
388 * either stay at same index, or move with a power of two
389 * offset. We eliminate unnecessary node creation by catching
390 * cases where old nodes can be reused because their next fields
391 * won't change. On average, only about one-sixth of them need
392 * cloning when a table doubles. The nodes they replace will be
393 * garbage collectible as soon as they are no longer referenced by
394 * any reader thread that may be in the midst of concurrently
395 * traversing table. Upon transfer, the old table bin contains
396 * only a special forwarding node (with hash field "MOVED") that
397 * contains the next table as its key. On encountering a
398 * forwarding node, access and update operations restart, using
399 * the new table.
400 *
401 * Each bin transfer requires its bin lock, which can stall
402 * waiting for locks while resizing. However, because other
403 * threads can join in and help resize rather than contend for
404 * locks, average aggregate waits become shorter as resizing
405 * progresses. The transfer operation must also ensure that all
406 * accessible bins in both the old and new table are usable by any
407 * traversal. This is arranged in part by proceeding from the
408 * last bin (table.length - 1) up towards the first. Upon seeing
409 * a forwarding node, traversals (see class Traverser) arrange to
410 * move to the new table without revisiting nodes. To ensure that
411 * no intervening nodes are skipped even when moved out of order,
412 * a stack (see class TableStack) is created on first encounter of
413 * a forwarding node during a traversal, to maintain its place if
414 * later processing the current table. The need for these
415 * save/restore mechanics is relatively rare, but when one
416 * forwarding node is encountered, typically many more will be.
417 * So Traversers use a simple caching scheme to avoid creating so
418 * many new TableStack nodes. (Thanks to Peter Levart for
419 * suggesting use of a stack here.)
420 *
421 * The traversal scheme also applies to partial traversals of
422 * ranges of bins (via an alternate Traverser constructor)
423 * to support partitioned aggregate operations. Also, read-only
424 * operations give up if ever forwarded to a null table, which
425 * provides support for shutdown-style clearing, which is also not
426 * currently implemented.
427 *
428 * Lazy table initialization minimizes footprint until first use,
429 * and also avoids resizings when the first operation is from a
430 * putAll, constructor with map argument, or deserialization.
431 * These cases attempt to override the initial capacity settings,
432 * but harmlessly fail to take effect in cases of races.
433 *
434 * The element count is maintained using a specialization of
435 * LongAdder. We need to incorporate a specialization rather than
436 * just use a LongAdder in order to access implicit
437 * contention-sensing that leads to creation of multiple
438 * CounterCells. The counter mechanics avoid contention on
439 * updates but can encounter cache thrashing if read too
440 * frequently during concurrent access. To avoid reading so often,
441 * resizing under contention is attempted only upon adding to a
442 * bin already holding two or more nodes. Under uniform hash
443 * distributions, the probability of this occurring at threshold
444 * is around 13%, meaning that only about 1 in 8 puts check
445 * threshold (and after resizing, many fewer do so).
446 *
447 * TreeBins use a special form of comparison for search and
448 * related operations (which is the main reason we cannot use
449 * existing collections such as TreeMaps). TreeBins contain
450 * Comparable elements, but may contain others, as well as
451 * elements that are Comparable but not necessarily Comparable for
452 * the same T, so we cannot invoke compareTo among them. To handle
453 * this, the tree is ordered primarily by hash value, then by
454 * Comparable.compareTo order if applicable. On lookup at a node,
455 * if elements are not comparable or compare as 0 then both left
456 * and right children may need to be searched in the case of tied
457 * hash values. (This corresponds to the full list search that
458 * would be necessary if all elements were non-Comparable and had
459 * tied hashes.) On insertion, to keep a total ordering (or as
460 * close as is required here) across rebalancings, we compare
461 * classes and identityHashCodes as tie-breakers. The red-black
462 * balancing code is updated from pre-jdk-collections
463 * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
464 * based in turn on Cormen, Leiserson, and Rivest "Introduction to
465 * Algorithms" (CLR).
466 *
467 * TreeBins also require an additional locking mechanism. While
468 * list traversal is always possible by readers even during
469 * updates, tree traversal is not, mainly because of tree-rotations
470 * that may change the root node and/or its linkages. TreeBins
471 * include a simple read-write lock mechanism parasitic on the
472 * main bin-synchronization strategy: Structural adjustments
473 * associated with an insertion or removal are already bin-locked
474 * (and so cannot conflict with other writers) but must wait for
475 * ongoing readers to finish. Since there can be only one such
476 * waiter, we use a simple scheme using a single "waiter" field to
477 * block writers. However, readers need never block. If the root
478 * lock is held, they proceed along the slow traversal path (via
479 * next-pointers) until the lock becomes available or the list is
480 * exhausted, whichever comes first. These cases are not fast, but
481 * maximize aggregate expected throughput.
482 *
483 * Maintaining API and serialization compatibility with previous
484 * versions of this class introduces several oddities. Mainly: We
485 * leave untouched but unused constructor arguments referring to
486 * concurrencyLevel. We accept a loadFactor constructor argument,
487 * but apply it only to initial table capacity (which is the only
488 * time that we can guarantee to honor it.) We also declare an
489 * unused "Segment" class that is instantiated in minimal form
490 * only when serializing.
491 *
492 * Also, solely for compatibility with previous versions of this
493 * class, it extends AbstractMap, even though all of its methods
494 * are overridden, so it is just useless baggage.
495 *
496 * This file is organized to make things a little easier to follow
497 * while reading than they might otherwise: First the main static
498 * declarations and utilities, then fields, then main public
499 * methods (with a few factorings of multiple public methods into
500 * internal ones), then sizing methods, trees, traversers, and
501 * bulk operations.
502 */
503
504 /* ---------------- Constants -------------- */
505
506 /**
507 * The largest possible table capacity. This value must be
508 * exactly 1<<30 to stay within Java array allocation and indexing
509 * bounds for power of two table sizes, and is further required
510 * because the top two bits of 32bit hash fields are used for
511 * control purposes.
512 */
513 private static final int MAXIMUM_CAPACITY = 1 << 30;
514
515 /**
516 * The default initial table capacity. Must be a power of 2
517 * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
518 */
519 private static final int DEFAULT_CAPACITY = 16;
520
521 /**
522 * The largest possible (non-power of two) array size.
523 * Needed by toArray and related methods.
524 */
525 static final int MAX_ARRAY_SIZE = ArraysSupport.SOFT_MAX_ARRAY_LENGTH;
526
527 /**
528 * The default concurrency level for this table. Unused but
529 * defined for compatibility with previous versions of this class.
530 */
531 private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
532
533 /**
534 * The load factor for this table. Overrides of this value in
535 * constructors affect only the initial table capacity. The
536 * actual floating point value isn't normally used -- it is
537 * simpler to use expressions such as {@code n - (n >>> 2)} for
538 * the associated resizing threshold.
539 */
540 private static final float LOAD_FACTOR = 0.75f;
541
542 /**
543 * The bin count threshold for using a tree rather than list for a
544 * bin. Bins are converted to trees when adding an element to a
545 * bin with at least this many nodes. The value must be greater
546 * than 2, and should be at least 8 to mesh with assumptions in
547 * tree removal about conversion back to plain bins upon
548 * shrinkage.
549 */
550 static final int TREEIFY_THRESHOLD = 8;
551
552 /**
553 * The bin count threshold for untreeifying a (split) bin during a
554 * resize operation. Should be less than TREEIFY_THRESHOLD, and at
555 * most 6 to mesh with shrinkage detection under removal.
556 */
557 static final int UNTREEIFY_THRESHOLD = 6;
558
559 /**
560 * The smallest table capacity for which bins may be treeified.
561 * (Otherwise the table is resized if too many nodes in a bin.)
562 * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
563 * conflicts between resizing and treeification thresholds.
564 */
565 static final int MIN_TREEIFY_CAPACITY = 64;
566
567 /**
568 * Minimum number of rebinnings per transfer step. Ranges are
569 * subdivided to allow multiple resizer threads. This value
570 * serves as a lower bound to avoid resizers encountering
571 * excessive memory contention. The value should be at least
572 * DEFAULT_CAPACITY.
573 */
574 private static final int MIN_TRANSFER_STRIDE = 16;
575
576 /**
577 * The number of bits used for generation stamp in sizeCtl.
578 * Must be at least 6 for 32bit arrays.
579 */
580 private static final int RESIZE_STAMP_BITS = 16;
581
582 /**
583 * The maximum number of threads that can help resize.
584 * Must fit in 32 - RESIZE_STAMP_BITS bits.
585 */
586 private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
587
588 /**
589 * The bit shift for recording size stamp in sizeCtl.
590 */
591 private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
592
593 /*
594 * Encodings for Node hash fields. See above for explanation.
595 */
596 static final int MOVED = -1; // hash for forwarding nodes
597 static final int TREEBIN = -2; // hash for roots of trees
598 static final int RESERVED = -3; // hash for transient reservations
599 static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
600
601 /** Number of CPUS, to place bounds on some sizings */
602 static @Stable int NCPU;
603
604 static {
605 runtimeSetup();
606 }
607
608 @AOTRuntimeSetup
609 private static void runtimeSetup() {
610 NCPU = Runtime.getRuntime().availableProcessors();
611 }
612
613 /**
614 * Serialized pseudo-fields, provided only for jdk7 compatibility.
615 * @serialField segments Segment[]
616 * The segments, each of which is a specialized hash table.
617 * @serialField segmentMask int
618 * Mask value for indexing into segments. The upper bits of a
619 * key's hash code are used to choose the segment.
620 * @serialField segmentShift int
621 * Shift value for indexing within segments.
622 */
623 private static final ObjectStreamField[] serialPersistentFields = {
624 new ObjectStreamField("segments", Segment[].class),
625 new ObjectStreamField("segmentMask", Integer.TYPE),
626 new ObjectStreamField("segmentShift", Integer.TYPE),
627 };
628
629 /* ---------------- Nodes -------------- */
630
631 /**
632 * Key-value entry. This class is never exported out as a
633 * user-mutable Map.Entry (i.e., one supporting setValue; see
634 * MapEntry below), but can be used for read-only traversals used
635 * in bulk tasks. Subclasses of Node with a negative hash field
636 * are special, and contain null keys and values (but are never
637 * exported). Otherwise, keys and vals are never null.
638 */
639 static class Node<K,V> implements Map.Entry<K,V> {
640 final int hash;
641 final K key;
642 volatile V val;
643 volatile Node<K,V> next;
644
645 Node(int hash, K key, V val) {
646 this.hash = hash;
647 this.key = key;
648 this.val = val;
649 }
650
651 Node(int hash, K key, V val, Node<K,V> next) {
652 this(hash, key, val);
653 this.next = next;
654 }
655
656 public final K getKey() { return key; }
657 public final V getValue() { return val; }
658 public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
659 public final String toString() {
660 return Helpers.mapEntryToString(key, val);
661 }
662 public final V setValue(V value) {
663 throw new UnsupportedOperationException();
664 }
665
666 public final boolean equals(Object o) {
667 Object k, v, u; Map.Entry<?,?> e;
668 return ((o instanceof Map.Entry) &&
669 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
670 (v = e.getValue()) != null &&
671 (k == key || k.equals(key)) &&
672 (v == (u = val) || v.equals(u)));
673 }
674
675 /**
676 * Virtualized support for map.get(); overridden in subclasses.
677 */
678 Node<K,V> find(int h, Object k) {
679 Node<K,V> e = this;
680 if (k != null) {
681 do {
682 K ek;
683 if (e.hash == h &&
684 ((ek = e.key) == k || (ek != null && k.equals(ek))))
685 return e;
686 } while ((e = e.next) != null);
687 }
688 return null;
689 }
690 }
691
692 /* ---------------- Static utilities -------------- */
693
694 /**
695 * Spreads (XORs) higher bits of hash to lower and also forces top
696 * bit to 0. Because the table uses power-of-two masking, sets of
697 * hashes that vary only in bits above the current mask will
698 * always collide. (Among known examples are sets of Float keys
699 * holding consecutive whole numbers in small tables.) So we
700 * apply a transform that spreads the impact of higher bits
701 * downward. There is a tradeoff between speed, utility, and
702 * quality of bit-spreading. Because many common sets of hashes
703 * are already reasonably distributed (so don't benefit from
704 * spreading), and because we use trees to handle large sets of
705 * collisions in bins, we just XOR some shifted bits in the
706 * cheapest possible way to reduce systematic lossage, as well as
707 * to incorporate impact of the highest bits that would otherwise
708 * never be used in index calculations because of table bounds.
709 */
710 static final int spread(int h) {
711 return (h ^ (h >>> 16)) & HASH_BITS;
712 }
713
714 /**
715 * Returns a power of two table size for the given desired capacity.
716 * See Hackers Delight, sec 3.2
717 */
718 private static final int tableSizeFor(int c) {
719 int n = -1 >>> Integer.numberOfLeadingZeros(c - 1);
720 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
721 }
722
723 /**
724 * Returns x's Class if it is of the form "class C implements
725 * Comparable<C>", else null.
726 */
727 static Class<?> comparableClassFor(Object x) {
728 if (x instanceof Comparable) {
729 Class<?> c; Type[] ts, as; ParameterizedType p;
730 if ((c = x.getClass()) == String.class) // bypass checks
731 return c;
732 if ((ts = c.getGenericInterfaces()) != null) {
733 for (Type t : ts) {
734 if ((t instanceof ParameterizedType) &&
735 ((p = (ParameterizedType)t).getRawType() ==
736 Comparable.class) &&
737 (as = p.getActualTypeArguments()) != null &&
738 as.length == 1 && as[0] == c) // type arg is c
739 return c;
740 }
741 }
742 }
743 return null;
744 }
745
746 /**
747 * Returns k.compareTo(x) if x matches kc (k's screened comparable
748 * class), else 0.
749 */
750 @SuppressWarnings("unchecked") // for cast to Comparable
751 static int compareComparables(Class<?> kc, Object k, Object x) {
752 return (x == null || x.getClass() != kc ? 0 :
753 ((Comparable)k).compareTo(x));
754 }
755
756 /* ---------------- Table element access -------------- */
757
758 /*
759 * Atomic access methods are used for table elements as well as
760 * elements of in-progress next table while resizing. All uses of
761 * the tab arguments must be null checked by callers. All callers
762 * also paranoically precheck that tab's length is not zero (or an
763 * equivalent check), thus ensuring that any index argument taking
764 * the form of a hash value anded with (length - 1) is a valid
765 * index. Note that, to be correct wrt arbitrary concurrency
766 * errors by users, these checks must operate on local variables,
767 * which accounts for some odd-looking inline assignments below.
768 * Note that calls to setTabAt always occur within locked regions,
769 * and so require only release ordering.
770 */
771
772 @SuppressWarnings("unchecked")
773 static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
774 return (Node<K,V>)U.getReferenceAcquire(tab, ((long)i << ASHIFT) + ABASE);
775 }
776
777 static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
778 Node<K,V> c, Node<K,V> v) {
779 return U.compareAndSetReference(tab, ((long)i << ASHIFT) + ABASE, c, v);
780 }
781
782 static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
783 U.putReferenceRelease(tab, ((long)i << ASHIFT) + ABASE, v);
784 }
785
786 /* ---------------- Fields -------------- */
787
788 /**
789 * The array of bins. Lazily initialized upon first insertion.
790 * Size is always a power of two. Accessed directly by iterators.
791 */
792 transient volatile Node<K,V>[] table;
793
794 /**
795 * The next table to use; non-null only while resizing.
796 */
797 private transient volatile Node<K,V>[] nextTable;
798
799 /**
800 * Base counter value, used mainly when there is no contention,
801 * but also as a fallback during table initialization
802 * races. Updated via CAS.
803 */
804 private transient volatile long baseCount;
805
806 /**
807 * Table initialization and resizing control. When negative, the
808 * table is being initialized or resized: -1 for initialization,
809 * else -(1 + the number of active resizing threads). Otherwise,
810 * when table is null, holds the initial table size to use upon
811 * creation, or 0 for default. After initialization, holds the
812 * next element count value upon which to resize the table.
813 */
814 private transient volatile int sizeCtl;
815
816 /**
817 * The next table index (plus one) to split while resizing.
818 */
819 private transient volatile int transferIndex;
820
821 /**
822 * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
823 */
824 private transient volatile int cellsBusy;
825
826 /**
827 * Table of counter cells. When non-null, size is a power of 2.
828 */
829 private transient volatile CounterCell[] counterCells;
830
831 // views
832 private transient KeySetView<K,V> keySet;
833 private transient ValuesView<K,V> values;
834 private transient EntrySetView<K,V> entrySet;
835
836
837 /* ---------------- Public operations -------------- */
838
839 /**
840 * Creates a new, empty map with the default initial table size (16).
841 */
842 public ConcurrentHashMap() {
843 }
844
845 /**
846 * Creates a new, empty map with an initial table size
847 * accommodating the specified number of elements without the need
848 * to dynamically resize.
849 *
850 * @param initialCapacity The implementation performs internal
851 * sizing to accommodate this many elements.
852 * @throws IllegalArgumentException if the initial capacity of
853 * elements is negative
854 */
855 public ConcurrentHashMap(int initialCapacity) {
856 this(initialCapacity, LOAD_FACTOR, 1);
857 }
858
859 /**
860 * Creates a new map with the same mappings as the given map.
861 *
862 * @param m the map
863 */
864 @SuppressWarnings("this-escape")
865 public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
866 this(m.size());
867 putAll(m);
868 }
869
870 /**
871 * Creates a new, empty map with an initial table size based on
872 * the given number of elements ({@code initialCapacity}) and
873 * initial table density ({@code loadFactor}).
874 *
875 * @param initialCapacity the initial capacity. The implementation
876 * performs internal sizing to accommodate this many elements,
877 * given the specified load factor.
878 * @param loadFactor the load factor (table density) for
879 * establishing the initial table size
880 * @throws IllegalArgumentException if the initial capacity of
881 * elements is negative or the load factor is nonpositive
882 *
883 * @since 1.6
884 */
885 public ConcurrentHashMap(int initialCapacity, float loadFactor) {
886 this(initialCapacity, loadFactor, 1);
887 }
888
889 /**
890 * Creates a new, empty map with an initial table size based on
891 * the given number of elements ({@code initialCapacity}), initial
892 * table density ({@code loadFactor}), and number of concurrently
893 * updating threads ({@code concurrencyLevel}).
894 *
895 * @param initialCapacity the initial capacity. The implementation
896 * performs internal sizing to accommodate this many elements,
897 * given the specified load factor.
898 * @param loadFactor the load factor (table density) for
899 * establishing the initial table size
900 * @param concurrencyLevel the estimated number of concurrently
901 * updating threads. The implementation may use this value as
902 * a sizing hint.
903 * @throws IllegalArgumentException if the initial capacity is
904 * negative or the load factor or concurrencyLevel are
905 * nonpositive
906 */
907 public ConcurrentHashMap(int initialCapacity,
908 float loadFactor, int concurrencyLevel) {
909 if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
910 throw new IllegalArgumentException();
911 if (initialCapacity < concurrencyLevel) // Use at least as many bins
912 initialCapacity = concurrencyLevel; // as estimated threads
913 long size = (long)(1.0 + (long)initialCapacity / loadFactor);
914 int cap = (size >= (long)MAXIMUM_CAPACITY) ?
915 MAXIMUM_CAPACITY : tableSizeFor((int)size);
916 this.sizeCtl = cap;
917 }
918
919 // Original (since JDK1.2) Map methods
920
921 /**
922 * {@inheritDoc}
923 */
924 public int size() {
925 long n = sumCount();
926 return ((n < 0L) ? 0 :
927 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
928 (int)n);
929 }
930
931 /**
932 * {@inheritDoc}
933 */
934 public boolean isEmpty() {
935 return sumCount() <= 0L; // ignore transient negative values
936 }
937
938 /**
939 * Returns the value to which the specified key is mapped,
940 * or {@code null} if this map contains no mapping for the key.
941 *
942 * <p>More formally, if this map contains a mapping from a key
943 * {@code k} to a value {@code v} such that {@code key.equals(k)},
944 * then this method returns {@code v}; otherwise it returns
945 * {@code null}. (There can be at most one such mapping.)
946 *
947 * @throws NullPointerException if the specified key is null
948 */
949 public V get(Object key) {
950 Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
951 int h = spread(key.hashCode());
952 if ((tab = table) != null && (n = tab.length) > 0 &&
953 (e = tabAt(tab, (n - 1) & h)) != null) {
954 if ((eh = e.hash) == h) {
955 if ((ek = e.key) == key || (ek != null && key.equals(ek)))
956 return e.val;
957 }
958 else if (eh < 0)
959 return (p = e.find(h, key)) != null ? p.val : null;
960 while ((e = e.next) != null) {
961 if (e.hash == h &&
962 ((ek = e.key) == key || (ek != null && key.equals(ek))))
963 return e.val;
964 }
965 }
966 return null;
967 }
968
969 /**
970 * Tests if the specified object is a key in this table.
971 *
972 * @param key possible key
973 * @return {@code true} if and only if the specified object
974 * is a key in this table, as determined by the
975 * {@code equals} method; {@code false} otherwise
976 * @throws NullPointerException if the specified key is null
977 */
978 public boolean containsKey(Object key) {
979 return get(key) != null;
980 }
981
982 /**
983 * Returns {@code true} if this map maps one or more keys to the
984 * specified value. Note: This method may require a full traversal
985 * of the map, and is much slower than method {@code containsKey}.
986 *
987 * @param value value whose presence in this map is to be tested
988 * @return {@code true} if this map maps one or more keys to the
989 * specified value
990 * @throws NullPointerException if the specified value is null
991 */
992 public boolean containsValue(Object value) {
993 if (value == null)
994 throw new NullPointerException();
995 Node<K,V>[] t;
996 if ((t = table) != null) {
997 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
998 for (Node<K,V> p; (p = it.advance()) != null; ) {
999 V v;
1000 if ((v = p.val) == value || (v != null && value.equals(v)))
1001 return true;
1002 }
1003 }
1004 return false;
1005 }
1006
1007 /**
1008 * Maps the specified key to the specified value in this table.
1009 * Neither the key nor the value can be null.
1010 *
1011 * <p>The value can be retrieved by calling the {@code get} method
1012 * with a key that is equal to the original key.
1013 *
1014 * @param key key with which the specified value is to be associated
1015 * @param value value to be associated with the specified key
1016 * @return the previous value associated with {@code key}, or
1017 * {@code null} if there was no mapping for {@code key}
1018 * @throws NullPointerException if the specified key or value is null
1019 */
1020 public V put(K key, V value) {
1021 return putVal(key, value, false);
1022 }
1023
1024 /** Implementation for put and putIfAbsent */
1025 final V putVal(K key, V value, boolean onlyIfAbsent) {
1026 if (key == null || value == null) throw new NullPointerException();
1027 int hash = spread(key.hashCode());
1028 int binCount = 0;
1029 for (Node<K,V>[] tab = table;;) {
1030 Node<K,V> f; int n, i, fh; K fk; V fv;
1031 if (tab == null || (n = tab.length) == 0)
1032 tab = initTable();
1033 else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
1034 if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
1035 break; // no lock when adding to empty bin
1036 }
1037 else if ((fh = f.hash) == MOVED)
1038 tab = helpTransfer(tab, f);
1039 else if (onlyIfAbsent // check first node without acquiring lock
1040 && fh == hash
1041 && ((fk = f.key) == key || (fk != null && key.equals(fk)))
1042 && (fv = f.val) != null)
1043 return fv;
1044 else {
1045 V oldVal = null;
1046 synchronized (f) {
1047 if (tabAt(tab, i) == f) {
1048 if (fh >= 0) {
1049 binCount = 1;
1050 for (Node<K,V> e = f;; ++binCount) {
1051 K ek;
1052 if (e.hash == hash &&
1053 ((ek = e.key) == key ||
1054 (ek != null && key.equals(ek)))) {
1055 oldVal = e.val;
1056 if (!onlyIfAbsent)
1057 e.val = value;
1058 break;
1059 }
1060 Node<K,V> pred = e;
1061 if ((e = e.next) == null) {
1062 pred.next = new Node<K,V>(hash, key, value);
1063 break;
1064 }
1065 }
1066 }
1067 else if (f instanceof TreeBin) {
1068 Node<K,V> p;
1069 binCount = 2;
1070 if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
1071 value)) != null) {
1072 oldVal = p.val;
1073 if (!onlyIfAbsent)
1074 p.val = value;
1075 }
1076 }
1077 else if (f instanceof ReservationNode)
1078 throw new IllegalStateException("Recursive update");
1079 }
1080 }
1081 if (binCount != 0) {
1082 if (binCount >= TREEIFY_THRESHOLD)
1083 treeifyBin(tab, i);
1084 if (oldVal != null)
1085 return oldVal;
1086 break;
1087 }
1088 }
1089 }
1090 addCount(1L, binCount);
1091 return null;
1092 }
1093
1094 /**
1095 * Copies all of the mappings from the specified map to this one.
1096 * These mappings replace any mappings that this map had for any of the
1097 * keys currently in the specified map.
1098 *
1099 * @param m mappings to be stored in this map
1100 */
1101 public void putAll(Map<? extends K, ? extends V> m) {
1102 if (table != null) {
1103 tryPresize(size() + m.size());
1104 }
1105 for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1106 putVal(e.getKey(), e.getValue(), false);
1107 }
1108
1109 /**
1110 * Removes the key (and its corresponding value) from this map.
1111 * This method does nothing if the key is not in the map.
1112 *
1113 * @param key the key that needs to be removed
1114 * @return the previous value associated with {@code key}, or
1115 * {@code null} if there was no mapping for {@code key}
1116 * @throws NullPointerException if the specified key is null
1117 */
1118 public V remove(Object key) {
1119 return replaceNode(key, null, null);
1120 }
1121
1122 /**
1123 * Implementation for the four public remove/replace methods:
1124 * Replaces node value with v, conditional upon match of cv if
1125 * non-null. If resulting value is null, delete.
1126 */
1127 final V replaceNode(Object key, V value, Object cv) {
1128 int hash = spread(key.hashCode());
1129 for (Node<K,V>[] tab = table;;) {
1130 Node<K,V> f; int n, i, fh;
1131 if (tab == null || (n = tab.length) == 0 ||
1132 (f = tabAt(tab, i = (n - 1) & hash)) == null)
1133 break;
1134 else if ((fh = f.hash) == MOVED)
1135 tab = helpTransfer(tab, f);
1136 else {
1137 V oldVal = null;
1138 boolean validated = false;
1139 synchronized (f) {
1140 if (tabAt(tab, i) == f) {
1141 if (fh >= 0) {
1142 validated = true;
1143 for (Node<K,V> e = f, pred = null;;) {
1144 K ek;
1145 if (e.hash == hash &&
1146 ((ek = e.key) == key ||
1147 (ek != null && key.equals(ek)))) {
1148 V ev = e.val;
1149 if (cv == null || cv == ev ||
1150 (ev != null && cv.equals(ev))) {
1151 oldVal = ev;
1152 if (value != null)
1153 e.val = value;
1154 else if (pred != null)
1155 pred.next = e.next;
1156 else
1157 setTabAt(tab, i, e.next);
1158 }
1159 break;
1160 }
1161 pred = e;
1162 if ((e = e.next) == null)
1163 break;
1164 }
1165 }
1166 else if (f instanceof TreeBin) {
1167 validated = true;
1168 TreeBin<K,V> t = (TreeBin<K,V>)f;
1169 TreeNode<K,V> r, p;
1170 if ((r = t.root) != null &&
1171 (p = r.findTreeNode(hash, key, null)) != null) {
1172 V pv = p.val;
1173 if (cv == null || cv == pv ||
1174 (pv != null && cv.equals(pv))) {
1175 oldVal = pv;
1176 if (value != null)
1177 p.val = value;
1178 else if (t.removeTreeNode(p))
1179 setTabAt(tab, i, untreeify(t.first));
1180 }
1181 }
1182 }
1183 else if (f instanceof ReservationNode)
1184 throw new IllegalStateException("Recursive update");
1185 }
1186 }
1187 if (validated) {
1188 if (oldVal != null) {
1189 if (value == null)
1190 addCount(-1L, -1);
1191 return oldVal;
1192 }
1193 break;
1194 }
1195 }
1196 }
1197 return null;
1198 }
1199
1200 /**
1201 * Removes all of the mappings from this map.
1202 */
1203 public void clear() {
1204 long delta = 0L; // negative number of deletions
1205 int i = 0;
1206 Node<K,V>[] tab = table;
1207 while (tab != null && i < tab.length) {
1208 int fh;
1209 Node<K,V> f = tabAt(tab, i);
1210 if (f == null)
1211 ++i;
1212 else if ((fh = f.hash) == MOVED) {
1213 tab = helpTransfer(tab, f);
1214 i = 0; // restart
1215 }
1216 else {
1217 synchronized (f) {
1218 if (tabAt(tab, i) == f) {
1219 Node<K,V> p = (fh >= 0 ? f :
1220 (f instanceof TreeBin) ?
1221 ((TreeBin<K,V>)f).first : null);
1222 while (p != null) {
1223 --delta;
1224 p = p.next;
1225 }
1226 setTabAt(tab, i++, null);
1227 }
1228 }
1229 }
1230 }
1231 if (delta != 0L)
1232 addCount(delta, -1);
1233 }
1234
1235 /**
1236 * Returns a {@link Set} view of the keys contained in this map.
1237 * The set is backed by the map, so changes to the map are
1238 * reflected in the set, and vice-versa. The set supports element
1239 * removal, which removes the corresponding mapping from this map,
1240 * via the {@code Iterator.remove}, {@code Set.remove},
1241 * {@code removeAll}, {@code retainAll}, and {@code clear}
1242 * operations. It does not support the {@code add} or
1243 * {@code addAll} operations.
1244 *
1245 * <p>The view's iterators and spliterators are
1246 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1247 *
1248 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1249 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1250 *
1251 * @return the set view
1252 */
1253 public KeySetView<K,V> keySet() {
1254 KeySetView<K,V> ks;
1255 if ((ks = keySet) != null) return ks;
1256 return keySet = new KeySetView<K,V>(this, null);
1257 }
1258
1259 /**
1260 * Returns a {@link Collection} view of the values contained in this map.
1261 * The collection is backed by the map, so changes to the map are
1262 * reflected in the collection, and vice-versa. The collection
1263 * supports element removal, which removes the corresponding
1264 * mapping from this map, via the {@code Iterator.remove},
1265 * {@code Collection.remove}, {@code removeAll},
1266 * {@code retainAll}, and {@code clear} operations. It does not
1267 * support the {@code add} or {@code addAll} operations.
1268 *
1269 * <p>The view's iterators and spliterators are
1270 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1271 *
1272 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1273 * and {@link Spliterator#NONNULL}.
1274 *
1275 * @return the collection view
1276 */
1277 public Collection<V> values() {
1278 ValuesView<K,V> vs;
1279 if ((vs = values) != null) return vs;
1280 return values = new ValuesView<K,V>(this);
1281 }
1282
1283 /**
1284 * Returns a {@link Set} view of the mappings contained in this map.
1285 * The set is backed by the map, so changes to the map are
1286 * reflected in the set, and vice-versa. The set supports element
1287 * removal, which removes the corresponding mapping from the map,
1288 * via the {@code Iterator.remove}, {@code Set.remove},
1289 * {@code removeAll}, {@code retainAll}, and {@code clear}
1290 * operations.
1291 *
1292 * <p>The view's iterators and spliterators are
1293 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1294 *
1295 * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1296 * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1297 *
1298 * @return the set view
1299 */
1300 public Set<Map.Entry<K,V>> entrySet() {
1301 EntrySetView<K,V> es;
1302 if ((es = entrySet) != null) return es;
1303 return entrySet = new EntrySetView<K,V>(this);
1304 }
1305
1306 /**
1307 * Returns the hash code value for this {@link Map}, i.e.,
1308 * the sum of, for each key-value pair in the map,
1309 * {@code key.hashCode() ^ value.hashCode()}.
1310 *
1311 * @return the hash code value for this map
1312 */
1313 public int hashCode() {
1314 int h = 0;
1315 Node<K,V>[] t;
1316 if ((t = table) != null) {
1317 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1318 for (Node<K,V> p; (p = it.advance()) != null; )
1319 h += p.key.hashCode() ^ p.val.hashCode();
1320 }
1321 return h;
1322 }
1323
1324 /**
1325 * Returns a string representation of this map. The string
1326 * representation consists of a list of key-value mappings (in no
1327 * particular order) enclosed in braces ("{@code {}}"). Adjacent
1328 * mappings are separated by the characters {@code ", "} (comma
1329 * and space). Each key-value mapping is rendered as the key
1330 * followed by an equals sign ("{@code =}") followed by the
1331 * associated value.
1332 *
1333 * @return a string representation of this map
1334 */
1335 public String toString() {
1336 Node<K,V>[] t;
1337 int f = (t = table) == null ? 0 : t.length;
1338 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1339 StringBuilder sb = new StringBuilder();
1340 sb.append('{');
1341 Node<K,V> p;
1342 if ((p = it.advance()) != null) {
1343 for (;;) {
1344 K k = p.key;
1345 V v = p.val;
1346 sb.append(k == this ? "(this Map)" : k);
1347 sb.append('=');
1348 sb.append(v == this ? "(this Map)" : v);
1349 if ((p = it.advance()) == null)
1350 break;
1351 sb.append(',').append(' ');
1352 }
1353 }
1354 return sb.append('}').toString();
1355 }
1356
1357 /**
1358 * Compares the specified object with this map for equality.
1359 * Returns {@code true} if the given object is a map with the same
1360 * mappings as this map. This operation may return misleading
1361 * results if either map is concurrently modified during execution
1362 * of this method.
1363 *
1364 * @param o object to be compared for equality with this map
1365 * @return {@code true} if the specified object is equal to this map
1366 */
1367 public boolean equals(Object o) {
1368 if (o != this) {
1369 if (!(o instanceof Map))
1370 return false;
1371 Map<?,?> m = (Map<?,?>) o;
1372 Node<K,V>[] t;
1373 int f = (t = table) == null ? 0 : t.length;
1374 Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1375 for (Node<K,V> p; (p = it.advance()) != null; ) {
1376 V val = p.val;
1377 Object v = m.get(p.key);
1378 if (v == null || (v != val && !v.equals(val)))
1379 return false;
1380 }
1381 for (Map.Entry<?,?> e : m.entrySet()) {
1382 Object mk, mv, v;
1383 if ((mk = e.getKey()) == null ||
1384 (mv = e.getValue()) == null ||
1385 (v = get(mk)) == null ||
1386 (mv != v && !mv.equals(v)))
1387 return false;
1388 }
1389 }
1390 return true;
1391 }
1392
1393 /**
1394 * Stripped-down version of helper class used in previous version,
1395 * declared for the sake of serialization compatibility.
1396 */
1397 static class Segment<K,V> extends ReentrantLock implements Serializable {
1398 private static final long serialVersionUID = 2249069246763182397L;
1399 final float loadFactor;
1400 Segment(float lf) { this.loadFactor = lf; }
1401 }
1402
1403 /**
1404 * Saves this map to a stream (that is, serializes it).
1405 *
1406 * @param s the stream
1407 * @throws java.io.IOException if an I/O error occurs
1408 * @serialData
1409 * the serialized fields, followed by the key (Object) and value
1410 * (Object) for each key-value mapping, followed by a null pair.
1411 * The key-value mappings are emitted in no particular order.
1412 */
1413 private void writeObject(java.io.ObjectOutputStream s)
1414 throws java.io.IOException {
1415 // For serialization compatibility
1416 // Emulate segment calculation from previous version of this class
1417 int sshift = 0;
1418 int ssize = 1;
1419 while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1420 ++sshift;
1421 ssize <<= 1;
1422 }
1423 int segmentShift = 32 - sshift;
1424 int segmentMask = ssize - 1;
1425 @SuppressWarnings("unchecked")
1426 Segment<K,V>[] segments = (Segment<K,V>[])
1427 new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1428 for (int i = 0; i < segments.length; ++i)
1429 segments[i] = new Segment<K,V>(LOAD_FACTOR);
1430 java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1431 streamFields.put("segments", segments);
1432 streamFields.put("segmentShift", segmentShift);
1433 streamFields.put("segmentMask", segmentMask);
1434 s.writeFields();
1435
1436 Node<K,V>[] t;
1437 if ((t = table) != null) {
1438 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1439 for (Node<K,V> p; (p = it.advance()) != null; ) {
1440 s.writeObject(p.key);
1441 s.writeObject(p.val);
1442 }
1443 }
1444 s.writeObject(null);
1445 s.writeObject(null);
1446 }
1447
1448 /**
1449 * Reconstitutes this map from a stream (that is, deserializes it).
1450 * @param s the stream
1451 * @throws ClassNotFoundException if the class of a serialized object
1452 * could not be found
1453 * @throws java.io.IOException if an I/O error occurs
1454 */
1455 private void readObject(java.io.ObjectInputStream s)
1456 throws java.io.IOException, ClassNotFoundException {
1457 /*
1458 * To improve performance in typical cases, we create nodes
1459 * while reading, then place in table once size is known.
1460 * However, we must also validate uniqueness and deal with
1461 * overpopulated bins while doing so, which requires
1462 * specialized versions of putVal mechanics.
1463 */
1464 sizeCtl = -1; // force exclusion for table construction
1465 s.defaultReadObject();
1466 long size = 0L;
1467 Node<K,V> p = null;
1468 for (;;) {
1469 @SuppressWarnings("unchecked")
1470 K k = (K) s.readObject();
1471 @SuppressWarnings("unchecked")
1472 V v = (V) s.readObject();
1473 if (k != null && v != null) {
1474 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1475 ++size;
1476 }
1477 else
1478 break;
1479 }
1480 if (size == 0L)
1481 sizeCtl = 0;
1482 else {
1483 long ts = (long)(1.0 + size / LOAD_FACTOR);
1484 int n = (ts >= (long)MAXIMUM_CAPACITY) ?
1485 MAXIMUM_CAPACITY : tableSizeFor((int)ts);
1486 @SuppressWarnings("unchecked")
1487 Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1488 int mask = n - 1;
1489 long added = 0L;
1490 while (p != null) {
1491 boolean insertAtFront;
1492 Node<K,V> next = p.next, first;
1493 int h = p.hash, j = h & mask;
1494 if ((first = tabAt(tab, j)) == null)
1495 insertAtFront = true;
1496 else {
1497 K k = p.key;
1498 if (first.hash < 0) {
1499 TreeBin<K,V> t = (TreeBin<K,V>)first;
1500 if (t.putTreeVal(h, k, p.val) == null)
1501 ++added;
1502 insertAtFront = false;
1503 }
1504 else {
1505 int binCount = 0;
1506 insertAtFront = true;
1507 Node<K,V> q; K qk;
1508 for (q = first; q != null; q = q.next) {
1509 if (q.hash == h &&
1510 ((qk = q.key) == k ||
1511 (qk != null && k.equals(qk)))) {
1512 insertAtFront = false;
1513 break;
1514 }
1515 ++binCount;
1516 }
1517 if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1518 insertAtFront = false;
1519 ++added;
1520 p.next = first;
1521 TreeNode<K,V> hd = null, tl = null;
1522 for (q = p; q != null; q = q.next) {
1523 TreeNode<K,V> t = new TreeNode<K,V>
1524 (q.hash, q.key, q.val, null, null);
1525 if ((t.prev = tl) == null)
1526 hd = t;
1527 else
1528 tl.next = t;
1529 tl = t;
1530 }
1531 setTabAt(tab, j, new TreeBin<K,V>(hd));
1532 }
1533 }
1534 }
1535 if (insertAtFront) {
1536 ++added;
1537 p.next = first;
1538 setTabAt(tab, j, p);
1539 }
1540 p = next;
1541 }
1542 table = tab;
1543 sizeCtl = n - (n >>> 2);
1544 baseCount = added;
1545 }
1546 }
1547
1548 // ConcurrentMap methods
1549
1550 /**
1551 * {@inheritDoc ConcurrentMap}
1552 *
1553 * @return the previous value associated with the specified key,
1554 * or {@code null} if there was no mapping for the key
1555 * @throws NullPointerException if the specified key or value is null
1556 */
1557 public V putIfAbsent(K key, V value) {
1558 return putVal(key, value, true);
1559 }
1560
1561 /**
1562 * {@inheritDoc ConcurrentMap}
1563 *
1564 * @throws NullPointerException if the specified key is null
1565 * @return {@inheritDoc ConcurrentMap}
1566 */
1567 public boolean remove(Object key, Object value) {
1568 if (key == null)
1569 throw new NullPointerException();
1570 return value != null && replaceNode(key, null, value) != null;
1571 }
1572
1573 /**
1574 * {@inheritDoc ConcurrentMap}
1575 *
1576 * @throws NullPointerException if any of the arguments are null
1577 * @return {@inheritDoc ConcurrentMap}
1578 */
1579 public boolean replace(K key, V oldValue, V newValue) {
1580 if (key == null || oldValue == null || newValue == null)
1581 throw new NullPointerException();
1582 return replaceNode(key, newValue, oldValue) != null;
1583 }
1584
1585 /**
1586 * {@inheritDoc ConcurrentMap}
1587 *
1588 * @return the previous value associated with the specified key,
1589 * or {@code null} if there was no mapping for the key
1590 * @throws NullPointerException if the specified key or value is null
1591 */
1592 public V replace(K key, V value) {
1593 if (key == null || value == null)
1594 throw new NullPointerException();
1595 return replaceNode(key, value, null);
1596 }
1597
1598 // Overrides of JDK8+ Map extension method defaults
1599
1600 /**
1601 * Returns the value to which the specified key is mapped, or the
1602 * given default value if this map contains no mapping for the
1603 * key.
1604 *
1605 * @param key the key whose associated value is to be returned
1606 * @param defaultValue the value to return if this map contains
1607 * no mapping for the given key
1608 * @return the mapping for the key, if present; else the default value
1609 * @throws NullPointerException if the specified key is null
1610 */
1611 public V getOrDefault(Object key, V defaultValue) {
1612 V v;
1613 return (v = get(key)) == null ? defaultValue : v;
1614 }
1615
1616 public void forEach(BiConsumer<? super K, ? super V> action) {
1617 if (action == null) throw new NullPointerException();
1618 Node<K,V>[] t;
1619 if ((t = table) != null) {
1620 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1621 for (Node<K,V> p; (p = it.advance()) != null; ) {
1622 action.accept(p.key, p.val);
1623 }
1624 }
1625 }
1626
1627 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1628 if (function == null) throw new NullPointerException();
1629 Node<K,V>[] t;
1630 if ((t = table) != null) {
1631 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1632 for (Node<K,V> p; (p = it.advance()) != null; ) {
1633 V oldValue = p.val;
1634 for (K key = p.key;;) {
1635 V newValue = function.apply(key, oldValue);
1636 if (newValue == null)
1637 throw new NullPointerException();
1638 if (replaceNode(key, newValue, oldValue) != null ||
1639 (oldValue = get(key)) == null)
1640 break;
1641 }
1642 }
1643 }
1644 }
1645
1646 /**
1647 * Helper method for EntrySetView.removeIf.
1648 */
1649 boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1650 if (function == null) throw new NullPointerException();
1651 Node<K,V>[] t;
1652 boolean removed = false;
1653 if ((t = table) != null) {
1654 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1655 for (Node<K,V> p; (p = it.advance()) != null; ) {
1656 K k = p.key;
1657 V v = p.val;
1658 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1659 if (function.test(e) && replaceNode(k, null, v) != null)
1660 removed = true;
1661 }
1662 }
1663 return removed;
1664 }
1665
1666 /**
1667 * Helper method for ValuesView.removeIf.
1668 */
1669 boolean removeValueIf(Predicate<? super V> function) {
1670 if (function == null) throw new NullPointerException();
1671 Node<K,V>[] t;
1672 boolean removed = false;
1673 if ((t = table) != null) {
1674 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1675 for (Node<K,V> p; (p = it.advance()) != null; ) {
1676 K k = p.key;
1677 V v = p.val;
1678 if (function.test(v) && replaceNode(k, null, v) != null)
1679 removed = true;
1680 }
1681 }
1682 return removed;
1683 }
1684
1685 /**
1686 * If the specified key is not already associated with a value,
1687 * attempts to compute its value using the given mapping function
1688 * and enters it into this map unless {@code null}. The entire
1689 * method invocation is performed atomically. The supplied
1690 * function is invoked exactly once per invocation of this method
1691 * if the key is absent, else not at all. Some attempted update
1692 * operations on this map by other threads may be blocked while
1693 * computation is in progress, so the computation should be short
1694 * and simple.
1695 *
1696 * <p>The mapping function must not modify this map during computation.
1697 *
1698 * @param key key with which the specified value is to be associated
1699 * @param mappingFunction the function to compute a value
1700 * @return the current (existing or computed) value associated with
1701 * the specified key, or null if the computed value is null
1702 * @throws NullPointerException if the specified key or mappingFunction
1703 * is null
1704 * @throws IllegalStateException if the computation detectably
1705 * attempts a recursive update to this map that would
1706 * otherwise never complete
1707 * @throws RuntimeException or Error if the mappingFunction does so,
1708 * in which case the mapping is left unestablished
1709 */
1710 public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1711 if (key == null || mappingFunction == null)
1712 throw new NullPointerException();
1713 int h = spread(key.hashCode());
1714 V val = null;
1715 int binCount = 0;
1716 for (Node<K,V>[] tab = table;;) {
1717 Node<K,V> f; int n, i, fh; K fk; V fv;
1718 if (tab == null || (n = tab.length) == 0)
1719 tab = initTable();
1720 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1721 Node<K,V> r = new ReservationNode<K,V>();
1722 synchronized (r) {
1723 if (casTabAt(tab, i, null, r)) {
1724 binCount = 1;
1725 Node<K,V> node = null;
1726 try {
1727 if ((val = mappingFunction.apply(key)) != null)
1728 node = new Node<K,V>(h, key, val);
1729 } finally {
1730 setTabAt(tab, i, node);
1731 }
1732 }
1733 }
1734 if (binCount != 0)
1735 break;
1736 }
1737 else if ((fh = f.hash) == MOVED)
1738 tab = helpTransfer(tab, f);
1739 else if (fh == h // check first node without acquiring lock
1740 && ((fk = f.key) == key || (fk != null && key.equals(fk)))
1741 && (fv = f.val) != null)
1742 return fv;
1743 else {
1744 boolean added = false;
1745 synchronized (f) {
1746 if (tabAt(tab, i) == f) {
1747 if (fh >= 0) {
1748 binCount = 1;
1749 for (Node<K,V> e = f;; ++binCount) {
1750 K ek;
1751 if (e.hash == h &&
1752 ((ek = e.key) == key ||
1753 (ek != null && key.equals(ek)))) {
1754 val = e.val;
1755 break;
1756 }
1757 Node<K,V> pred = e;
1758 if ((e = e.next) == null) {
1759 if ((val = mappingFunction.apply(key)) != null) {
1760 if (pred.next != null)
1761 throw new IllegalStateException("Recursive update");
1762 added = true;
1763 pred.next = new Node<K,V>(h, key, val);
1764 }
1765 break;
1766 }
1767 }
1768 }
1769 else if (f instanceof TreeBin) {
1770 binCount = 2;
1771 TreeBin<K,V> t = (TreeBin<K,V>)f;
1772 TreeNode<K,V> r, p;
1773 if ((r = t.root) != null &&
1774 (p = r.findTreeNode(h, key, null)) != null)
1775 val = p.val;
1776 else if ((val = mappingFunction.apply(key)) != null) {
1777 added = true;
1778 t.putTreeVal(h, key, val);
1779 }
1780 }
1781 else if (f instanceof ReservationNode)
1782 throw new IllegalStateException("Recursive update");
1783 }
1784 }
1785 if (binCount != 0) {
1786 if (binCount >= TREEIFY_THRESHOLD)
1787 treeifyBin(tab, i);
1788 if (!added)
1789 return val;
1790 break;
1791 }
1792 }
1793 }
1794 if (val != null)
1795 addCount(1L, binCount);
1796 return val;
1797 }
1798
1799 /**
1800 * If the value for the specified key is present, attempts to
1801 * compute a new mapping given the key and its current mapped
1802 * value. The entire method invocation is performed atomically.
1803 * The supplied function is invoked exactly once per invocation of
1804 * this method if the key is present, else not at all. Some
1805 * attempted update operations on this map by other threads may be
1806 * blocked while computation is in progress, so the computation
1807 * should be short and simple.
1808 *
1809 * <p>The remapping function must not modify this map during computation.
1810 *
1811 * @param key key with which a value may be associated
1812 * @param remappingFunction the function to compute a value
1813 * @return the new value associated with the specified key, or null if none
1814 * @throws NullPointerException if the specified key or remappingFunction
1815 * is null
1816 * @throws IllegalStateException if the computation detectably
1817 * attempts a recursive update to this map that would
1818 * otherwise never complete
1819 * @throws RuntimeException or Error if the remappingFunction does so,
1820 * in which case the mapping is unchanged
1821 */
1822 public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1823 if (key == null || remappingFunction == null)
1824 throw new NullPointerException();
1825 int h = spread(key.hashCode());
1826 V val = null;
1827 int delta = 0;
1828 int binCount = 0;
1829 for (Node<K,V>[] tab = table;;) {
1830 Node<K,V> f; int n, i, fh;
1831 if (tab == null || (n = tab.length) == 0)
1832 tab = initTable();
1833 else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1834 break;
1835 else if ((fh = f.hash) == MOVED)
1836 tab = helpTransfer(tab, f);
1837 else {
1838 synchronized (f) {
1839 if (tabAt(tab, i) == f) {
1840 if (fh >= 0) {
1841 binCount = 1;
1842 for (Node<K,V> e = f, pred = null;; ++binCount) {
1843 K ek;
1844 if (e.hash == h &&
1845 ((ek = e.key) == key ||
1846 (ek != null && key.equals(ek)))) {
1847 val = remappingFunction.apply(key, e.val);
1848 if (val != null)
1849 e.val = val;
1850 else {
1851 delta = -1;
1852 Node<K,V> en = e.next;
1853 if (pred != null)
1854 pred.next = en;
1855 else
1856 setTabAt(tab, i, en);
1857 }
1858 break;
1859 }
1860 pred = e;
1861 if ((e = e.next) == null)
1862 break;
1863 }
1864 }
1865 else if (f instanceof TreeBin) {
1866 binCount = 2;
1867 TreeBin<K,V> t = (TreeBin<K,V>)f;
1868 TreeNode<K,V> r, p;
1869 if ((r = t.root) != null &&
1870 (p = r.findTreeNode(h, key, null)) != null) {
1871 val = remappingFunction.apply(key, p.val);
1872 if (val != null)
1873 p.val = val;
1874 else {
1875 delta = -1;
1876 if (t.removeTreeNode(p))
1877 setTabAt(tab, i, untreeify(t.first));
1878 }
1879 }
1880 }
1881 else if (f instanceof ReservationNode)
1882 throw new IllegalStateException("Recursive update");
1883 }
1884 }
1885 if (binCount != 0)
1886 break;
1887 }
1888 }
1889 if (delta != 0)
1890 addCount((long)delta, binCount);
1891 return val;
1892 }
1893
1894 /**
1895 * Attempts to compute a mapping for the specified key and its
1896 * current mapped value (or {@code null} if there is no current
1897 * mapping). The entire method invocation is performed atomically.
1898 * The supplied function is invoked exactly once per invocation of
1899 * this method. Some attempted update operations on this map by
1900 * other threads may be blocked while computation is in progress,
1901 * so the computation should be short and simple.
1902 *
1903 * <p>The remapping function must not modify this map during computation.
1904 *
1905 * @param key key with which the specified value is to be associated
1906 * @param remappingFunction the function to compute a value
1907 * @return the new value associated with the specified key, or null if none
1908 * @throws NullPointerException if the specified key or remappingFunction
1909 * is null
1910 * @throws IllegalStateException if the computation detectably
1911 * attempts a recursive update to this map that would
1912 * otherwise never complete
1913 * @throws RuntimeException or Error if the remappingFunction does so,
1914 * in which case the mapping is unchanged
1915 */
1916 public V compute(K key,
1917 BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1918 if (key == null || remappingFunction == null)
1919 throw new NullPointerException();
1920 int h = spread(key.hashCode());
1921 V val = null;
1922 int delta = 0;
1923 int binCount = 0;
1924 for (Node<K,V>[] tab = table;;) {
1925 Node<K,V> f; int n, i, fh;
1926 if (tab == null || (n = tab.length) == 0)
1927 tab = initTable();
1928 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1929 Node<K,V> r = new ReservationNode<K,V>();
1930 synchronized (r) {
1931 if (casTabAt(tab, i, null, r)) {
1932 binCount = 1;
1933 Node<K,V> node = null;
1934 try {
1935 if ((val = remappingFunction.apply(key, null)) != null) {
1936 delta = 1;
1937 node = new Node<K,V>(h, key, val);
1938 }
1939 } finally {
1940 setTabAt(tab, i, node);
1941 }
1942 }
1943 }
1944 if (binCount != 0)
1945 break;
1946 }
1947 else if ((fh = f.hash) == MOVED)
1948 tab = helpTransfer(tab, f);
1949 else {
1950 synchronized (f) {
1951 if (tabAt(tab, i) == f) {
1952 if (fh >= 0) {
1953 binCount = 1;
1954 for (Node<K,V> e = f, pred = null;; ++binCount) {
1955 K ek;
1956 if (e.hash == h &&
1957 ((ek = e.key) == key ||
1958 (ek != null && key.equals(ek)))) {
1959 val = remappingFunction.apply(key, e.val);
1960 if (val != null)
1961 e.val = val;
1962 else {
1963 delta = -1;
1964 Node<K,V> en = e.next;
1965 if (pred != null)
1966 pred.next = en;
1967 else
1968 setTabAt(tab, i, en);
1969 }
1970 break;
1971 }
1972 pred = e;
1973 if ((e = e.next) == null) {
1974 val = remappingFunction.apply(key, null);
1975 if (val != null) {
1976 if (pred.next != null)
1977 throw new IllegalStateException("Recursive update");
1978 delta = 1;
1979 pred.next = new Node<K,V>(h, key, val);
1980 }
1981 break;
1982 }
1983 }
1984 }
1985 else if (f instanceof TreeBin) {
1986 binCount = 1;
1987 TreeBin<K,V> t = (TreeBin<K,V>)f;
1988 TreeNode<K,V> r, p;
1989 if ((r = t.root) != null)
1990 p = r.findTreeNode(h, key, null);
1991 else
1992 p = null;
1993 V pv = (p == null) ? null : p.val;
1994 val = remappingFunction.apply(key, pv);
1995 if (val != null) {
1996 if (p != null)
1997 p.val = val;
1998 else {
1999 delta = 1;
2000 t.putTreeVal(h, key, val);
2001 }
2002 }
2003 else if (p != null) {
2004 delta = -1;
2005 if (t.removeTreeNode(p))
2006 setTabAt(tab, i, untreeify(t.first));
2007 }
2008 }
2009 else if (f instanceof ReservationNode)
2010 throw new IllegalStateException("Recursive update");
2011 }
2012 }
2013 if (binCount != 0) {
2014 if (binCount >= TREEIFY_THRESHOLD)
2015 treeifyBin(tab, i);
2016 break;
2017 }
2018 }
2019 }
2020 if (delta != 0)
2021 addCount((long)delta, binCount);
2022 return val;
2023 }
2024
2025 /**
2026 * If the specified key is not already associated with a
2027 * (non-null) value, associates it with the given value.
2028 * Otherwise, replaces the value with the results of the given
2029 * remapping function, or removes if {@code null}. The entire
2030 * method invocation is performed atomically. Some attempted
2031 * update operations on this map by other threads may be blocked
2032 * while computation is in progress, so the computation should be
2033 * short and simple, and must not attempt to update any other
2034 * mappings of this Map.
2035 *
2036 * @param key key with which the specified value is to be associated
2037 * @param value the value to use if absent
2038 * @param remappingFunction the function to recompute a value if present
2039 * @return the new value associated with the specified key, or null if none
2040 * @throws NullPointerException if the specified key or the
2041 * remappingFunction is null
2042 * @throws RuntimeException or Error if the remappingFunction does so,
2043 * in which case the mapping is unchanged
2044 */
2045 public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2046 if (key == null || value == null || remappingFunction == null)
2047 throw new NullPointerException();
2048 int h = spread(key.hashCode());
2049 V val = null;
2050 int delta = 0;
2051 int binCount = 0;
2052 for (Node<K,V>[] tab = table;;) {
2053 Node<K,V> f; int n, i, fh;
2054 if (tab == null || (n = tab.length) == 0)
2055 tab = initTable();
2056 else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
2057 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
2058 delta = 1;
2059 val = value;
2060 break;
2061 }
2062 }
2063 else if ((fh = f.hash) == MOVED)
2064 tab = helpTransfer(tab, f);
2065 else {
2066 synchronized (f) {
2067 if (tabAt(tab, i) == f) {
2068 if (fh >= 0) {
2069 binCount = 1;
2070 for (Node<K,V> e = f, pred = null;; ++binCount) {
2071 K ek;
2072 if (e.hash == h &&
2073 ((ek = e.key) == key ||
2074 (ek != null && key.equals(ek)))) {
2075 val = remappingFunction.apply(e.val, value);
2076 if (val != null)
2077 e.val = val;
2078 else {
2079 delta = -1;
2080 Node<K,V> en = e.next;
2081 if (pred != null)
2082 pred.next = en;
2083 else
2084 setTabAt(tab, i, en);
2085 }
2086 break;
2087 }
2088 pred = e;
2089 if ((e = e.next) == null) {
2090 delta = 1;
2091 val = value;
2092 pred.next = new Node<K,V>(h, key, val);
2093 break;
2094 }
2095 }
2096 }
2097 else if (f instanceof TreeBin) {
2098 binCount = 2;
2099 TreeBin<K,V> t = (TreeBin<K,V>)f;
2100 TreeNode<K,V> r = t.root;
2101 TreeNode<K,V> p = (r == null) ? null :
2102 r.findTreeNode(h, key, null);
2103 val = (p == null) ? value :
2104 remappingFunction.apply(p.val, value);
2105 if (val != null) {
2106 if (p != null)
2107 p.val = val;
2108 else {
2109 delta = 1;
2110 t.putTreeVal(h, key, val);
2111 }
2112 }
2113 else if (p != null) {
2114 delta = -1;
2115 if (t.removeTreeNode(p))
2116 setTabAt(tab, i, untreeify(t.first));
2117 }
2118 }
2119 else if (f instanceof ReservationNode)
2120 throw new IllegalStateException("Recursive update");
2121 }
2122 }
2123 if (binCount != 0) {
2124 if (binCount >= TREEIFY_THRESHOLD)
2125 treeifyBin(tab, i);
2126 break;
2127 }
2128 }
2129 }
2130 if (delta != 0)
2131 addCount((long)delta, binCount);
2132 return val;
2133 }
2134
2135 // Hashtable legacy methods
2136
2137 /**
2138 * Tests if some key maps into the specified value in this table.
2139 *
2140 * <p>Note that this method is identical in functionality to
2141 * {@link #containsValue(Object)}, and exists solely to ensure
2142 * full compatibility with class {@link java.util.Hashtable},
2143 * which supported this method prior to introduction of the
2144 * Java Collections Framework.
2145 *
2146 * @param value a value to search for
2147 * @return {@code true} if and only if some key maps to the
2148 * {@code value} argument in this table as
2149 * determined by the {@code equals} method;
2150 * {@code false} otherwise
2151 * @throws NullPointerException if the specified value is null
2152 */
2153 public boolean contains(Object value) {
2154 return containsValue(value);
2155 }
2156
2157 /**
2158 * Returns an enumeration of the keys in this table.
2159 *
2160 * @return an enumeration of the keys in this table
2161 * @see #keySet()
2162 */
2163 public Enumeration<K> keys() {
2164 Node<K,V>[] t;
2165 int f = (t = table) == null ? 0 : t.length;
2166 return new KeyIterator<K,V>(t, f, 0, f, this);
2167 }
2168
2169 /**
2170 * Returns an enumeration of the values in this table.
2171 *
2172 * @return an enumeration of the values in this table
2173 * @see #values()
2174 */
2175 public Enumeration<V> elements() {
2176 Node<K,V>[] t;
2177 int f = (t = table) == null ? 0 : t.length;
2178 return new ValueIterator<K,V>(t, f, 0, f, this);
2179 }
2180
2181 // ConcurrentHashMap-only methods
2182
2183 /**
2184 * Returns the number of mappings. This method should be used
2185 * instead of {@link #size} because a ConcurrentHashMap may
2186 * contain more mappings than can be represented as an int. The
2187 * value returned is an estimate; the actual count may differ if
2188 * there are concurrent insertions or removals.
2189 *
2190 * @return the number of mappings
2191 * @since 1.8
2192 */
2193 public long mappingCount() {
2194 long n = sumCount();
2195 return (n < 0L) ? 0L : n; // ignore transient negative values
2196 }
2197
2198 /**
2199 * Creates a new {@link Set} backed by a ConcurrentHashMap
2200 * from the given type to {@code Boolean.TRUE}.
2201 *
2202 * @param <K> the element type of the returned set
2203 * @return the new set
2204 * @since 1.8
2205 */
2206 public static <K> KeySetView<K,Boolean> newKeySet() {
2207 return new KeySetView<K,Boolean>
2208 (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2209 }
2210
2211 /**
2212 * Creates a new {@link Set} backed by a ConcurrentHashMap
2213 * from the given type to {@code Boolean.TRUE}.
2214 *
2215 * @param initialCapacity The implementation performs internal
2216 * sizing to accommodate this many elements.
2217 * @param <K> the element type of the returned set
2218 * @return the new set
2219 * @throws IllegalArgumentException if the initial capacity of
2220 * elements is negative
2221 * @since 1.8
2222 */
2223 public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2224 return new KeySetView<K,Boolean>
2225 (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2226 }
2227
2228 /**
2229 * Returns a {@link Set} view of the keys in this map, using the
2230 * given common mapped value for any additions (i.e., {@link
2231 * Collection#add} and {@link Collection#addAll(Collection)}).
2232 * This is of course only appropriate if it is acceptable to use
2233 * the same value for all additions from this view.
2234 *
2235 * @param mappedValue the mapped value to use for any additions
2236 * @return the set view
2237 * @throws NullPointerException if the mappedValue is null
2238 */
2239 public KeySetView<K,V> keySet(V mappedValue) {
2240 if (mappedValue == null)
2241 throw new NullPointerException();
2242 return new KeySetView<K,V>(this, mappedValue);
2243 }
2244
2245 /* ---------------- Special Nodes -------------- */
2246
2247 /**
2248 * A node inserted at head of bins during transfer operations.
2249 */
2250 static final class ForwardingNode<K,V> extends Node<K,V> {
2251 final Node<K,V>[] nextTable;
2252 ForwardingNode(Node<K,V>[] tab) {
2253 super(MOVED, null, null);
2254 this.nextTable = tab;
2255 }
2256
2257 Node<K,V> find(int h, Object k) {
2258 // loop to avoid arbitrarily deep recursion on forwarding nodes
2259 outer: for (Node<K,V>[] tab = nextTable;;) {
2260 Node<K,V> e; int n;
2261 if (k == null || tab == null || (n = tab.length) == 0 ||
2262 (e = tabAt(tab, (n - 1) & h)) == null)
2263 return null;
2264 for (;;) {
2265 int eh; K ek;
2266 if ((eh = e.hash) == h &&
2267 ((ek = e.key) == k || (ek != null && k.equals(ek))))
2268 return e;
2269 if (eh < 0) {
2270 if (e instanceof ForwardingNode) {
2271 tab = ((ForwardingNode<K,V>)e).nextTable;
2272 continue outer;
2273 }
2274 else
2275 return e.find(h, k);
2276 }
2277 if ((e = e.next) == null)
2278 return null;
2279 }
2280 }
2281 }
2282 }
2283
2284 /**
2285 * A place-holder node used in computeIfAbsent and compute.
2286 */
2287 static final class ReservationNode<K,V> extends Node<K,V> {
2288 ReservationNode() {
2289 super(RESERVED, null, null);
2290 }
2291
2292 Node<K,V> find(int h, Object k) {
2293 return null;
2294 }
2295 }
2296
2297 /* ---------------- Table Initialization and Resizing -------------- */
2298
2299 /**
2300 * Returns the stamp bits for resizing a table of size n.
2301 * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2302 */
2303 static final int resizeStamp(int n) {
2304 return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2305 }
2306
2307 /**
2308 * Initializes table, using the size recorded in sizeCtl.
2309 */
2310 private final Node<K,V>[] initTable() {
2311 Node<K,V>[] tab; int sc;
2312 while ((tab = table) == null || tab.length == 0) {
2313 if ((sc = sizeCtl) < 0)
2314 Thread.yield(); // lost initialization race; just spin
2315 else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2316 try {
2317 if ((tab = table) == null || tab.length == 0) {
2318 int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2319 @SuppressWarnings("unchecked")
2320 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2321 table = tab = nt;
2322 sc = n - (n >>> 2);
2323 }
2324 } finally {
2325 sizeCtl = sc;
2326 }
2327 break;
2328 }
2329 }
2330 return tab;
2331 }
2332
2333 /**
2334 * Adds to count, and if table is too small and not already
2335 * resizing, initiates transfer. If already resizing, helps
2336 * perform transfer if work is available. Rechecks occupancy
2337 * after a transfer to see if another resize is already needed
2338 * because resizings are lagging additions.
2339 *
2340 * @param x the count to add
2341 * @param check if <0, don't check resize, if <= 1 only check if uncontended
2342 */
2343 private final void addCount(long x, int check) {
2344 CounterCell[] cs; long b, s;
2345 if ((cs = counterCells) != null ||
2346 !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2347 CounterCell c; long v; int m;
2348 boolean uncontended = true;
2349 if (cs == null || (m = cs.length - 1) < 0 ||
2350 (c = cs[ThreadLocalRandom.getProbe() & m]) == null ||
2351 !(uncontended =
2352 U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))) {
2353 fullAddCount(x, uncontended);
2354 return;
2355 }
2356 if (check <= 1)
2357 return;
2358 s = sumCount();
2359 }
2360 if (check >= 0) {
2361 Node<K,V>[] tab, nt; int n, sc;
2362 while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2363 (n = tab.length) < MAXIMUM_CAPACITY) {
2364 int rs = resizeStamp(n) << RESIZE_STAMP_SHIFT;
2365 if (sc < 0) {
2366 if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
2367 (nt = nextTable) == null || transferIndex <= 0)
2368 break;
2369 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1))
2370 transfer(tab, nt);
2371 }
2372 else if (U.compareAndSetInt(this, SIZECTL, sc, rs + 2))
2373 transfer(tab, null);
2374 s = sumCount();
2375 }
2376 }
2377 }
2378
2379 /**
2380 * Helps transfer if a resize is in progress.
2381 */
2382 final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2383 Node<K,V>[] nextTab; int sc;
2384 if (tab != null && (f instanceof ForwardingNode) &&
2385 (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2386 int rs = resizeStamp(tab.length) << RESIZE_STAMP_SHIFT;
2387 while (nextTab == nextTable && table == tab &&
2388 (sc = sizeCtl) < 0) {
2389 if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
2390 transferIndex <= 0)
2391 break;
2392 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) {
2393 transfer(tab, nextTab);
2394 break;
2395 }
2396 }
2397 return nextTab;
2398 }
2399 return table;
2400 }
2401
2402 /**
2403 * Tries to presize table to accommodate the given number of elements.
2404 *
2405 * @param size number of elements (doesn't need to be perfectly accurate)
2406 */
2407 private final void tryPresize(int size) {
2408 int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2409 tableSizeFor(size + (size >>> 1) + 1);
2410 int sc;
2411 while ((sc = sizeCtl) >= 0) {
2412 Node<K,V>[] tab = table; int n;
2413 if (tab == null || (n = tab.length) == 0) {
2414 n = (sc > c) ? sc : c;
2415 if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2416 try {
2417 if (table == tab) {
2418 @SuppressWarnings("unchecked")
2419 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2420 table = nt;
2421 sc = n - (n >>> 2);
2422 }
2423 } finally {
2424 sizeCtl = sc;
2425 }
2426 }
2427 }
2428 else if (c <= sc || n >= MAXIMUM_CAPACITY)
2429 break;
2430 else if (tab == table) {
2431 int rs = resizeStamp(n);
2432 if (U.compareAndSetInt(this, SIZECTL, sc,
2433 (rs << RESIZE_STAMP_SHIFT) + 2))
2434 transfer(tab, null);
2435 }
2436 }
2437 }
2438
2439 /**
2440 * Moves and/or copies the nodes in each bin to new table. See
2441 * above for explanation.
2442 */
2443 private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2444 int n = tab.length, stride;
2445 if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2446 stride = MIN_TRANSFER_STRIDE; // subdivide range
2447 if (nextTab == null) { // initiating
2448 try {
2449 @SuppressWarnings("unchecked")
2450 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2451 nextTab = nt;
2452 } catch (Throwable ex) { // try to cope with OOME
2453 sizeCtl = Integer.MAX_VALUE;
2454 return;
2455 }
2456 nextTable = nextTab;
2457 transferIndex = n;
2458 }
2459 int nextn = nextTab.length;
2460 ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2461 boolean advance = true;
2462 boolean finishing = false; // to ensure sweep before committing nextTab
2463 for (int i = 0, bound = 0;;) {
2464 Node<K,V> f; int fh;
2465 while (advance) {
2466 int nextIndex, nextBound;
2467 if (--i >= bound || finishing)
2468 advance = false;
2469 else if ((nextIndex = transferIndex) <= 0) {
2470 i = -1;
2471 advance = false;
2472 }
2473 else if (U.compareAndSetInt
2474 (this, TRANSFERINDEX, nextIndex,
2475 nextBound = (nextIndex > stride ?
2476 nextIndex - stride : 0))) {
2477 bound = nextBound;
2478 i = nextIndex - 1;
2479 advance = false;
2480 }
2481 }
2482 if (i < 0 || i >= n || i + n >= nextn) {
2483 int sc;
2484 if (finishing) {
2485 nextTable = null;
2486 table = nextTab;
2487 sizeCtl = (n << 1) - (n >>> 1);
2488 return;
2489 }
2490 if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2491 if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2492 return;
2493 finishing = advance = true;
2494 i = n; // recheck before commit
2495 }
2496 }
2497 else if ((f = tabAt(tab, i)) == null)
2498 advance = casTabAt(tab, i, null, fwd);
2499 else if ((fh = f.hash) == MOVED)
2500 advance = true; // already processed
2501 else {
2502 synchronized (f) {
2503 if (tabAt(tab, i) == f) {
2504 Node<K,V> ln, hn;
2505 if (fh >= 0) {
2506 int runBit = fh & n;
2507 Node<K,V> lastRun = f;
2508 for (Node<K,V> p = f.next; p != null; p = p.next) {
2509 int b = p.hash & n;
2510 if (b != runBit) {
2511 runBit = b;
2512 lastRun = p;
2513 }
2514 }
2515 if (runBit == 0) {
2516 ln = lastRun;
2517 hn = null;
2518 }
2519 else {
2520 hn = lastRun;
2521 ln = null;
2522 }
2523 for (Node<K,V> p = f; p != lastRun; p = p.next) {
2524 int ph = p.hash; K pk = p.key; V pv = p.val;
2525 if ((ph & n) == 0)
2526 ln = new Node<K,V>(ph, pk, pv, ln);
2527 else
2528 hn = new Node<K,V>(ph, pk, pv, hn);
2529 }
2530 setTabAt(nextTab, i, ln);
2531 setTabAt(nextTab, i + n, hn);
2532 setTabAt(tab, i, fwd);
2533 advance = true;
2534 }
2535 else if (f instanceof TreeBin) {
2536 TreeBin<K,V> t = (TreeBin<K,V>)f;
2537 TreeNode<K,V> lo = null, loTail = null;
2538 TreeNode<K,V> hi = null, hiTail = null;
2539 int lc = 0, hc = 0;
2540 for (Node<K,V> e = t.first; e != null; e = e.next) {
2541 int h = e.hash;
2542 TreeNode<K,V> p = new TreeNode<K,V>
2543 (h, e.key, e.val, null, null);
2544 if ((h & n) == 0) {
2545 if ((p.prev = loTail) == null)
2546 lo = p;
2547 else
2548 loTail.next = p;
2549 loTail = p;
2550 ++lc;
2551 }
2552 else {
2553 if ((p.prev = hiTail) == null)
2554 hi = p;
2555 else
2556 hiTail.next = p;
2557 hiTail = p;
2558 ++hc;
2559 }
2560 }
2561 ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2562 (hc != 0) ? new TreeBin<K,V>(lo) : t;
2563 hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2564 (lc != 0) ? new TreeBin<K,V>(hi) : t;
2565 setTabAt(nextTab, i, ln);
2566 setTabAt(nextTab, i + n, hn);
2567 setTabAt(tab, i, fwd);
2568 advance = true;
2569 }
2570 else if (f instanceof ReservationNode)
2571 throw new IllegalStateException("Recursive update");
2572 }
2573 }
2574 }
2575 }
2576 }
2577
2578 /* ---------------- Counter support -------------- */
2579
2580 /**
2581 * A padded cell for distributing counts. Adapted from LongAdder
2582 * and Striped64. See their internal docs for explanation.
2583 */
2584 @jdk.internal.vm.annotation.Contended static final class CounterCell {
2585 volatile long value;
2586 CounterCell(long x) { value = x; }
2587 }
2588
2589 final long sumCount() {
2590 CounterCell[] cs = counterCells;
2591 long sum = baseCount;
2592 if (cs != null) {
2593 for (CounterCell c : cs)
2594 if (c != null)
2595 sum += c.value;
2596 }
2597 return sum;
2598 }
2599
2600 // See LongAdder version for explanation
2601 private final void fullAddCount(long x, boolean wasUncontended) {
2602 int h;
2603 if ((h = ThreadLocalRandom.getProbe()) == 0) {
2604 ThreadLocalRandom.localInit(); // force initialization
2605 h = ThreadLocalRandom.getProbe();
2606 wasUncontended = true;
2607 }
2608 boolean collide = false; // True if last slot nonempty
2609 for (;;) {
2610 CounterCell[] cs; CounterCell c; int n; long v;
2611 if ((cs = counterCells) != null && (n = cs.length) > 0) {
2612 if ((c = cs[(n - 1) & h]) == null) {
2613 if (cellsBusy == 0) { // Try to attach new Cell
2614 CounterCell r = new CounterCell(x); // Optimistic create
2615 if (cellsBusy == 0 &&
2616 U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
2617 boolean created = false;
2618 try { // Recheck under lock
2619 CounterCell[] rs; int m, j;
2620 if ((rs = counterCells) != null &&
2621 (m = rs.length) > 0 &&
2622 rs[j = (m - 1) & h] == null) {
2623 rs[j] = r;
2624 created = true;
2625 }
2626 } finally {
2627 cellsBusy = 0;
2628 }
2629 if (created)
2630 break;
2631 continue; // Slot is now non-empty
2632 }
2633 }
2634 collide = false;
2635 }
2636 else if (!wasUncontended) // CAS already known to fail
2637 wasUncontended = true; // Continue after rehash
2638 else if (U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))
2639 break;
2640 else if (counterCells != cs || n >= NCPU)
2641 collide = false; // At max size or stale
2642 else if (!collide)
2643 collide = true;
2644 else if (cellsBusy == 0 &&
2645 U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
2646 try {
2647 if (counterCells == cs) // Expand table unless stale
2648 counterCells = Arrays.copyOf(cs, n << 1);
2649 } finally {
2650 cellsBusy = 0;
2651 }
2652 collide = false;
2653 continue; // Retry with expanded table
2654 }
2655 h = ThreadLocalRandom.advanceProbe(h);
2656 }
2657 else if (cellsBusy == 0 && counterCells == cs &&
2658 U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
2659 boolean init = false;
2660 try { // Initialize table
2661 if (counterCells == cs) {
2662 CounterCell[] rs = new CounterCell[2];
2663 rs[h & 1] = new CounterCell(x);
2664 counterCells = rs;
2665 init = true;
2666 }
2667 } finally {
2668 cellsBusy = 0;
2669 }
2670 if (init)
2671 break;
2672 }
2673 else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
2674 break; // Fall back on using base
2675 }
2676 }
2677
2678 /* ---------------- Conversion from/to TreeBins -------------- */
2679
2680 /**
2681 * Replaces all linked nodes in bin at given index unless table is
2682 * too small, in which case resizes instead.
2683 */
2684 private final void treeifyBin(Node<K,V>[] tab, int index) {
2685 Node<K,V> b; int n;
2686 if (tab != null) {
2687 if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2688 tryPresize(n << 1);
2689 else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2690 synchronized (b) {
2691 if (tabAt(tab, index) == b) {
2692 TreeNode<K,V> hd = null, tl = null;
2693 for (Node<K,V> e = b; e != null; e = e.next) {
2694 TreeNode<K,V> p =
2695 new TreeNode<K,V>(e.hash, e.key, e.val,
2696 null, null);
2697 if ((p.prev = tl) == null)
2698 hd = p;
2699 else
2700 tl.next = p;
2701 tl = p;
2702 }
2703 setTabAt(tab, index, new TreeBin<K,V>(hd));
2704 }
2705 }
2706 }
2707 }
2708 }
2709
2710 /**
2711 * Returns a list of non-TreeNodes replacing those in given list.
2712 */
2713 static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2714 Node<K,V> hd = null, tl = null;
2715 for (Node<K,V> q = b; q != null; q = q.next) {
2716 Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
2717 if (tl == null)
2718 hd = p;
2719 else
2720 tl.next = p;
2721 tl = p;
2722 }
2723 return hd;
2724 }
2725
2726 /* ---------------- TreeNodes -------------- */
2727
2728 /**
2729 * Nodes for use in TreeBins.
2730 */
2731 static final class TreeNode<K,V> extends Node<K,V> {
2732 TreeNode<K,V> parent; // red-black tree links
2733 TreeNode<K,V> left;
2734 TreeNode<K,V> right;
2735 TreeNode<K,V> prev; // needed to unlink next upon deletion
2736 boolean red;
2737
2738 TreeNode(int hash, K key, V val, Node<K,V> next,
2739 TreeNode<K,V> parent) {
2740 super(hash, key, val, next);
2741 this.parent = parent;
2742 }
2743
2744 Node<K,V> find(int h, Object k) {
2745 return findTreeNode(h, k, null);
2746 }
2747
2748 /**
2749 * Returns the TreeNode (or null if not found) for the given key
2750 * starting at given root.
2751 */
2752 final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
2753 if (k != null) {
2754 TreeNode<K,V> p = this;
2755 do {
2756 int ph, dir; K pk; TreeNode<K,V> q;
2757 TreeNode<K,V> pl = p.left, pr = p.right;
2758 if ((ph = p.hash) > h)
2759 p = pl;
2760 else if (ph < h)
2761 p = pr;
2762 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2763 return p;
2764 else if (pl == null)
2765 p = pr;
2766 else if (pr == null)
2767 p = pl;
2768 else if ((kc != null ||
2769 (kc = comparableClassFor(k)) != null) &&
2770 (dir = compareComparables(kc, k, pk)) != 0)
2771 p = (dir < 0) ? pl : pr;
2772 else if ((q = pr.findTreeNode(h, k, kc)) != null)
2773 return q;
2774 else
2775 p = pl;
2776 } while (p != null);
2777 }
2778 return null;
2779 }
2780 }
2781
2782 /* ---------------- TreeBins -------------- */
2783
2784 /**
2785 * TreeNodes used at the heads of bins. TreeBins do not hold user
2786 * keys or values, but instead point to list of TreeNodes and
2787 * their root. They also maintain a parasitic read-write lock
2788 * forcing writers (who hold bin lock) to wait for readers (who do
2789 * not) to complete before tree restructuring operations.
2790 */
2791 static final class TreeBin<K,V> extends Node<K,V> {
2792 TreeNode<K,V> root;
2793 volatile TreeNode<K,V> first;
2794 volatile Thread waiter;
2795 volatile int lockState;
2796 // values for lockState
2797 static final int WRITER = 1; // set while holding write lock
2798 static final int WAITER = 2; // set when waiting for write lock
2799 static final int READER = 4; // increment value for setting read lock
2800
2801 /**
2802 * Tie-breaking utility for ordering insertions when equal
2803 * hashCodes and non-comparable. We don't require a total
2804 * order, just a consistent insertion rule to maintain
2805 * equivalence across rebalancings. Tie-breaking further than
2806 * necessary simplifies testing a bit.
2807 */
2808 static int tieBreakOrder(Object a, Object b) {
2809 int d;
2810 if (a == null || b == null ||
2811 (d = a.getClass().getName().
2812 compareTo(b.getClass().getName())) == 0)
2813 d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
2814 -1 : 1);
2815 return d;
2816 }
2817
2818 /**
2819 * Creates bin with initial set of nodes headed by b.
2820 */
2821 TreeBin(TreeNode<K,V> b) {
2822 super(TREEBIN, null, null);
2823 this.first = b;
2824 TreeNode<K,V> r = null;
2825 for (TreeNode<K,V> x = b, next; x != null; x = next) {
2826 next = (TreeNode<K,V>)x.next;
2827 x.left = x.right = null;
2828 if (r == null) {
2829 x.parent = null;
2830 x.red = false;
2831 r = x;
2832 }
2833 else {
2834 K k = x.key;
2835 int h = x.hash;
2836 Class<?> kc = null;
2837 for (TreeNode<K,V> p = r;;) {
2838 int dir, ph;
2839 K pk = p.key;
2840 if ((ph = p.hash) > h)
2841 dir = -1;
2842 else if (ph < h)
2843 dir = 1;
2844 else if ((kc == null &&
2845 (kc = comparableClassFor(k)) == null) ||
2846 (dir = compareComparables(kc, k, pk)) == 0)
2847 dir = tieBreakOrder(k, pk);
2848 TreeNode<K,V> xp = p;
2849 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2850 x.parent = xp;
2851 if (dir <= 0)
2852 xp.left = x;
2853 else
2854 xp.right = x;
2855 r = balanceInsertion(r, x);
2856 break;
2857 }
2858 }
2859 }
2860 }
2861 this.root = r;
2862 assert checkInvariants(root);
2863 }
2864
2865 /**
2866 * Acquires write lock for tree restructuring.
2867 */
2868 private final void lockRoot() {
2869 if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
2870 contendedLock(); // offload to separate method
2871 }
2872
2873 /**
2874 * Releases write lock for tree restructuring.
2875 */
2876 private final void unlockRoot() {
2877 lockState = 0;
2878 }
2879
2880 /**
2881 * Possibly blocks awaiting root lock.
2882 */
2883 private final void contendedLock() {
2884 Thread current = Thread.currentThread(), w;
2885 for (int s;;) {
2886 if (((s = lockState) & ~WAITER) == 0) {
2887 if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
2888 if (waiter == current)
2889 U.compareAndSetReference(this, WAITERTHREAD, current, null);
2890 return;
2891 }
2892 }
2893 else if ((s & WAITER) == 0)
2894 U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER);
2895 else if ((w = waiter) == null)
2896 U.compareAndSetReference(this, WAITERTHREAD, null, current);
2897 else if (w == current)
2898 LockSupport.park(this);
2899 }
2900 }
2901
2902 /**
2903 * Returns matching node or null if none. Tries to search
2904 * using tree comparisons from root, but continues linear
2905 * search when lock not available.
2906 */
2907 final Node<K,V> find(int h, Object k) {
2908 if (k != null) {
2909 for (Node<K,V> e = first; e != null; ) {
2910 int s; K ek;
2911 if (((s = lockState) & (WAITER|WRITER)) != 0) {
2912 if (e.hash == h &&
2913 ((ek = e.key) == k || (ek != null && k.equals(ek))))
2914 return e;
2915 e = e.next;
2916 }
2917 else if (U.compareAndSetInt(this, LOCKSTATE, s,
2918 s + READER)) {
2919 TreeNode<K,V> r, p;
2920 try {
2921 p = ((r = root) == null ? null :
2922 r.findTreeNode(h, k, null));
2923 } finally {
2924 Thread w;
2925 if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
2926 (READER|WAITER) && (w = waiter) != null)
2927 LockSupport.unpark(w);
2928 }
2929 return p;
2930 }
2931 }
2932 }
2933 return null;
2934 }
2935
2936 /**
2937 * Finds or adds a node.
2938 * @return null if added
2939 */
2940 final TreeNode<K,V> putTreeVal(int h, K k, V v) {
2941 Class<?> kc = null;
2942 boolean searched = false;
2943 for (TreeNode<K,V> p = root;;) {
2944 int dir, ph; K pk;
2945 if (p == null) {
2946 first = root = new TreeNode<K,V>(h, k, v, null, null);
2947 break;
2948 }
2949 else if ((ph = p.hash) > h)
2950 dir = -1;
2951 else if (ph < h)
2952 dir = 1;
2953 else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
2954 return p;
2955 else if ((kc == null &&
2956 (kc = comparableClassFor(k)) == null) ||
2957 (dir = compareComparables(kc, k, pk)) == 0) {
2958 if (!searched) {
2959 TreeNode<K,V> q, ch;
2960 searched = true;
2961 if (((ch = p.left) != null &&
2962 (q = ch.findTreeNode(h, k, kc)) != null) ||
2963 ((ch = p.right) != null &&
2964 (q = ch.findTreeNode(h, k, kc)) != null))
2965 return q;
2966 }
2967 dir = tieBreakOrder(k, pk);
2968 }
2969
2970 TreeNode<K,V> xp = p;
2971 if ((p = (dir <= 0) ? p.left : p.right) == null) {
2972 TreeNode<K,V> x, f = first;
2973 first = x = new TreeNode<K,V>(h, k, v, f, xp);
2974 if (f != null)
2975 f.prev = x;
2976 if (dir <= 0)
2977 xp.left = x;
2978 else
2979 xp.right = x;
2980 if (!xp.red)
2981 x.red = true;
2982 else {
2983 lockRoot();
2984 try {
2985 root = balanceInsertion(root, x);
2986 } finally {
2987 unlockRoot();
2988 }
2989 }
2990 break;
2991 }
2992 }
2993 assert checkInvariants(root);
2994 return null;
2995 }
2996
2997 /**
2998 * Removes the given node, that must be present before this
2999 * call. This is messier than typical red-black deletion code
3000 * because we cannot swap the contents of an interior node
3001 * with a leaf successor that is pinned by "next" pointers
3002 * that are accessible independently of lock. So instead we
3003 * swap the tree linkages.
3004 *
3005 * @return true if now too small, so should be untreeified
3006 */
3007 final boolean removeTreeNode(TreeNode<K,V> p) {
3008 TreeNode<K,V> next = (TreeNode<K,V>)p.next;
3009 TreeNode<K,V> pred = p.prev; // unlink traversal pointers
3010 TreeNode<K,V> r, rl;
3011 if (pred == null)
3012 first = next;
3013 else
3014 pred.next = next;
3015 if (next != null)
3016 next.prev = pred;
3017 if (first == null) {
3018 root = null;
3019 return true;
3020 }
3021 if ((r = root) == null || r.right == null || // too small
3022 (rl = r.left) == null || rl.left == null)
3023 return true;
3024 lockRoot();
3025 try {
3026 TreeNode<K,V> replacement;
3027 TreeNode<K,V> pl = p.left;
3028 TreeNode<K,V> pr = p.right;
3029 if (pl != null && pr != null) {
3030 TreeNode<K,V> s = pr, sl;
3031 while ((sl = s.left) != null) // find successor
3032 s = sl;
3033 boolean c = s.red; s.red = p.red; p.red = c; // swap colors
3034 TreeNode<K,V> sr = s.right;
3035 TreeNode<K,V> pp = p.parent;
3036 if (s == pr) { // p was s's direct parent
3037 p.parent = s;
3038 s.right = p;
3039 }
3040 else {
3041 TreeNode<K,V> sp = s.parent;
3042 if ((p.parent = sp) != null) {
3043 if (s == sp.left)
3044 sp.left = p;
3045 else
3046 sp.right = p;
3047 }
3048 if ((s.right = pr) != null)
3049 pr.parent = s;
3050 }
3051 p.left = null;
3052 if ((p.right = sr) != null)
3053 sr.parent = p;
3054 if ((s.left = pl) != null)
3055 pl.parent = s;
3056 if ((s.parent = pp) == null)
3057 r = s;
3058 else if (p == pp.left)
3059 pp.left = s;
3060 else
3061 pp.right = s;
3062 if (sr != null)
3063 replacement = sr;
3064 else
3065 replacement = p;
3066 }
3067 else if (pl != null)
3068 replacement = pl;
3069 else if (pr != null)
3070 replacement = pr;
3071 else
3072 replacement = p;
3073 if (replacement != p) {
3074 TreeNode<K,V> pp = replacement.parent = p.parent;
3075 if (pp == null)
3076 r = replacement;
3077 else if (p == pp.left)
3078 pp.left = replacement;
3079 else
3080 pp.right = replacement;
3081 p.left = p.right = p.parent = null;
3082 }
3083
3084 root = (p.red) ? r : balanceDeletion(r, replacement);
3085
3086 if (p == replacement) { // detach pointers
3087 TreeNode<K,V> pp;
3088 if ((pp = p.parent) != null) {
3089 if (p == pp.left)
3090 pp.left = null;
3091 else if (p == pp.right)
3092 pp.right = null;
3093 p.parent = null;
3094 }
3095 }
3096 } finally {
3097 unlockRoot();
3098 }
3099 assert checkInvariants(root);
3100 return false;
3101 }
3102
3103 /* ------------------------------------------------------------ */
3104 // Red-black tree methods, all adapted from CLR
3105
3106 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
3107 TreeNode<K,V> p) {
3108 TreeNode<K,V> r, pp, rl;
3109 if (p != null && (r = p.right) != null) {
3110 if ((rl = p.right = r.left) != null)
3111 rl.parent = p;
3112 if ((pp = r.parent = p.parent) == null)
3113 (root = r).red = false;
3114 else if (pp.left == p)
3115 pp.left = r;
3116 else
3117 pp.right = r;
3118 r.left = p;
3119 p.parent = r;
3120 }
3121 return root;
3122 }
3123
3124 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
3125 TreeNode<K,V> p) {
3126 TreeNode<K,V> l, pp, lr;
3127 if (p != null && (l = p.left) != null) {
3128 if ((lr = p.left = l.right) != null)
3129 lr.parent = p;
3130 if ((pp = l.parent = p.parent) == null)
3131 (root = l).red = false;
3132 else if (pp.right == p)
3133 pp.right = l;
3134 else
3135 pp.left = l;
3136 l.right = p;
3137 p.parent = l;
3138 }
3139 return root;
3140 }
3141
3142 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
3143 TreeNode<K,V> x) {
3144 x.red = true;
3145 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
3146 if ((xp = x.parent) == null) {
3147 x.red = false;
3148 return x;
3149 }
3150 else if (!xp.red || (xpp = xp.parent) == null)
3151 return root;
3152 if (xp == (xppl = xpp.left)) {
3153 if ((xppr = xpp.right) != null && xppr.red) {
3154 xppr.red = false;
3155 xp.red = false;
3156 xpp.red = true;
3157 x = xpp;
3158 }
3159 else {
3160 if (x == xp.right) {
3161 root = rotateLeft(root, x = xp);
3162 xpp = (xp = x.parent) == null ? null : xp.parent;
3163 }
3164 if (xp != null) {
3165 xp.red = false;
3166 if (xpp != null) {
3167 xpp.red = true;
3168 root = rotateRight(root, xpp);
3169 }
3170 }
3171 }
3172 }
3173 else {
3174 if (xppl != null && xppl.red) {
3175 xppl.red = false;
3176 xp.red = false;
3177 xpp.red = true;
3178 x = xpp;
3179 }
3180 else {
3181 if (x == xp.left) {
3182 root = rotateRight(root, x = xp);
3183 xpp = (xp = x.parent) == null ? null : xp.parent;
3184 }
3185 if (xp != null) {
3186 xp.red = false;
3187 if (xpp != null) {
3188 xpp.red = true;
3189 root = rotateLeft(root, xpp);
3190 }
3191 }
3192 }
3193 }
3194 }
3195 }
3196
3197 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
3198 TreeNode<K,V> x) {
3199 for (TreeNode<K,V> xp, xpl, xpr;;) {
3200 if (x == null || x == root)
3201 return root;
3202 else if ((xp = x.parent) == null) {
3203 x.red = false;
3204 return x;
3205 }
3206 else if (x.red) {
3207 x.red = false;
3208 return root;
3209 }
3210 else if ((xpl = xp.left) == x) {
3211 if ((xpr = xp.right) != null && xpr.red) {
3212 xpr.red = false;
3213 xp.red = true;
3214 root = rotateLeft(root, xp);
3215 xpr = (xp = x.parent) == null ? null : xp.right;
3216 }
3217 if (xpr == null)
3218 x = xp;
3219 else {
3220 TreeNode<K,V> sl = xpr.left, sr = xpr.right;
3221 if ((sr == null || !sr.red) &&
3222 (sl == null || !sl.red)) {
3223 xpr.red = true;
3224 x = xp;
3225 }
3226 else {
3227 if (sr == null || !sr.red) {
3228 if (sl != null)
3229 sl.red = false;
3230 xpr.red = true;
3231 root = rotateRight(root, xpr);
3232 xpr = (xp = x.parent) == null ?
3233 null : xp.right;
3234 }
3235 if (xpr != null) {
3236 xpr.red = (xp == null) ? false : xp.red;
3237 if ((sr = xpr.right) != null)
3238 sr.red = false;
3239 }
3240 if (xp != null) {
3241 xp.red = false;
3242 root = rotateLeft(root, xp);
3243 }
3244 x = root;
3245 }
3246 }
3247 }
3248 else { // symmetric
3249 if (xpl != null && xpl.red) {
3250 xpl.red = false;
3251 xp.red = true;
3252 root = rotateRight(root, xp);
3253 xpl = (xp = x.parent) == null ? null : xp.left;
3254 }
3255 if (xpl == null)
3256 x = xp;
3257 else {
3258 TreeNode<K,V> sl = xpl.left, sr = xpl.right;
3259 if ((sl == null || !sl.red) &&
3260 (sr == null || !sr.red)) {
3261 xpl.red = true;
3262 x = xp;
3263 }
3264 else {
3265 if (sl == null || !sl.red) {
3266 if (sr != null)
3267 sr.red = false;
3268 xpl.red = true;
3269 root = rotateLeft(root, xpl);
3270 xpl = (xp = x.parent) == null ?
3271 null : xp.left;
3272 }
3273 if (xpl != null) {
3274 xpl.red = (xp == null) ? false : xp.red;
3275 if ((sl = xpl.left) != null)
3276 sl.red = false;
3277 }
3278 if (xp != null) {
3279 xp.red = false;
3280 root = rotateRight(root, xp);
3281 }
3282 x = root;
3283 }
3284 }
3285 }
3286 }
3287 }
3288
3289 /**
3290 * Checks invariants recursively for the tree of Nodes rooted at t.
3291 */
3292 static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
3293 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
3294 tb = t.prev, tn = (TreeNode<K,V>)t.next;
3295 if (tb != null && tb.next != t)
3296 return false;
3297 if (tn != null && tn.prev != t)
3298 return false;
3299 if (tp != null && t != tp.left && t != tp.right)
3300 return false;
3301 if (tl != null && (tl.parent != t || tl.hash > t.hash))
3302 return false;
3303 if (tr != null && (tr.parent != t || tr.hash < t.hash))
3304 return false;
3305 if (t.red && tl != null && tl.red && tr != null && tr.red)
3306 return false;
3307 if (tl != null && !checkInvariants(tl))
3308 return false;
3309 if (tr != null && !checkInvariants(tr))
3310 return false;
3311 return true;
3312 }
3313
3314 private static final long LOCKSTATE
3315 = U.objectFieldOffset(TreeBin.class, "lockState");
3316 private static final long WAITERTHREAD
3317 = U.objectFieldOffset(TreeBin.class, "waiter");
3318 }
3319
3320 /* ----------------Table Traversal -------------- */
3321
3322 /**
3323 * Records the table, its length, and current traversal index for a
3324 * traverser that must process a region of a forwarded table before
3325 * proceeding with current table.
3326 */
3327 static final class TableStack<K,V> {
3328 int length;
3329 int index;
3330 Node<K,V>[] tab;
3331 TableStack<K,V> next;
3332 }
3333
3334 /**
3335 * Encapsulates traversal for methods such as containsValue; also
3336 * serves as a base class for other iterators and spliterators.
3337 *
3338 * Method advance visits once each still-valid node that was
3339 * reachable upon iterator construction. It might miss some that
3340 * were added to a bin after the bin was visited, which is OK wrt
3341 * consistency guarantees. Maintaining this property in the face
3342 * of possible ongoing resizes requires a fair amount of
3343 * bookkeeping state that is difficult to optimize away amidst
3344 * volatile accesses. Even so, traversal maintains reasonable
3345 * throughput.
3346 *
3347 * Normally, iteration proceeds bin-by-bin traversing lists.
3348 * However, if the table has been resized, then all future steps
3349 * must traverse both the bin at the current index as well as at
3350 * (index + baseSize); and so on for further resizings. To
3351 * paranoically cope with potential sharing by users of iterators
3352 * across threads, iteration terminates if a bounds checks fails
3353 * for a table read.
3354 */
3355 static class Traverser<K,V> {
3356 Node<K,V>[] tab; // current table; updated if resized
3357 Node<K,V> next; // the next entry to use
3358 TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
3359 int index; // index of bin to use next
3360 int baseIndex; // current index of initial table
3361 int baseLimit; // index bound for initial table
3362 final int baseSize; // initial table size
3363
3364 Traverser(Node<K,V>[] tab, int size, int index, int limit) {
3365 this.tab = tab;
3366 this.baseSize = size;
3367 this.baseIndex = this.index = index;
3368 this.baseLimit = limit;
3369 this.next = null;
3370 }
3371
3372 /**
3373 * Advances if possible, returning next valid node, or null if none.
3374 */
3375 final Node<K,V> advance() {
3376 Node<K,V> e;
3377 if ((e = next) != null)
3378 e = e.next;
3379 for (;;) {
3380 Node<K,V>[] t; int i, n; // must use locals in checks
3381 if (e != null)
3382 return next = e;
3383 if (baseIndex >= baseLimit || (t = tab) == null ||
3384 (n = t.length) <= (i = index) || i < 0)
3385 return next = null;
3386 if ((e = tabAt(t, i)) != null && e.hash < 0) {
3387 if (e instanceof ForwardingNode) {
3388 tab = ((ForwardingNode<K,V>)e).nextTable;
3389 e = null;
3390 pushState(t, i, n);
3391 continue;
3392 }
3393 else if (e instanceof TreeBin)
3394 e = ((TreeBin<K,V>)e).first;
3395 else
3396 e = null;
3397 }
3398 if (stack != null)
3399 recoverState(n);
3400 else if ((index = i + baseSize) >= n)
3401 index = ++baseIndex; // visit upper slots if present
3402 }
3403 }
3404
3405 /**
3406 * Saves traversal state upon encountering a forwarding node.
3407 */
3408 private void pushState(Node<K,V>[] t, int i, int n) {
3409 TableStack<K,V> s = spare; // reuse if possible
3410 if (s != null)
3411 spare = s.next;
3412 else
3413 s = new TableStack<K,V>();
3414 s.tab = t;
3415 s.length = n;
3416 s.index = i;
3417 s.next = stack;
3418 stack = s;
3419 }
3420
3421 /**
3422 * Possibly pops traversal state.
3423 *
3424 * @param n length of current table
3425 */
3426 private void recoverState(int n) {
3427 TableStack<K,V> s; int len;
3428 while ((s = stack) != null && (index += (len = s.length)) >= n) {
3429 n = len;
3430 index = s.index;
3431 tab = s.tab;
3432 s.tab = null;
3433 TableStack<K,V> next = s.next;
3434 s.next = spare; // save for reuse
3435 stack = next;
3436 spare = s;
3437 }
3438 if (s == null && (index += baseSize) >= n)
3439 index = ++baseIndex;
3440 }
3441 }
3442
3443 /**
3444 * Base of key, value, and entry Iterators. Adds fields to
3445 * Traverser to support iterator.remove.
3446 */
3447 static class BaseIterator<K,V> extends Traverser<K,V> {
3448 final ConcurrentHashMap<K,V> map;
3449 Node<K,V> lastReturned;
3450 BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3451 ConcurrentHashMap<K,V> map) {
3452 super(tab, size, index, limit);
3453 this.map = map;
3454 advance();
3455 }
3456
3457 public final boolean hasNext() { return next != null; }
3458 public final boolean hasMoreElements() { return next != null; }
3459
3460 public final void remove() {
3461 Node<K,V> p;
3462 if ((p = lastReturned) == null)
3463 throw new IllegalStateException();
3464 lastReturned = null;
3465 map.replaceNode(p.key, null, null);
3466 }
3467 }
3468
3469 static final class KeyIterator<K,V> extends BaseIterator<K,V>
3470 implements Iterator<K>, Enumeration<K> {
3471 KeyIterator(Node<K,V>[] tab, int size, int index, int limit,
3472 ConcurrentHashMap<K,V> map) {
3473 super(tab, size, index, limit, map);
3474 }
3475
3476 public final K next() {
3477 Node<K,V> p;
3478 if ((p = next) == null)
3479 throw new NoSuchElementException();
3480 K k = p.key;
3481 lastReturned = p;
3482 advance();
3483 return k;
3484 }
3485
3486 public final K nextElement() { return next(); }
3487 }
3488
3489 static final class ValueIterator<K,V> extends BaseIterator<K,V>
3490 implements Iterator<V>, Enumeration<V> {
3491 ValueIterator(Node<K,V>[] tab, int size, int index, int limit,
3492 ConcurrentHashMap<K,V> map) {
3493 super(tab, size, index, limit, map);
3494 }
3495
3496 public final V next() {
3497 Node<K,V> p;
3498 if ((p = next) == null)
3499 throw new NoSuchElementException();
3500 V v = p.val;
3501 lastReturned = p;
3502 advance();
3503 return v;
3504 }
3505
3506 public final V nextElement() { return next(); }
3507 }
3508
3509 static final class EntryIterator<K,V> extends BaseIterator<K,V>
3510 implements Iterator<Map.Entry<K,V>> {
3511 EntryIterator(Node<K,V>[] tab, int size, int index, int limit,
3512 ConcurrentHashMap<K,V> map) {
3513 super(tab, size, index, limit, map);
3514 }
3515
3516 public final Map.Entry<K,V> next() {
3517 Node<K,V> p;
3518 if ((p = next) == null)
3519 throw new NoSuchElementException();
3520 K k = p.key;
3521 V v = p.val;
3522 lastReturned = p;
3523 advance();
3524 return new MapEntry<K,V>(k, v, map);
3525 }
3526 }
3527
3528 /**
3529 * Exported Entry for EntryIterator.
3530 */
3531 static final class MapEntry<K,V> implements Map.Entry<K,V> {
3532 final K key; // non-null
3533 V val; // non-null
3534 final ConcurrentHashMap<K,V> map;
3535 MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3536 this.key = key;
3537 this.val = val;
3538 this.map = map;
3539 }
3540 public K getKey() { return key; }
3541 public V getValue() { return val; }
3542 public int hashCode() { return key.hashCode() ^ val.hashCode(); }
3543 public String toString() {
3544 return Helpers.mapEntryToString(key, val);
3545 }
3546
3547 public boolean equals(Object o) {
3548 Object k, v; Map.Entry<?,?> e;
3549 return ((o instanceof Map.Entry) &&
3550 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3551 (v = e.getValue()) != null &&
3552 (k == key || k.equals(key)) &&
3553 (v == val || v.equals(val)));
3554 }
3555
3556 /**
3557 * Sets our entry's value and writes through to the map. The
3558 * value to return is somewhat arbitrary here. Since we do not
3559 * necessarily track asynchronous changes, the most recent
3560 * "previous" value could be different from what we return (or
3561 * could even have been removed, in which case the put will
3562 * re-establish). We do not and cannot guarantee more.
3563 */
3564 public V setValue(V value) {
3565 if (value == null) throw new NullPointerException();
3566 V v = val;
3567 val = value;
3568 map.put(key, value);
3569 return v;
3570 }
3571 }
3572
3573 static final class KeySpliterator<K,V> extends Traverser<K,V>
3574 implements Spliterator<K> {
3575 long est; // size estimate
3576 KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3577 long est) {
3578 super(tab, size, index, limit);
3579 this.est = est;
3580 }
3581
3582 public KeySpliterator<K,V> trySplit() {
3583 int i, f, h;
3584 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3585 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3586 f, est >>>= 1);
3587 }
3588
3589 public void forEachRemaining(Consumer<? super K> action) {
3590 if (action == null) throw new NullPointerException();
3591 for (Node<K,V> p; (p = advance()) != null;)
3592 action.accept(p.key);
3593 }
3594
3595 public boolean tryAdvance(Consumer<? super K> action) {
3596 if (action == null) throw new NullPointerException();
3597 Node<K,V> p;
3598 if ((p = advance()) == null)
3599 return false;
3600 action.accept(p.key);
3601 return true;
3602 }
3603
3604 public long estimateSize() { return est; }
3605
3606 public int characteristics() {
3607 return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3608 Spliterator.NONNULL;
3609 }
3610 }
3611
3612 static final class ValueSpliterator<K,V> extends Traverser<K,V>
3613 implements Spliterator<V> {
3614 long est; // size estimate
3615 ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3616 long est) {
3617 super(tab, size, index, limit);
3618 this.est = est;
3619 }
3620
3621 public ValueSpliterator<K,V> trySplit() {
3622 int i, f, h;
3623 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3624 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3625 f, est >>>= 1);
3626 }
3627
3628 public void forEachRemaining(Consumer<? super V> action) {
3629 if (action == null) throw new NullPointerException();
3630 for (Node<K,V> p; (p = advance()) != null;)
3631 action.accept(p.val);
3632 }
3633
3634 public boolean tryAdvance(Consumer<? super V> action) {
3635 if (action == null) throw new NullPointerException();
3636 Node<K,V> p;
3637 if ((p = advance()) == null)
3638 return false;
3639 action.accept(p.val);
3640 return true;
3641 }
3642
3643 public long estimateSize() { return est; }
3644
3645 public int characteristics() {
3646 return Spliterator.CONCURRENT | Spliterator.NONNULL;
3647 }
3648 }
3649
3650 static final class EntrySpliterator<K,V> extends Traverser<K,V>
3651 implements Spliterator<Map.Entry<K,V>> {
3652 final ConcurrentHashMap<K,V> map; // To export MapEntry
3653 long est; // size estimate
3654 EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3655 long est, ConcurrentHashMap<K,V> map) {
3656 super(tab, size, index, limit);
3657 this.map = map;
3658 this.est = est;
3659 }
3660
3661 public EntrySpliterator<K,V> trySplit() {
3662 int i, f, h;
3663 return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3664 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3665 f, est >>>= 1, map);
3666 }
3667
3668 public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3669 if (action == null) throw new NullPointerException();
3670 for (Node<K,V> p; (p = advance()) != null; )
3671 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3672 }
3673
3674 public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3675 if (action == null) throw new NullPointerException();
3676 Node<K,V> p;
3677 if ((p = advance()) == null)
3678 return false;
3679 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3680 return true;
3681 }
3682
3683 public long estimateSize() { return est; }
3684
3685 public int characteristics() {
3686 return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3687 Spliterator.NONNULL;
3688 }
3689 }
3690
3691 // Parallel bulk operations
3692
3693 /**
3694 * Computes initial batch value for bulk tasks. The returned value
3695 * is approximately exp2 of the number of times (minus one) to
3696 * split task by two before executing leaf action. This value is
3697 * faster to compute and more convenient to use as a guide to
3698 * splitting than is the depth, since it is used while dividing by
3699 * two anyway.
3700 */
3701 final int batchFor(long b) {
3702 long n;
3703 if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3704 return 0;
3705 int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3706 return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3707 }
3708
3709 /**
3710 * Performs the given {@linkplain ##Bulk bulk} action for each (key, value).
3711 *
3712 * @param parallelismThreshold the (estimated) number of elements
3713 * needed for this operation to be executed in parallel
3714 * @param action the action
3715 * @since 1.8
3716 */
3717 public void forEach(long parallelismThreshold,
3718 BiConsumer<? super K,? super V> action) {
3719 if (action == null) throw new NullPointerException();
3720 new ForEachMappingTask<K,V>
3721 (null, batchFor(parallelismThreshold), 0, 0, table,
3722 action).invoke();
3723 }
3724
3725 /**
3726 * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
3727 * of each (key, value).
3728 *
3729 * @param parallelismThreshold the (estimated) number of elements
3730 * needed for this operation to be executed in parallel
3731 * @param transformer a function returning the transformation
3732 * for an element, or null if there is no transformation (in
3733 * which case the action is not applied)
3734 * @param action the action
3735 * @param <U> the return type of the transformer
3736 * @since 1.8
3737 */
3738 public <U> void forEach(long parallelismThreshold,
3739 BiFunction<? super K, ? super V, ? extends U> transformer,
3740 Consumer<? super U> action) {
3741 if (transformer == null || action == null)
3742 throw new NullPointerException();
3743 new ForEachTransformedMappingTask<K,V,U>
3744 (null, batchFor(parallelismThreshold), 0, 0, table,
3745 transformer, action).invoke();
3746 }
3747
3748 /**
3749 * Returns a non-null result from applying the given {@linkplain ##Bulk bulk} search
3750 * function on each (key, value), or null if none. Upon
3751 * success, further element processing is suppressed and the
3752 * results of any other parallel invocations of the search
3753 * function are ignored.
3754 *
3755 * @param parallelismThreshold the (estimated) number of elements
3756 * needed for this operation to be executed in parallel
3757 * @param searchFunction a function returning a non-null
3758 * result on success, else null
3759 * @param <U> the return type of the search function
3760 * @return a non-null result from applying the given search
3761 * function on each (key, value), or null if none
3762 * @since 1.8
3763 */
3764 public <U> U search(long parallelismThreshold,
3765 BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3766 if (searchFunction == null) throw new NullPointerException();
3767 return new SearchMappingsTask<K,V,U>
3768 (null, batchFor(parallelismThreshold), 0, 0, table,
3769 searchFunction, new AtomicReference<U>()).invoke();
3770 }
3771
3772 /**
3773 * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3774 * of all (key, value) pairs using the given reducer to
3775 * combine values, or null if none.
3776 *
3777 * @param parallelismThreshold the (estimated) number of elements
3778 * needed for this operation to be executed in parallel
3779 * @param transformer a function returning the transformation
3780 * for an element, or null if there is no transformation (in
3781 * which case it is not combined)
3782 * @param reducer a commutative associative combining function
3783 * @param <U> the return type of the transformer
3784 * @return the result of accumulating the given transformation
3785 * of all (key, value) pairs
3786 * @since 1.8
3787 */
3788 public <U> U reduce(long parallelismThreshold,
3789 BiFunction<? super K, ? super V, ? extends U> transformer,
3790 BiFunction<? super U, ? super U, ? extends U> reducer) {
3791 if (transformer == null || reducer == null)
3792 throw new NullPointerException();
3793 return new MapReduceMappingsTask<K,V,U>
3794 (null, batchFor(parallelismThreshold), 0, 0, table,
3795 null, transformer, reducer).invoke();
3796 }
3797
3798 /**
3799 * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3800 * of all (key, value) pairs using the given reducer to
3801 * combine values, and the given basis as an identity value.
3802 *
3803 * @param parallelismThreshold the (estimated) number of elements
3804 * needed for this operation to be executed in parallel
3805 * @param transformer a function returning the transformation
3806 * for an element
3807 * @param basis the identity (initial default value) for the reduction
3808 * @param reducer a commutative associative combining function
3809 * @return the result of accumulating the given transformation
3810 * of all (key, value) pairs
3811 * @since 1.8
3812 */
3813 public double reduceToDouble(long parallelismThreshold,
3814 ToDoubleBiFunction<? super K, ? super V> transformer,
3815 double basis,
3816 DoubleBinaryOperator reducer) {
3817 if (transformer == null || reducer == null)
3818 throw new NullPointerException();
3819 return new MapReduceMappingsToDoubleTask<K,V>
3820 (null, batchFor(parallelismThreshold), 0, 0, table,
3821 null, transformer, basis, reducer).invoke();
3822 }
3823
3824 /**
3825 * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3826 * of all (key, value) pairs using the given reducer to
3827 * combine values, and the given basis as an identity value.
3828 *
3829 * @param parallelismThreshold the (estimated) number of elements
3830 * needed for this operation to be executed in parallel
3831 * @param transformer a function returning the transformation
3832 * for an element
3833 * @param basis the identity (initial default value) for the reduction
3834 * @param reducer a commutative associative combining function
3835 * @return the result of accumulating the given transformation
3836 * of all (key, value) pairs
3837 * @since 1.8
3838 */
3839 public long reduceToLong(long parallelismThreshold,
3840 ToLongBiFunction<? super K, ? super V> transformer,
3841 long basis,
3842 LongBinaryOperator reducer) {
3843 if (transformer == null || reducer == null)
3844 throw new NullPointerException();
3845 return new MapReduceMappingsToLongTask<K,V>
3846 (null, batchFor(parallelismThreshold), 0, 0, table,
3847 null, transformer, basis, reducer).invoke();
3848 }
3849
3850 /**
3851 * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3852 * of all (key, value) pairs using the given reducer to
3853 * combine values, and the given basis as an identity value.
3854 *
3855 * @param parallelismThreshold the (estimated) number of elements
3856 * needed for this operation to be executed in parallel
3857 * @param transformer a function returning the transformation
3858 * for an element
3859 * @param basis the identity (initial default value) for the reduction
3860 * @param reducer a commutative associative combining function
3861 * @return the result of accumulating the given transformation
3862 * of all (key, value) pairs
3863 * @since 1.8
3864 */
3865 public int reduceToInt(long parallelismThreshold,
3866 ToIntBiFunction<? super K, ? super V> transformer,
3867 int basis,
3868 IntBinaryOperator reducer) {
3869 if (transformer == null || reducer == null)
3870 throw new NullPointerException();
3871 return new MapReduceMappingsToIntTask<K,V>
3872 (null, batchFor(parallelismThreshold), 0, 0, table,
3873 null, transformer, basis, reducer).invoke();
3874 }
3875
3876 /**
3877 * Performs the given {@linkplain ##Bulk bulk} action for each key.
3878 *
3879 * @param parallelismThreshold the (estimated) number of elements
3880 * needed for this operation to be executed in parallel
3881 * @param action the action
3882 * @since 1.8
3883 */
3884 public void forEachKey(long parallelismThreshold,
3885 Consumer<? super K> action) {
3886 if (action == null) throw new NullPointerException();
3887 new ForEachKeyTask<K,V>
3888 (null, batchFor(parallelismThreshold), 0, 0, table,
3889 action).invoke();
3890 }
3891
3892 /**
3893 * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
3894 * of each key.
3895 *
3896 * @param parallelismThreshold the (estimated) number of elements
3897 * needed for this operation to be executed in parallel
3898 * @param transformer a function returning the transformation
3899 * for an element, or null if there is no transformation (in
3900 * which case the action is not applied)
3901 * @param action the action
3902 * @param <U> the return type of the transformer
3903 * @since 1.8
3904 */
3905 public <U> void forEachKey(long parallelismThreshold,
3906 Function<? super K, ? extends U> transformer,
3907 Consumer<? super U> action) {
3908 if (transformer == null || action == null)
3909 throw new NullPointerException();
3910 new ForEachTransformedKeyTask<K,V,U>
3911 (null, batchFor(parallelismThreshold), 0, 0, table,
3912 transformer, action).invoke();
3913 }
3914
3915 /**
3916 * Returns a non-null result from applying the given {@linkplain ##Bulk bulk} search
3917 * function on each key, or null if none. Upon success,
3918 * further element processing is suppressed and the results of
3919 * any other parallel invocations of the search function are
3920 * ignored.
3921 *
3922 * @param parallelismThreshold the (estimated) number of elements
3923 * needed for this operation to be executed in parallel
3924 * @param searchFunction a function returning a non-null
3925 * result on success, else null
3926 * @param <U> the return type of the search function
3927 * @return a non-null result from applying the given search
3928 * function on each key, or null if none
3929 * @since 1.8
3930 */
3931 public <U> U searchKeys(long parallelismThreshold,
3932 Function<? super K, ? extends U> searchFunction) {
3933 if (searchFunction == null) throw new NullPointerException();
3934 return new SearchKeysTask<K,V,U>
3935 (null, batchFor(parallelismThreshold), 0, 0, table,
3936 searchFunction, new AtomicReference<U>()).invoke();
3937 }
3938
3939 /**
3940 * Returns the result of {@linkplain ##Bulk bulk} accumulating all keys using the given
3941 * reducer to combine values, or null if none.
3942 *
3943 * @param parallelismThreshold the (estimated) number of elements
3944 * needed for this operation to be executed in parallel
3945 * @param reducer a commutative associative combining function
3946 * @return the result of accumulating all keys using the given
3947 * reducer to combine values, or null if none
3948 * @since 1.8
3949 */
3950 public K reduceKeys(long parallelismThreshold,
3951 BiFunction<? super K, ? super K, ? extends K> reducer) {
3952 if (reducer == null) throw new NullPointerException();
3953 return new ReduceKeysTask<K,V>
3954 (null, batchFor(parallelismThreshold), 0, 0, table,
3955 null, reducer).invoke();
3956 }
3957
3958 /**
3959 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
3960 * of all keys using the given reducer to combine values, or
3961 * null if none.
3962 *
3963 * @param parallelismThreshold the (estimated) number of elements
3964 * needed for this operation to be executed in parallel
3965 * @param transformer a function returning the transformation
3966 * for an element, or null if there is no transformation (in
3967 * which case it is not combined)
3968 * @param reducer a commutative associative combining function
3969 * @param <U> the return type of the transformer
3970 * @return the result of accumulating the given transformation
3971 * of all keys
3972 * @since 1.8
3973 */
3974 public <U> U reduceKeys(long parallelismThreshold,
3975 Function<? super K, ? extends U> transformer,
3976 BiFunction<? super U, ? super U, ? extends U> reducer) {
3977 if (transformer == null || reducer == null)
3978 throw new NullPointerException();
3979 return new MapReduceKeysTask<K,V,U>
3980 (null, batchFor(parallelismThreshold), 0, 0, table,
3981 null, transformer, reducer).invoke();
3982 }
3983
3984 /**
3985 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
3986 * of all keys using the given reducer to combine values, and
3987 * the given basis as an identity value.
3988 *
3989 * @param parallelismThreshold the (estimated) number of elements
3990 * needed for this operation to be executed in parallel
3991 * @param transformer a function returning the transformation
3992 * for an element
3993 * @param basis the identity (initial default value) for the reduction
3994 * @param reducer a commutative associative combining function
3995 * @return the result of accumulating the given transformation
3996 * of all keys
3997 * @since 1.8
3998 */
3999 public double reduceKeysToDouble(long parallelismThreshold,
4000 ToDoubleFunction<? super K> transformer,
4001 double basis,
4002 DoubleBinaryOperator reducer) {
4003 if (transformer == null || reducer == null)
4004 throw new NullPointerException();
4005 return new MapReduceKeysToDoubleTask<K,V>
4006 (null, batchFor(parallelismThreshold), 0, 0, table,
4007 null, transformer, basis, reducer).invoke();
4008 }
4009
4010 /**
4011 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4012 * of all keys using the given reducer to combine values, and
4013 * the given basis as an identity value.
4014 *
4015 * @param parallelismThreshold the (estimated) number of elements
4016 * needed for this operation to be executed in parallel
4017 * @param transformer a function returning the transformation
4018 * for an element
4019 * @param basis the identity (initial default value) for the reduction
4020 * @param reducer a commutative associative combining function
4021 * @return the result of accumulating the given transformation
4022 * of all keys
4023 * @since 1.8
4024 */
4025 public long reduceKeysToLong(long parallelismThreshold,
4026 ToLongFunction<? super K> transformer,
4027 long basis,
4028 LongBinaryOperator reducer) {
4029 if (transformer == null || reducer == null)
4030 throw new NullPointerException();
4031 return new MapReduceKeysToLongTask<K,V>
4032 (null, batchFor(parallelismThreshold), 0, 0, table,
4033 null, transformer, basis, reducer).invoke();
4034 }
4035
4036 /**
4037 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4038 * of all keys using the given reducer to combine values, and
4039 * the given basis as an identity value.
4040 *
4041 * @param parallelismThreshold the (estimated) number of elements
4042 * needed for this operation to be executed in parallel
4043 * @param transformer a function returning the transformation
4044 * for an element
4045 * @param basis the identity (initial default value) for the reduction
4046 * @param reducer a commutative associative combining function
4047 * @return the result of accumulating the given transformation
4048 * of all keys
4049 * @since 1.8
4050 */
4051 public int reduceKeysToInt(long parallelismThreshold,
4052 ToIntFunction<? super K> transformer,
4053 int basis,
4054 IntBinaryOperator reducer) {
4055 if (transformer == null || reducer == null)
4056 throw new NullPointerException();
4057 return new MapReduceKeysToIntTask<K,V>
4058 (null, batchFor(parallelismThreshold), 0, 0, table,
4059 null, transformer, basis, reducer).invoke();
4060 }
4061
4062 /**
4063 * Performs the given {@linkplain ##Bulk bulk} action for each value.
4064 *
4065 * @param parallelismThreshold the (estimated) number of elements
4066 * needed for this operation to be executed in parallel
4067 * @param action the action
4068 * @since 1.8
4069 */
4070 public void forEachValue(long parallelismThreshold,
4071 Consumer<? super V> action) {
4072 if (action == null)
4073 throw new NullPointerException();
4074 new ForEachValueTask<K,V>
4075 (null, batchFor(parallelismThreshold), 0, 0, table,
4076 action).invoke();
4077 }
4078
4079 /**
4080 * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
4081 * of each value.
4082 *
4083 * @param parallelismThreshold the (estimated) number of elements
4084 * needed for this operation to be executed in parallel
4085 * @param transformer a function returning the transformation
4086 * for an element, or null if there is no transformation (in
4087 * which case the action is not applied)
4088 * @param action the action
4089 * @param <U> the return type of the transformer
4090 * @since 1.8
4091 */
4092 public <U> void forEachValue(long parallelismThreshold,
4093 Function<? super V, ? extends U> transformer,
4094 Consumer<? super U> action) {
4095 if (transformer == null || action == null)
4096 throw new NullPointerException();
4097 new ForEachTransformedValueTask<K,V,U>
4098 (null, batchFor(parallelismThreshold), 0, 0, table,
4099 transformer, action).invoke();
4100 }
4101
4102 /**
4103 * Returns a non-null result from {@linkplain ##Bulk bulk} applying the given search
4104 * function on each value, or null if none. Upon success,
4105 * further element processing is suppressed and the results of
4106 * any other parallel invocations of the search function are
4107 * ignored.
4108 *
4109 * @param parallelismThreshold the (estimated) number of elements
4110 * needed for this operation to be executed in parallel
4111 * @param searchFunction a function returning a non-null
4112 * result on success, else null
4113 * @param <U> the return type of the search function
4114 * @return a non-null result from applying the given search
4115 * function on each value, or null if none
4116 * @since 1.8
4117 */
4118 public <U> U searchValues(long parallelismThreshold,
4119 Function<? super V, ? extends U> searchFunction) {
4120 if (searchFunction == null) throw new NullPointerException();
4121 return new SearchValuesTask<K,V,U>
4122 (null, batchFor(parallelismThreshold), 0, 0, table,
4123 searchFunction, new AtomicReference<U>()).invoke();
4124 }
4125
4126 /**
4127 * Returns the result of {@linkplain ##Bulk bulk} accumulating all values using the
4128 * given reducer to combine values, or null if none.
4129 *
4130 * @param parallelismThreshold the (estimated) number of elements
4131 * needed for this operation to be executed in parallel
4132 * @param reducer a commutative associative combining function
4133 * @return the result of accumulating all values
4134 * @since 1.8
4135 */
4136 public V reduceValues(long parallelismThreshold,
4137 BiFunction<? super V, ? super V, ? extends V> reducer) {
4138 if (reducer == null) throw new NullPointerException();
4139 return new ReduceValuesTask<K,V>
4140 (null, batchFor(parallelismThreshold), 0, 0, table,
4141 null, reducer).invoke();
4142 }
4143
4144 /**
4145 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4146 * of all values using the given reducer to combine values, or
4147 * null if none.
4148 *
4149 * @param parallelismThreshold the (estimated) number of elements
4150 * needed for this operation to be executed in parallel
4151 * @param transformer a function returning the transformation
4152 * for an element, or null if there is no transformation (in
4153 * which case it is not combined)
4154 * @param reducer a commutative associative combining function
4155 * @param <U> the return type of the transformer
4156 * @return the result of accumulating the given transformation
4157 * of all values
4158 * @since 1.8
4159 */
4160 public <U> U reduceValues(long parallelismThreshold,
4161 Function<? super V, ? extends U> transformer,
4162 BiFunction<? super U, ? super U, ? extends U> reducer) {
4163 if (transformer == null || reducer == null)
4164 throw new NullPointerException();
4165 return new MapReduceValuesTask<K,V,U>
4166 (null, batchFor(parallelismThreshold), 0, 0, table,
4167 null, transformer, reducer).invoke();
4168 }
4169
4170 /**
4171 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4172 * of all values using the given reducer to combine values,
4173 * and the given basis as an identity value.
4174 *
4175 * @param parallelismThreshold the (estimated) number of elements
4176 * needed for this operation to be executed in parallel
4177 * @param transformer a function returning the transformation
4178 * for an element
4179 * @param basis the identity (initial default value) for the reduction
4180 * @param reducer a commutative associative combining function
4181 * @return the result of accumulating the given transformation
4182 * of all values
4183 * @since 1.8
4184 */
4185 public double reduceValuesToDouble(long parallelismThreshold,
4186 ToDoubleFunction<? super V> transformer,
4187 double basis,
4188 DoubleBinaryOperator reducer) {
4189 if (transformer == null || reducer == null)
4190 throw new NullPointerException();
4191 return new MapReduceValuesToDoubleTask<K,V>
4192 (null, batchFor(parallelismThreshold), 0, 0, table,
4193 null, transformer, basis, reducer).invoke();
4194 }
4195
4196 /**
4197 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4198 * of all values using the given reducer to combine values,
4199 * and the given basis as an identity value.
4200 *
4201 * @param parallelismThreshold the (estimated) number of elements
4202 * needed for this operation to be executed in parallel
4203 * @param transformer a function returning the transformation
4204 * for an element
4205 * @param basis the identity (initial default value) for the reduction
4206 * @param reducer a commutative associative combining function
4207 * @return the result of accumulating the given transformation
4208 * of all values
4209 * @since 1.8
4210 */
4211 public long reduceValuesToLong(long parallelismThreshold,
4212 ToLongFunction<? super V> transformer,
4213 long basis,
4214 LongBinaryOperator reducer) {
4215 if (transformer == null || reducer == null)
4216 throw new NullPointerException();
4217 return new MapReduceValuesToLongTask<K,V>
4218 (null, batchFor(parallelismThreshold), 0, 0, table,
4219 null, transformer, basis, reducer).invoke();
4220 }
4221
4222 /**
4223 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4224 * of all values using the given reducer to combine values,
4225 * and the given basis as an identity value.
4226 *
4227 * @param parallelismThreshold the (estimated) number of elements
4228 * needed for this operation to be executed in parallel
4229 * @param transformer a function returning the transformation
4230 * for an element
4231 * @param basis the identity (initial default value) for the reduction
4232 * @param reducer a commutative associative combining function
4233 * @return the result of accumulating the given transformation
4234 * of all values
4235 * @since 1.8
4236 */
4237 public int reduceValuesToInt(long parallelismThreshold,
4238 ToIntFunction<? super V> transformer,
4239 int basis,
4240 IntBinaryOperator reducer) {
4241 if (transformer == null || reducer == null)
4242 throw new NullPointerException();
4243 return new MapReduceValuesToIntTask<K,V>
4244 (null, batchFor(parallelismThreshold), 0, 0, table,
4245 null, transformer, basis, reducer).invoke();
4246 }
4247
4248 /**
4249 * Performs the given {@linkplain ##Bulk bulk} action for each entry.
4250 *
4251 * @param parallelismThreshold the (estimated) number of elements
4252 * needed for this operation to be executed in parallel
4253 * @param action the action
4254 * @since 1.8
4255 */
4256 public void forEachEntry(long parallelismThreshold,
4257 Consumer<? super Map.Entry<K,V>> action) {
4258 if (action == null) throw new NullPointerException();
4259 new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4260 action).invoke();
4261 }
4262
4263 /**
4264 * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
4265 * of each entry.
4266 *
4267 * @param parallelismThreshold the (estimated) number of elements
4268 * needed for this operation to be executed in parallel
4269 * @param transformer a function returning the transformation
4270 * for an element, or null if there is no transformation (in
4271 * which case the action is not applied)
4272 * @param action the action
4273 * @param <U> the return type of the transformer
4274 * @since 1.8
4275 */
4276 public <U> void forEachEntry(long parallelismThreshold,
4277 Function<Map.Entry<K,V>, ? extends U> transformer,
4278 Consumer<? super U> action) {
4279 if (transformer == null || action == null)
4280 throw new NullPointerException();
4281 new ForEachTransformedEntryTask<K,V,U>
4282 (null, batchFor(parallelismThreshold), 0, 0, table,
4283 transformer, action).invoke();
4284 }
4285
4286 /**
4287 * Returns a non-null result from {@linkplain ##Bulk bulk} applying the given search
4288 * function on each entry, or null if none. Upon success,
4289 * further element processing is suppressed and the results of
4290 * any other parallel invocations of the search function are
4291 * ignored.
4292 *
4293 * @param parallelismThreshold the (estimated) number of elements
4294 * needed for this operation to be executed in parallel
4295 * @param searchFunction a function returning a non-null
4296 * result on success, else null
4297 * @param <U> the return type of the search function
4298 * @return a non-null result from applying the given search
4299 * function on each entry, or null if none
4300 * @since 1.8
4301 */
4302 public <U> U searchEntries(long parallelismThreshold,
4303 Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4304 if (searchFunction == null) throw new NullPointerException();
4305 return new SearchEntriesTask<K,V,U>
4306 (null, batchFor(parallelismThreshold), 0, 0, table,
4307 searchFunction, new AtomicReference<U>()).invoke();
4308 }
4309
4310 /**
4311 * Returns the result of {@linkplain ##Bulk bulk} accumulating all entries using the
4312 * given reducer to combine values, or null if none.
4313 *
4314 * @param parallelismThreshold the (estimated) number of elements
4315 * needed for this operation to be executed in parallel
4316 * @param reducer a commutative associative combining function
4317 * @return the result of accumulating all entries
4318 * @since 1.8
4319 */
4320 public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4321 BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4322 if (reducer == null) throw new NullPointerException();
4323 return new ReduceEntriesTask<K,V>
4324 (null, batchFor(parallelismThreshold), 0, 0, table,
4325 null, reducer).invoke();
4326 }
4327
4328 /**
4329 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4330 * of all entries using the given reducer to combine values,
4331 * or null if none.
4332 *
4333 * @param parallelismThreshold the (estimated) number of elements
4334 * needed for this operation to be executed in parallel
4335 * @param transformer a function returning the transformation
4336 * for an element, or null if there is no transformation (in
4337 * which case it is not combined)
4338 * @param reducer a commutative associative combining function
4339 * @param <U> the return type of the transformer
4340 * @return the result of accumulating the given transformation
4341 * of all entries
4342 * @since 1.8
4343 */
4344 public <U> U reduceEntries(long parallelismThreshold,
4345 Function<Map.Entry<K,V>, ? extends U> transformer,
4346 BiFunction<? super U, ? super U, ? extends U> reducer) {
4347 if (transformer == null || reducer == null)
4348 throw new NullPointerException();
4349 return new MapReduceEntriesTask<K,V,U>
4350 (null, batchFor(parallelismThreshold), 0, 0, table,
4351 null, transformer, reducer).invoke();
4352 }
4353
4354 /**
4355 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4356 * of all entries using the given reducer to combine values,
4357 * and the given basis as an identity value.
4358 *
4359 * @param parallelismThreshold the (estimated) number of elements
4360 * needed for this operation to be executed in parallel
4361 * @param transformer a function returning the transformation
4362 * for an element
4363 * @param basis the identity (initial default value) for the reduction
4364 * @param reducer a commutative associative combining function
4365 * @return the result of accumulating the given transformation
4366 * of all entries
4367 * @since 1.8
4368 */
4369 public double reduceEntriesToDouble(long parallelismThreshold,
4370 ToDoubleFunction<Map.Entry<K,V>> transformer,
4371 double basis,
4372 DoubleBinaryOperator reducer) {
4373 if (transformer == null || reducer == null)
4374 throw new NullPointerException();
4375 return new MapReduceEntriesToDoubleTask<K,V>
4376 (null, batchFor(parallelismThreshold), 0, 0, table,
4377 null, transformer, basis, reducer).invoke();
4378 }
4379
4380 /**
4381 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4382 * of all entries using the given reducer to combine values,
4383 * and the given basis as an identity value.
4384 *
4385 * @param parallelismThreshold the (estimated) number of elements
4386 * needed for this operation to be executed in parallel
4387 * @param transformer a function returning the transformation
4388 * for an element
4389 * @param basis the identity (initial default value) for the reduction
4390 * @param reducer a commutative associative combining function
4391 * @return the result of accumulating the given transformation
4392 * of all entries
4393 * @since 1.8
4394 */
4395 public long reduceEntriesToLong(long parallelismThreshold,
4396 ToLongFunction<Map.Entry<K,V>> transformer,
4397 long basis,
4398 LongBinaryOperator reducer) {
4399 if (transformer == null || reducer == null)
4400 throw new NullPointerException();
4401 return new MapReduceEntriesToLongTask<K,V>
4402 (null, batchFor(parallelismThreshold), 0, 0, table,
4403 null, transformer, basis, reducer).invoke();
4404 }
4405
4406 /**
4407 * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4408 * of all entries using the given reducer to combine values,
4409 * and the given basis as an identity value.
4410 *
4411 * @param parallelismThreshold the (estimated) number of elements
4412 * needed for this operation to be executed in parallel
4413 * @param transformer a function returning the transformation
4414 * for an element
4415 * @param basis the identity (initial default value) for the reduction
4416 * @param reducer a commutative associative combining function
4417 * @return the result of accumulating the given transformation
4418 * of all entries
4419 * @since 1.8
4420 */
4421 public int reduceEntriesToInt(long parallelismThreshold,
4422 ToIntFunction<Map.Entry<K,V>> transformer,
4423 int basis,
4424 IntBinaryOperator reducer) {
4425 if (transformer == null || reducer == null)
4426 throw new NullPointerException();
4427 return new MapReduceEntriesToIntTask<K,V>
4428 (null, batchFor(parallelismThreshold), 0, 0, table,
4429 null, transformer, basis, reducer).invoke();
4430 }
4431
4432
4433 /* ----------------Views -------------- */
4434
4435 /**
4436 * Base class for views.
4437 */
4438 abstract static sealed class CollectionView<K,V,E>
4439 implements Collection<E>, java.io.Serializable permits EntrySetView, KeySetView, ValuesView {
4440 private static final long serialVersionUID = 7249069246763182397L;
4441 final ConcurrentHashMap<K,V> map;
4442 CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; }
4443
4444 /**
4445 * Returns the map backing this view.
4446 *
4447 * @return the map backing this view
4448 */
4449 public ConcurrentHashMap<K,V> getMap() { return map; }
4450
4451 /**
4452 * Removes all of the elements from this view, by removing all
4453 * the mappings from the map backing this view.
4454 */
4455 public final void clear() { map.clear(); }
4456 public final int size() { return map.size(); }
4457 public final boolean isEmpty() { return map.isEmpty(); }
4458
4459 // implementations below rely on concrete classes supplying these
4460 // abstract methods
4461 /**
4462 * Returns an iterator over the elements in this collection.
4463 *
4464 * <p>The returned iterator is
4465 * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
4466 *
4467 * @return an iterator over the elements in this collection
4468 */
4469 public abstract Iterator<E> iterator();
4470 public abstract boolean contains(Object o);
4471 public abstract boolean remove(Object o);
4472
4473 private static final String OOME_MSG = "Required array size too large";
4474
4475 public final Object[] toArray() {
4476 long sz = map.mappingCount();
4477 if (sz > MAX_ARRAY_SIZE)
4478 throw new OutOfMemoryError(OOME_MSG);
4479 int n = (int)sz;
4480 Object[] r = new Object[n];
4481 int i = 0;
4482 for (E e : this) {
4483 if (i == n) {
4484 if (n >= MAX_ARRAY_SIZE)
4485 throw new OutOfMemoryError(OOME_MSG);
4486 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4487 n = MAX_ARRAY_SIZE;
4488 else
4489 n += (n >>> 1) + 1;
4490 r = Arrays.copyOf(r, n);
4491 }
4492 r[i++] = e;
4493 }
4494 return (i == n) ? r : Arrays.copyOf(r, i);
4495 }
4496
4497 @SuppressWarnings("unchecked")
4498 public final <T> T[] toArray(T[] a) {
4499 long sz = map.mappingCount();
4500 if (sz > MAX_ARRAY_SIZE)
4501 throw new OutOfMemoryError(OOME_MSG);
4502 int m = (int)sz;
4503 T[] r = (a.length >= m) ? a :
4504 (T[])java.lang.reflect.Array
4505 .newInstance(a.getClass().getComponentType(), m);
4506 int n = r.length;
4507 int i = 0;
4508 for (E e : this) {
4509 if (i == n) {
4510 if (n >= MAX_ARRAY_SIZE)
4511 throw new OutOfMemoryError(OOME_MSG);
4512 if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
4513 n = MAX_ARRAY_SIZE;
4514 else
4515 n += (n >>> 1) + 1;
4516 r = Arrays.copyOf(r, n);
4517 }
4518 r[i++] = (T)e;
4519 }
4520 if (a == r && i < n) {
4521 r[i] = null; // null-terminate
4522 return r;
4523 }
4524 return (i == n) ? r : Arrays.copyOf(r, i);
4525 }
4526
4527 /**
4528 * Returns a string representation of this collection.
4529 * The string representation consists of the string representations
4530 * of the collection's elements in the order they are returned by
4531 * its iterator, enclosed in square brackets ({@code "[]"}).
4532 * Adjacent elements are separated by the characters {@code ", "}
4533 * (comma and space). Elements are converted to strings as by
4534 * {@link String#valueOf(Object)}.
4535 *
4536 * @return a string representation of this collection
4537 */
4538 public final String toString() {
4539 StringBuilder sb = new StringBuilder();
4540 sb.append('[');
4541 Iterator<E> it = iterator();
4542 if (it.hasNext()) {
4543 for (;;) {
4544 Object e = it.next();
4545 sb.append(e == this ? "(this Collection)" : e);
4546 if (!it.hasNext())
4547 break;
4548 sb.append(',').append(' ');
4549 }
4550 }
4551 return sb.append(']').toString();
4552 }
4553
4554 public final boolean containsAll(Collection<?> c) {
4555 if (c != this) {
4556 for (Object e : c) {
4557 if (e == null || !contains(e))
4558 return false;
4559 }
4560 }
4561 return true;
4562 }
4563
4564 public boolean removeAll(Collection<?> c) {
4565 if (c == null) throw new NullPointerException();
4566 boolean modified = false;
4567 // Use (c instanceof Set) as a hint that lookup in c is as
4568 // efficient as this view
4569 Node<K,V>[] t;
4570 if ((t = map.table) == null) {
4571 return false;
4572 } else if (c instanceof Set<?> && c.size() > t.length) {
4573 for (Iterator<?> it = iterator(); it.hasNext(); ) {
4574 if (c.contains(it.next())) {
4575 it.remove();
4576 modified = true;
4577 }
4578 }
4579 } else {
4580 for (Object e : c)
4581 modified |= remove(e);
4582 }
4583 return modified;
4584 }
4585
4586 public final boolean retainAll(Collection<?> c) {
4587 if (c == null) throw new NullPointerException();
4588 boolean modified = false;
4589 for (Iterator<E> it = iterator(); it.hasNext();) {
4590 if (!c.contains(it.next())) {
4591 it.remove();
4592 modified = true;
4593 }
4594 }
4595 return modified;
4596 }
4597
4598 }
4599
4600 /**
4601 * A view of a ConcurrentHashMap as a {@link Set} of keys, in
4602 * which additions may optionally be enabled by mapping to a
4603 * common value. This class cannot be directly instantiated.
4604 * See {@link #keySet() keySet()},
4605 * {@link #keySet(Object) keySet(V)},
4606 * {@link #newKeySet() newKeySet()},
4607 * {@link #newKeySet(int) newKeySet(int)}.
4608 *
4609 * @param <K> the type of keys
4610 * @param <V> the type of values in the backing map
4611 *
4612 * @since 1.8
4613 */
4614 public static final class KeySetView<K,V> extends CollectionView<K,V,K>
4615 implements Set<K>, java.io.Serializable {
4616 private static final long serialVersionUID = 7249069246763182397L;
4617 /** @serial */
4618 @SuppressWarnings("serial") // Conditionally serializable
4619 private final V value;
4620 KeySetView(ConcurrentHashMap<K,V> map, V value) { // non-public
4621 super(map);
4622 this.value = value;
4623 }
4624
4625 /**
4626 * Returns the default mapped value for additions,
4627 * or {@code null} if additions are not supported.
4628 *
4629 * @return the default mapped value for additions, or {@code null}
4630 * if not supported
4631 */
4632 public V getMappedValue() { return value; }
4633
4634 /**
4635 * {@inheritDoc}
4636 * @throws NullPointerException if the specified key is null
4637 */
4638 public boolean contains(Object o) { return map.containsKey(o); }
4639
4640 /**
4641 * Removes the key from this map view, by removing the key (and its
4642 * corresponding value) from the backing map. This method does
4643 * nothing if the key is not in the map.
4644 *
4645 * @param o the key to be removed from the backing map
4646 * @return {@code true} if the backing map contained the specified key
4647 * @throws NullPointerException if the specified key is null
4648 */
4649 public boolean remove(Object o) { return map.remove(o) != null; }
4650
4651 /**
4652 * @return an iterator over the keys of the backing map
4653 */
4654 public Iterator<K> iterator() {
4655 Node<K,V>[] t;
4656 ConcurrentHashMap<K,V> m = map;
4657 int f = (t = m.table) == null ? 0 : t.length;
4658 return new KeyIterator<K,V>(t, f, 0, f, m);
4659 }
4660
4661 /**
4662 * Adds the specified key to this set view by mapping the key to
4663 * the default mapped value in the backing map, if defined.
4664 *
4665 * @param e key to be added
4666 * @return {@code true} if this set changed as a result of the call
4667 * @throws NullPointerException if the specified key is null
4668 * @throws UnsupportedOperationException if no default mapped value
4669 * for additions was provided
4670 */
4671 public boolean add(K e) {
4672 V v;
4673 if ((v = value) == null)
4674 throw new UnsupportedOperationException();
4675 return map.putVal(e, v, true) == null;
4676 }
4677
4678 /**
4679 * Adds all of the elements in the specified collection to this set,
4680 * as if by calling {@link #add} on each one.
4681 *
4682 * @param c the elements to be inserted into this set
4683 * @return {@code true} if this set changed as a result of the call
4684 * @throws NullPointerException if the collection or any of its
4685 * elements are {@code null}
4686 * @throws UnsupportedOperationException if no default mapped value
4687 * for additions was provided
4688 */
4689 public boolean addAll(Collection<? extends K> c) {
4690 boolean added = false;
4691 V v;
4692 if ((v = value) == null)
4693 throw new UnsupportedOperationException();
4694 for (K e : c) {
4695 if (map.putVal(e, v, true) == null)
4696 added = true;
4697 }
4698 return added;
4699 }
4700
4701 public int hashCode() {
4702 int h = 0;
4703 for (K e : this)
4704 h += e.hashCode();
4705 return h;
4706 }
4707
4708 public boolean equals(Object o) {
4709 Set<?> c;
4710 return ((o instanceof Set) &&
4711 ((c = (Set<?>)o) == this ||
4712 (containsAll(c) && c.containsAll(this))));
4713 }
4714
4715 public Spliterator<K> spliterator() {
4716 Node<K,V>[] t;
4717 ConcurrentHashMap<K,V> m = map;
4718 long n = m.sumCount();
4719 int f = (t = m.table) == null ? 0 : t.length;
4720 return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4721 }
4722
4723 public void forEach(Consumer<? super K> action) {
4724 if (action == null) throw new NullPointerException();
4725 Node<K,V>[] t;
4726 if ((t = map.table) != null) {
4727 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4728 for (Node<K,V> p; (p = it.advance()) != null; )
4729 action.accept(p.key);
4730 }
4731 }
4732 }
4733
4734 /**
4735 * A view of a ConcurrentHashMap as a {@link Collection} of
4736 * values, in which additions are disabled. This class cannot be
4737 * directly instantiated. See {@link #values()}.
4738 */
4739 static final class ValuesView<K,V> extends CollectionView<K,V,V>
4740 implements Collection<V>, java.io.Serializable {
4741 private static final long serialVersionUID = 2249069246763182397L;
4742 ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
4743 public final boolean contains(Object o) {
4744 return map.containsValue(o);
4745 }
4746
4747 public final boolean remove(Object o) {
4748 if (o != null) {
4749 for (Iterator<V> it = iterator(); it.hasNext();) {
4750 if (o.equals(it.next())) {
4751 it.remove();
4752 return true;
4753 }
4754 }
4755 }
4756 return false;
4757 }
4758
4759 public final Iterator<V> iterator() {
4760 ConcurrentHashMap<K,V> m = map;
4761 Node<K,V>[] t;
4762 int f = (t = m.table) == null ? 0 : t.length;
4763 return new ValueIterator<K,V>(t, f, 0, f, m);
4764 }
4765
4766 public final boolean add(V e) {
4767 throw new UnsupportedOperationException();
4768 }
4769 public final boolean addAll(Collection<? extends V> c) {
4770 throw new UnsupportedOperationException();
4771 }
4772
4773 @Override public boolean removeAll(Collection<?> c) {
4774 if (c == null) throw new NullPointerException();
4775 boolean modified = false;
4776 for (Iterator<V> it = iterator(); it.hasNext();) {
4777 if (c.contains(it.next())) {
4778 it.remove();
4779 modified = true;
4780 }
4781 }
4782 return modified;
4783 }
4784
4785 public boolean removeIf(Predicate<? super V> filter) {
4786 return map.removeValueIf(filter);
4787 }
4788
4789 public Spliterator<V> spliterator() {
4790 Node<K,V>[] t;
4791 ConcurrentHashMap<K,V> m = map;
4792 long n = m.sumCount();
4793 int f = (t = m.table) == null ? 0 : t.length;
4794 return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
4795 }
4796
4797 public void forEach(Consumer<? super V> action) {
4798 if (action == null) throw new NullPointerException();
4799 Node<K,V>[] t;
4800 if ((t = map.table) != null) {
4801 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4802 for (Node<K,V> p; (p = it.advance()) != null; )
4803 action.accept(p.val);
4804 }
4805 }
4806 }
4807
4808 /**
4809 * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4810 * entries. This class cannot be directly instantiated. See
4811 * {@link #entrySet()}.
4812 */
4813 static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
4814 implements Set<Map.Entry<K,V>>, java.io.Serializable {
4815 private static final long serialVersionUID = 2249069246763182397L;
4816 EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
4817
4818 public boolean contains(Object o) {
4819 Object k, v, r; Map.Entry<?,?> e;
4820 return ((o instanceof Map.Entry) &&
4821 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4822 (r = map.get(k)) != null &&
4823 (v = e.getValue()) != null &&
4824 (v == r || v.equals(r)));
4825 }
4826
4827 public boolean remove(Object o) {
4828 Object k, v; Map.Entry<?,?> e;
4829 return ((o instanceof Map.Entry) &&
4830 (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
4831 (v = e.getValue()) != null &&
4832 map.remove(k, v));
4833 }
4834
4835 /**
4836 * @return an iterator over the entries of the backing map
4837 */
4838 public Iterator<Map.Entry<K,V>> iterator() {
4839 ConcurrentHashMap<K,V> m = map;
4840 Node<K,V>[] t;
4841 int f = (t = m.table) == null ? 0 : t.length;
4842 return new EntryIterator<K,V>(t, f, 0, f, m);
4843 }
4844
4845 public boolean add(Entry<K,V> e) {
4846 return map.putVal(e.getKey(), e.getValue(), false) == null;
4847 }
4848
4849 public boolean addAll(Collection<? extends Entry<K,V>> c) {
4850 boolean added = false;
4851 for (Entry<K,V> e : c) {
4852 if (add(e))
4853 added = true;
4854 }
4855 return added;
4856 }
4857
4858 public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
4859 return map.removeEntryIf(filter);
4860 }
4861
4862 public final int hashCode() {
4863 int h = 0;
4864 Node<K,V>[] t;
4865 if ((t = map.table) != null) {
4866 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4867 for (Node<K,V> p; (p = it.advance()) != null; ) {
4868 h += p.hashCode();
4869 }
4870 }
4871 return h;
4872 }
4873
4874 public final boolean equals(Object o) {
4875 Set<?> c;
4876 return ((o instanceof Set) &&
4877 ((c = (Set<?>)o) == this ||
4878 (containsAll(c) && c.containsAll(this))));
4879 }
4880
4881 public Spliterator<Map.Entry<K,V>> spliterator() {
4882 Node<K,V>[] t;
4883 ConcurrentHashMap<K,V> m = map;
4884 long n = m.sumCount();
4885 int f = (t = m.table) == null ? 0 : t.length;
4886 return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
4887 }
4888
4889 public void forEach(Consumer<? super Map.Entry<K,V>> action) {
4890 if (action == null) throw new NullPointerException();
4891 Node<K,V>[] t;
4892 if ((t = map.table) != null) {
4893 Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
4894 for (Node<K,V> p; (p = it.advance()) != null; )
4895 action.accept(new MapEntry<K,V>(p.key, p.val, map));
4896 }
4897 }
4898
4899 }
4900
4901 // -------------------------------------------------------
4902
4903 /**
4904 * Base class for bulk tasks. Repeats some fields and code from
4905 * class Traverser, because we need to subclass CountedCompleter.
4906 */
4907 @SuppressWarnings("serial")
4908 abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
4909 Node<K,V>[] tab; // same as Traverser
4910 Node<K,V> next;
4911 TableStack<K,V> stack, spare;
4912 int index;
4913 int baseIndex;
4914 int baseLimit;
4915 final int baseSize;
4916 int batch; // split control
4917
4918 BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
4919 super(par);
4920 this.batch = b;
4921 this.index = this.baseIndex = i;
4922 if ((this.tab = t) == null)
4923 this.baseSize = this.baseLimit = 0;
4924 else if (par == null)
4925 this.baseSize = this.baseLimit = t.length;
4926 else {
4927 this.baseLimit = f;
4928 this.baseSize = par.baseSize;
4929 }
4930 }
4931
4932 /**
4933 * Same as Traverser version.
4934 */
4935 final Node<K,V> advance() {
4936 Node<K,V> e;
4937 if ((e = next) != null)
4938 e = e.next;
4939 for (;;) {
4940 Node<K,V>[] t; int i, n;
4941 if (e != null)
4942 return next = e;
4943 if (baseIndex >= baseLimit || (t = tab) == null ||
4944 (n = t.length) <= (i = index) || i < 0)
4945 return next = null;
4946 if ((e = tabAt(t, i)) != null && e.hash < 0) {
4947 if (e instanceof ForwardingNode) {
4948 tab = ((ForwardingNode<K,V>)e).nextTable;
4949 e = null;
4950 pushState(t, i, n);
4951 continue;
4952 }
4953 else if (e instanceof TreeBin)
4954 e = ((TreeBin<K,V>)e).first;
4955 else
4956 e = null;
4957 }
4958 if (stack != null)
4959 recoverState(n);
4960 else if ((index = i + baseSize) >= n)
4961 index = ++baseIndex;
4962 }
4963 }
4964
4965 private void pushState(Node<K,V>[] t, int i, int n) {
4966 TableStack<K,V> s = spare;
4967 if (s != null)
4968 spare = s.next;
4969 else
4970 s = new TableStack<K,V>();
4971 s.tab = t;
4972 s.length = n;
4973 s.index = i;
4974 s.next = stack;
4975 stack = s;
4976 }
4977
4978 private void recoverState(int n) {
4979 TableStack<K,V> s; int len;
4980 while ((s = stack) != null && (index += (len = s.length)) >= n) {
4981 n = len;
4982 index = s.index;
4983 tab = s.tab;
4984 s.tab = null;
4985 TableStack<K,V> next = s.next;
4986 s.next = spare; // save for reuse
4987 stack = next;
4988 spare = s;
4989 }
4990 if (s == null && (index += baseSize) >= n)
4991 index = ++baseIndex;
4992 }
4993 }
4994
4995 /*
4996 * Task classes. Coded in a regular but ugly format/style to
4997 * simplify checks that each variant differs in the right way from
4998 * others. The null screenings exist because compilers cannot tell
4999 * that we've already null-checked task arguments, so we force
5000 * simplest hoisted bypass to help avoid convoluted traps.
5001 */
5002 @SuppressWarnings("serial")
5003 static final class ForEachKeyTask<K,V>
5004 extends BulkTask<K,V,Void> {
5005 final Consumer<? super K> action;
5006 ForEachKeyTask
5007 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5008 Consumer<? super K> action) {
5009 super(p, b, i, f, t);
5010 this.action = action;
5011 }
5012 public final void compute() {
5013 final Consumer<? super K> action;
5014 if ((action = this.action) != null) {
5015 for (int i = baseIndex, f, h; batch > 0 &&
5016 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5017 addToPendingCount(1);
5018 new ForEachKeyTask<K,V>
5019 (this, batch >>>= 1, baseLimit = h, f, tab,
5020 action).fork();
5021 }
5022 for (Node<K,V> p; (p = advance()) != null;)
5023 action.accept(p.key);
5024 propagateCompletion();
5025 }
5026 }
5027 }
5028
5029 @SuppressWarnings("serial")
5030 static final class ForEachValueTask<K,V>
5031 extends BulkTask<K,V,Void> {
5032 final Consumer<? super V> action;
5033 ForEachValueTask
5034 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5035 Consumer<? super V> action) {
5036 super(p, b, i, f, t);
5037 this.action = action;
5038 }
5039 public final void compute() {
5040 final Consumer<? super V> action;
5041 if ((action = this.action) != null) {
5042 for (int i = baseIndex, f, h; batch > 0 &&
5043 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5044 addToPendingCount(1);
5045 new ForEachValueTask<K,V>
5046 (this, batch >>>= 1, baseLimit = h, f, tab,
5047 action).fork();
5048 }
5049 for (Node<K,V> p; (p = advance()) != null;)
5050 action.accept(p.val);
5051 propagateCompletion();
5052 }
5053 }
5054 }
5055
5056 @SuppressWarnings("serial")
5057 static final class ForEachEntryTask<K,V>
5058 extends BulkTask<K,V,Void> {
5059 final Consumer<? super Entry<K,V>> action;
5060 ForEachEntryTask
5061 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5062 Consumer<? super Entry<K,V>> action) {
5063 super(p, b, i, f, t);
5064 this.action = action;
5065 }
5066 public final void compute() {
5067 final Consumer<? super Entry<K,V>> action;
5068 if ((action = this.action) != null) {
5069 for (int i = baseIndex, f, h; batch > 0 &&
5070 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5071 addToPendingCount(1);
5072 new ForEachEntryTask<K,V>
5073 (this, batch >>>= 1, baseLimit = h, f, tab,
5074 action).fork();
5075 }
5076 for (Node<K,V> p; (p = advance()) != null; )
5077 action.accept(p);
5078 propagateCompletion();
5079 }
5080 }
5081 }
5082
5083 @SuppressWarnings("serial")
5084 static final class ForEachMappingTask<K,V>
5085 extends BulkTask<K,V,Void> {
5086 final BiConsumer<? super K, ? super V> action;
5087 ForEachMappingTask
5088 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5089 BiConsumer<? super K,? super V> action) {
5090 super(p, b, i, f, t);
5091 this.action = action;
5092 }
5093 public final void compute() {
5094 final BiConsumer<? super K, ? super V> action;
5095 if ((action = this.action) != null) {
5096 for (int i = baseIndex, f, h; batch > 0 &&
5097 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5098 addToPendingCount(1);
5099 new ForEachMappingTask<K,V>
5100 (this, batch >>>= 1, baseLimit = h, f, tab,
5101 action).fork();
5102 }
5103 for (Node<K,V> p; (p = advance()) != null; )
5104 action.accept(p.key, p.val);
5105 propagateCompletion();
5106 }
5107 }
5108 }
5109
5110 @SuppressWarnings("serial")
5111 static final class ForEachTransformedKeyTask<K,V,U>
5112 extends BulkTask<K,V,Void> {
5113 final Function<? super K, ? extends U> transformer;
5114 final Consumer<? super U> action;
5115 ForEachTransformedKeyTask
5116 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5117 Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
5118 super(p, b, i, f, t);
5119 this.transformer = transformer; this.action = action;
5120 }
5121 public final void compute() {
5122 final Function<? super K, ? extends U> transformer;
5123 final Consumer<? super U> action;
5124 if ((transformer = this.transformer) != null &&
5125 (action = this.action) != null) {
5126 for (int i = baseIndex, f, h; batch > 0 &&
5127 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5128 addToPendingCount(1);
5129 new ForEachTransformedKeyTask<K,V,U>
5130 (this, batch >>>= 1, baseLimit = h, f, tab,
5131 transformer, action).fork();
5132 }
5133 for (Node<K,V> p; (p = advance()) != null; ) {
5134 U u;
5135 if ((u = transformer.apply(p.key)) != null)
5136 action.accept(u);
5137 }
5138 propagateCompletion();
5139 }
5140 }
5141 }
5142
5143 @SuppressWarnings("serial")
5144 static final class ForEachTransformedValueTask<K,V,U>
5145 extends BulkTask<K,V,Void> {
5146 final Function<? super V, ? extends U> transformer;
5147 final Consumer<? super U> action;
5148 ForEachTransformedValueTask
5149 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5150 Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
5151 super(p, b, i, f, t);
5152 this.transformer = transformer; this.action = action;
5153 }
5154 public final void compute() {
5155 final Function<? super V, ? extends U> transformer;
5156 final Consumer<? super U> action;
5157 if ((transformer = this.transformer) != null &&
5158 (action = this.action) != null) {
5159 for (int i = baseIndex, f, h; batch > 0 &&
5160 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5161 addToPendingCount(1);
5162 new ForEachTransformedValueTask<K,V,U>
5163 (this, batch >>>= 1, baseLimit = h, f, tab,
5164 transformer, action).fork();
5165 }
5166 for (Node<K,V> p; (p = advance()) != null; ) {
5167 U u;
5168 if ((u = transformer.apply(p.val)) != null)
5169 action.accept(u);
5170 }
5171 propagateCompletion();
5172 }
5173 }
5174 }
5175
5176 @SuppressWarnings("serial")
5177 static final class ForEachTransformedEntryTask<K,V,U>
5178 extends BulkTask<K,V,Void> {
5179 final Function<Map.Entry<K,V>, ? extends U> transformer;
5180 final Consumer<? super U> action;
5181 ForEachTransformedEntryTask
5182 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5183 Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
5184 super(p, b, i, f, t);
5185 this.transformer = transformer; this.action = action;
5186 }
5187 public final void compute() {
5188 final Function<Map.Entry<K,V>, ? extends U> transformer;
5189 final Consumer<? super U> action;
5190 if ((transformer = this.transformer) != null &&
5191 (action = this.action) != null) {
5192 for (int i = baseIndex, f, h; batch > 0 &&
5193 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5194 addToPendingCount(1);
5195 new ForEachTransformedEntryTask<K,V,U>
5196 (this, batch >>>= 1, baseLimit = h, f, tab,
5197 transformer, action).fork();
5198 }
5199 for (Node<K,V> p; (p = advance()) != null; ) {
5200 U u;
5201 if ((u = transformer.apply(p)) != null)
5202 action.accept(u);
5203 }
5204 propagateCompletion();
5205 }
5206 }
5207 }
5208
5209 @SuppressWarnings("serial")
5210 static final class ForEachTransformedMappingTask<K,V,U>
5211 extends BulkTask<K,V,Void> {
5212 final BiFunction<? super K, ? super V, ? extends U> transformer;
5213 final Consumer<? super U> action;
5214 ForEachTransformedMappingTask
5215 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5216 BiFunction<? super K, ? super V, ? extends U> transformer,
5217 Consumer<? super U> action) {
5218 super(p, b, i, f, t);
5219 this.transformer = transformer; this.action = action;
5220 }
5221 public final void compute() {
5222 final BiFunction<? super K, ? super V, ? extends U> transformer;
5223 final Consumer<? super U> action;
5224 if ((transformer = this.transformer) != null &&
5225 (action = this.action) != null) {
5226 for (int i = baseIndex, f, h; batch > 0 &&
5227 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5228 addToPendingCount(1);
5229 new ForEachTransformedMappingTask<K,V,U>
5230 (this, batch >>>= 1, baseLimit = h, f, tab,
5231 transformer, action).fork();
5232 }
5233 for (Node<K,V> p; (p = advance()) != null; ) {
5234 U u;
5235 if ((u = transformer.apply(p.key, p.val)) != null)
5236 action.accept(u);
5237 }
5238 propagateCompletion();
5239 }
5240 }
5241 }
5242
5243 @SuppressWarnings("serial")
5244 static final class SearchKeysTask<K,V,U>
5245 extends BulkTask<K,V,U> {
5246 final Function<? super K, ? extends U> searchFunction;
5247 final AtomicReference<U> result;
5248 SearchKeysTask
5249 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5250 Function<? super K, ? extends U> searchFunction,
5251 AtomicReference<U> result) {
5252 super(p, b, i, f, t);
5253 this.searchFunction = searchFunction; this.result = result;
5254 }
5255 public final U getRawResult() { return result.get(); }
5256 public final void compute() {
5257 final Function<? super K, ? extends U> searchFunction;
5258 final AtomicReference<U> result;
5259 if ((searchFunction = this.searchFunction) != null &&
5260 (result = this.result) != null) {
5261 for (int i = baseIndex, f, h; batch > 0 &&
5262 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5263 if (result.get() != null)
5264 return;
5265 addToPendingCount(1);
5266 new SearchKeysTask<K,V,U>
5267 (this, batch >>>= 1, baseLimit = h, f, tab,
5268 searchFunction, result).fork();
5269 }
5270 while (result.get() == null) {
5271 U u;
5272 Node<K,V> p;
5273 if ((p = advance()) == null) {
5274 propagateCompletion();
5275 break;
5276 }
5277 if ((u = searchFunction.apply(p.key)) != null) {
5278 if (result.compareAndSet(null, u))
5279 quietlyCompleteRoot();
5280 break;
5281 }
5282 }
5283 }
5284 }
5285 }
5286
5287 @SuppressWarnings("serial")
5288 static final class SearchValuesTask<K,V,U>
5289 extends BulkTask<K,V,U> {
5290 final Function<? super V, ? extends U> searchFunction;
5291 final AtomicReference<U> result;
5292 SearchValuesTask
5293 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5294 Function<? super V, ? extends U> searchFunction,
5295 AtomicReference<U> result) {
5296 super(p, b, i, f, t);
5297 this.searchFunction = searchFunction; this.result = result;
5298 }
5299 public final U getRawResult() { return result.get(); }
5300 public final void compute() {
5301 final Function<? super V, ? extends U> searchFunction;
5302 final AtomicReference<U> result;
5303 if ((searchFunction = this.searchFunction) != null &&
5304 (result = this.result) != null) {
5305 for (int i = baseIndex, f, h; batch > 0 &&
5306 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5307 if (result.get() != null)
5308 return;
5309 addToPendingCount(1);
5310 new SearchValuesTask<K,V,U>
5311 (this, batch >>>= 1, baseLimit = h, f, tab,
5312 searchFunction, result).fork();
5313 }
5314 while (result.get() == null) {
5315 U u;
5316 Node<K,V> p;
5317 if ((p = advance()) == null) {
5318 propagateCompletion();
5319 break;
5320 }
5321 if ((u = searchFunction.apply(p.val)) != null) {
5322 if (result.compareAndSet(null, u))
5323 quietlyCompleteRoot();
5324 break;
5325 }
5326 }
5327 }
5328 }
5329 }
5330
5331 @SuppressWarnings("serial")
5332 static final class SearchEntriesTask<K,V,U>
5333 extends BulkTask<K,V,U> {
5334 final Function<Entry<K,V>, ? extends U> searchFunction;
5335 final AtomicReference<U> result;
5336 SearchEntriesTask
5337 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5338 Function<Entry<K,V>, ? extends U> searchFunction,
5339 AtomicReference<U> result) {
5340 super(p, b, i, f, t);
5341 this.searchFunction = searchFunction; this.result = result;
5342 }
5343 public final U getRawResult() { return result.get(); }
5344 public final void compute() {
5345 final Function<Entry<K,V>, ? extends U> searchFunction;
5346 final AtomicReference<U> result;
5347 if ((searchFunction = this.searchFunction) != null &&
5348 (result = this.result) != null) {
5349 for (int i = baseIndex, f, h; batch > 0 &&
5350 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5351 if (result.get() != null)
5352 return;
5353 addToPendingCount(1);
5354 new SearchEntriesTask<K,V,U>
5355 (this, batch >>>= 1, baseLimit = h, f, tab,
5356 searchFunction, result).fork();
5357 }
5358 while (result.get() == null) {
5359 U u;
5360 Node<K,V> p;
5361 if ((p = advance()) == null) {
5362 propagateCompletion();
5363 break;
5364 }
5365 if ((u = searchFunction.apply(p)) != null) {
5366 if (result.compareAndSet(null, u))
5367 quietlyCompleteRoot();
5368 return;
5369 }
5370 }
5371 }
5372 }
5373 }
5374
5375 @SuppressWarnings("serial")
5376 static final class SearchMappingsTask<K,V,U>
5377 extends BulkTask<K,V,U> {
5378 final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5379 final AtomicReference<U> result;
5380 SearchMappingsTask
5381 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5382 BiFunction<? super K, ? super V, ? extends U> searchFunction,
5383 AtomicReference<U> result) {
5384 super(p, b, i, f, t);
5385 this.searchFunction = searchFunction; this.result = result;
5386 }
5387 public final U getRawResult() { return result.get(); }
5388 public final void compute() {
5389 final BiFunction<? super K, ? super V, ? extends U> searchFunction;
5390 final AtomicReference<U> result;
5391 if ((searchFunction = this.searchFunction) != null &&
5392 (result = this.result) != null) {
5393 for (int i = baseIndex, f, h; batch > 0 &&
5394 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5395 if (result.get() != null)
5396 return;
5397 addToPendingCount(1);
5398 new SearchMappingsTask<K,V,U>
5399 (this, batch >>>= 1, baseLimit = h, f, tab,
5400 searchFunction, result).fork();
5401 }
5402 while (result.get() == null) {
5403 U u;
5404 Node<K,V> p;
5405 if ((p = advance()) == null) {
5406 propagateCompletion();
5407 break;
5408 }
5409 if ((u = searchFunction.apply(p.key, p.val)) != null) {
5410 if (result.compareAndSet(null, u))
5411 quietlyCompleteRoot();
5412 break;
5413 }
5414 }
5415 }
5416 }
5417 }
5418
5419 @SuppressWarnings("serial")
5420 static final class ReduceKeysTask<K,V>
5421 extends BulkTask<K,V,K> {
5422 final BiFunction<? super K, ? super K, ? extends K> reducer;
5423 K result;
5424 ReduceKeysTask<K,V> rights, nextRight;
5425 ReduceKeysTask
5426 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5427 ReduceKeysTask<K,V> nextRight,
5428 BiFunction<? super K, ? super K, ? extends K> reducer) {
5429 super(p, b, i, f, t); this.nextRight = nextRight;
5430 this.reducer = reducer;
5431 }
5432 public final K getRawResult() { return result; }
5433 public final void compute() {
5434 final BiFunction<? super K, ? super K, ? extends K> reducer;
5435 if ((reducer = this.reducer) != null) {
5436 for (int i = baseIndex, f, h; batch > 0 &&
5437 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5438 addToPendingCount(1);
5439 (rights = new ReduceKeysTask<K,V>
5440 (this, batch >>>= 1, baseLimit = h, f, tab,
5441 rights, reducer)).fork();
5442 }
5443 K r = null;
5444 for (Node<K,V> p; (p = advance()) != null; ) {
5445 K u = p.key;
5446 r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
5447 }
5448 result = r;
5449 CountedCompleter<?> c;
5450 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5451 @SuppressWarnings("unchecked")
5452 ReduceKeysTask<K,V>
5453 t = (ReduceKeysTask<K,V>)c,
5454 s = t.rights;
5455 while (s != null) {
5456 K tr, sr;
5457 if ((sr = s.result) != null)
5458 t.result = (((tr = t.result) == null) ? sr :
5459 reducer.apply(tr, sr));
5460 s = t.rights = s.nextRight;
5461 }
5462 }
5463 }
5464 }
5465 }
5466
5467 @SuppressWarnings("serial")
5468 static final class ReduceValuesTask<K,V>
5469 extends BulkTask<K,V,V> {
5470 final BiFunction<? super V, ? super V, ? extends V> reducer;
5471 V result;
5472 ReduceValuesTask<K,V> rights, nextRight;
5473 ReduceValuesTask
5474 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5475 ReduceValuesTask<K,V> nextRight,
5476 BiFunction<? super V, ? super V, ? extends V> reducer) {
5477 super(p, b, i, f, t); this.nextRight = nextRight;
5478 this.reducer = reducer;
5479 }
5480 public final V getRawResult() { return result; }
5481 public final void compute() {
5482 final BiFunction<? super V, ? super V, ? extends V> reducer;
5483 if ((reducer = this.reducer) != null) {
5484 for (int i = baseIndex, f, h; batch > 0 &&
5485 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5486 addToPendingCount(1);
5487 (rights = new ReduceValuesTask<K,V>
5488 (this, batch >>>= 1, baseLimit = h, f, tab,
5489 rights, reducer)).fork();
5490 }
5491 V r = null;
5492 for (Node<K,V> p; (p = advance()) != null; ) {
5493 V v = p.val;
5494 r = (r == null) ? v : reducer.apply(r, v);
5495 }
5496 result = r;
5497 CountedCompleter<?> c;
5498 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5499 @SuppressWarnings("unchecked")
5500 ReduceValuesTask<K,V>
5501 t = (ReduceValuesTask<K,V>)c,
5502 s = t.rights;
5503 while (s != null) {
5504 V tr, sr;
5505 if ((sr = s.result) != null)
5506 t.result = (((tr = t.result) == null) ? sr :
5507 reducer.apply(tr, sr));
5508 s = t.rights = s.nextRight;
5509 }
5510 }
5511 }
5512 }
5513 }
5514
5515 @SuppressWarnings("serial")
5516 static final class ReduceEntriesTask<K,V>
5517 extends BulkTask<K,V,Map.Entry<K,V>> {
5518 final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5519 Map.Entry<K,V> result;
5520 ReduceEntriesTask<K,V> rights, nextRight;
5521 ReduceEntriesTask
5522 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5523 ReduceEntriesTask<K,V> nextRight,
5524 BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
5525 super(p, b, i, f, t); this.nextRight = nextRight;
5526 this.reducer = reducer;
5527 }
5528 public final Map.Entry<K,V> getRawResult() { return result; }
5529 public final void compute() {
5530 final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
5531 if ((reducer = this.reducer) != null) {
5532 for (int i = baseIndex, f, h; batch > 0 &&
5533 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5534 addToPendingCount(1);
5535 (rights = new ReduceEntriesTask<K,V>
5536 (this, batch >>>= 1, baseLimit = h, f, tab,
5537 rights, reducer)).fork();
5538 }
5539 Map.Entry<K,V> r = null;
5540 for (Node<K,V> p; (p = advance()) != null; )
5541 r = (r == null) ? p : reducer.apply(r, p);
5542 result = r;
5543 CountedCompleter<?> c;
5544 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5545 @SuppressWarnings("unchecked")
5546 ReduceEntriesTask<K,V>
5547 t = (ReduceEntriesTask<K,V>)c,
5548 s = t.rights;
5549 while (s != null) {
5550 Map.Entry<K,V> tr, sr;
5551 if ((sr = s.result) != null)
5552 t.result = (((tr = t.result) == null) ? sr :
5553 reducer.apply(tr, sr));
5554 s = t.rights = s.nextRight;
5555 }
5556 }
5557 }
5558 }
5559 }
5560
5561 @SuppressWarnings("serial")
5562 static final class MapReduceKeysTask<K,V,U>
5563 extends BulkTask<K,V,U> {
5564 final Function<? super K, ? extends U> transformer;
5565 final BiFunction<? super U, ? super U, ? extends U> reducer;
5566 U result;
5567 MapReduceKeysTask<K,V,U> rights, nextRight;
5568 MapReduceKeysTask
5569 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5570 MapReduceKeysTask<K,V,U> nextRight,
5571 Function<? super K, ? extends U> transformer,
5572 BiFunction<? super U, ? super U, ? extends U> reducer) {
5573 super(p, b, i, f, t); this.nextRight = nextRight;
5574 this.transformer = transformer;
5575 this.reducer = reducer;
5576 }
5577 public final U getRawResult() { return result; }
5578 public final void compute() {
5579 final Function<? super K, ? extends U> transformer;
5580 final BiFunction<? super U, ? super U, ? extends U> reducer;
5581 if ((transformer = this.transformer) != null &&
5582 (reducer = this.reducer) != null) {
5583 for (int i = baseIndex, f, h; batch > 0 &&
5584 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5585 addToPendingCount(1);
5586 (rights = new MapReduceKeysTask<K,V,U>
5587 (this, batch >>>= 1, baseLimit = h, f, tab,
5588 rights, transformer, reducer)).fork();
5589 }
5590 U r = null;
5591 for (Node<K,V> p; (p = advance()) != null; ) {
5592 U u;
5593 if ((u = transformer.apply(p.key)) != null)
5594 r = (r == null) ? u : reducer.apply(r, u);
5595 }
5596 result = r;
5597 CountedCompleter<?> c;
5598 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5599 @SuppressWarnings("unchecked")
5600 MapReduceKeysTask<K,V,U>
5601 t = (MapReduceKeysTask<K,V,U>)c,
5602 s = t.rights;
5603 while (s != null) {
5604 U tr, sr;
5605 if ((sr = s.result) != null)
5606 t.result = (((tr = t.result) == null) ? sr :
5607 reducer.apply(tr, sr));
5608 s = t.rights = s.nextRight;
5609 }
5610 }
5611 }
5612 }
5613 }
5614
5615 @SuppressWarnings("serial")
5616 static final class MapReduceValuesTask<K,V,U>
5617 extends BulkTask<K,V,U> {
5618 final Function<? super V, ? extends U> transformer;
5619 final BiFunction<? super U, ? super U, ? extends U> reducer;
5620 U result;
5621 MapReduceValuesTask<K,V,U> rights, nextRight;
5622 MapReduceValuesTask
5623 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5624 MapReduceValuesTask<K,V,U> nextRight,
5625 Function<? super V, ? extends U> transformer,
5626 BiFunction<? super U, ? super U, ? extends U> reducer) {
5627 super(p, b, i, f, t); this.nextRight = nextRight;
5628 this.transformer = transformer;
5629 this.reducer = reducer;
5630 }
5631 public final U getRawResult() { return result; }
5632 public final void compute() {
5633 final Function<? super V, ? extends U> transformer;
5634 final BiFunction<? super U, ? super U, ? extends U> reducer;
5635 if ((transformer = this.transformer) != null &&
5636 (reducer = this.reducer) != null) {
5637 for (int i = baseIndex, f, h; batch > 0 &&
5638 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5639 addToPendingCount(1);
5640 (rights = new MapReduceValuesTask<K,V,U>
5641 (this, batch >>>= 1, baseLimit = h, f, tab,
5642 rights, transformer, reducer)).fork();
5643 }
5644 U r = null;
5645 for (Node<K,V> p; (p = advance()) != null; ) {
5646 U u;
5647 if ((u = transformer.apply(p.val)) != null)
5648 r = (r == null) ? u : reducer.apply(r, u);
5649 }
5650 result = r;
5651 CountedCompleter<?> c;
5652 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5653 @SuppressWarnings("unchecked")
5654 MapReduceValuesTask<K,V,U>
5655 t = (MapReduceValuesTask<K,V,U>)c,
5656 s = t.rights;
5657 while (s != null) {
5658 U tr, sr;
5659 if ((sr = s.result) != null)
5660 t.result = (((tr = t.result) == null) ? sr :
5661 reducer.apply(tr, sr));
5662 s = t.rights = s.nextRight;
5663 }
5664 }
5665 }
5666 }
5667 }
5668
5669 @SuppressWarnings("serial")
5670 static final class MapReduceEntriesTask<K,V,U>
5671 extends BulkTask<K,V,U> {
5672 final Function<Map.Entry<K,V>, ? extends U> transformer;
5673 final BiFunction<? super U, ? super U, ? extends U> reducer;
5674 U result;
5675 MapReduceEntriesTask<K,V,U> rights, nextRight;
5676 MapReduceEntriesTask
5677 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5678 MapReduceEntriesTask<K,V,U> nextRight,
5679 Function<Map.Entry<K,V>, ? extends U> transformer,
5680 BiFunction<? super U, ? super U, ? extends U> reducer) {
5681 super(p, b, i, f, t); this.nextRight = nextRight;
5682 this.transformer = transformer;
5683 this.reducer = reducer;
5684 }
5685 public final U getRawResult() { return result; }
5686 public final void compute() {
5687 final Function<Map.Entry<K,V>, ? extends U> transformer;
5688 final BiFunction<? super U, ? super U, ? extends U> reducer;
5689 if ((transformer = this.transformer) != null &&
5690 (reducer = this.reducer) != null) {
5691 for (int i = baseIndex, f, h; batch > 0 &&
5692 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5693 addToPendingCount(1);
5694 (rights = new MapReduceEntriesTask<K,V,U>
5695 (this, batch >>>= 1, baseLimit = h, f, tab,
5696 rights, transformer, reducer)).fork();
5697 }
5698 U r = null;
5699 for (Node<K,V> p; (p = advance()) != null; ) {
5700 U u;
5701 if ((u = transformer.apply(p)) != null)
5702 r = (r == null) ? u : reducer.apply(r, u);
5703 }
5704 result = r;
5705 CountedCompleter<?> c;
5706 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5707 @SuppressWarnings("unchecked")
5708 MapReduceEntriesTask<K,V,U>
5709 t = (MapReduceEntriesTask<K,V,U>)c,
5710 s = t.rights;
5711 while (s != null) {
5712 U tr, sr;
5713 if ((sr = s.result) != null)
5714 t.result = (((tr = t.result) == null) ? sr :
5715 reducer.apply(tr, sr));
5716 s = t.rights = s.nextRight;
5717 }
5718 }
5719 }
5720 }
5721 }
5722
5723 @SuppressWarnings("serial")
5724 static final class MapReduceMappingsTask<K,V,U>
5725 extends BulkTask<K,V,U> {
5726 final BiFunction<? super K, ? super V, ? extends U> transformer;
5727 final BiFunction<? super U, ? super U, ? extends U> reducer;
5728 U result;
5729 MapReduceMappingsTask<K,V,U> rights, nextRight;
5730 MapReduceMappingsTask
5731 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5732 MapReduceMappingsTask<K,V,U> nextRight,
5733 BiFunction<? super K, ? super V, ? extends U> transformer,
5734 BiFunction<? super U, ? super U, ? extends U> reducer) {
5735 super(p, b, i, f, t); this.nextRight = nextRight;
5736 this.transformer = transformer;
5737 this.reducer = reducer;
5738 }
5739 public final U getRawResult() { return result; }
5740 public final void compute() {
5741 final BiFunction<? super K, ? super V, ? extends U> transformer;
5742 final BiFunction<? super U, ? super U, ? extends U> reducer;
5743 if ((transformer = this.transformer) != null &&
5744 (reducer = this.reducer) != null) {
5745 for (int i = baseIndex, f, h; batch > 0 &&
5746 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5747 addToPendingCount(1);
5748 (rights = new MapReduceMappingsTask<K,V,U>
5749 (this, batch >>>= 1, baseLimit = h, f, tab,
5750 rights, transformer, reducer)).fork();
5751 }
5752 U r = null;
5753 for (Node<K,V> p; (p = advance()) != null; ) {
5754 U u;
5755 if ((u = transformer.apply(p.key, p.val)) != null)
5756 r = (r == null) ? u : reducer.apply(r, u);
5757 }
5758 result = r;
5759 CountedCompleter<?> c;
5760 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5761 @SuppressWarnings("unchecked")
5762 MapReduceMappingsTask<K,V,U>
5763 t = (MapReduceMappingsTask<K,V,U>)c,
5764 s = t.rights;
5765 while (s != null) {
5766 U tr, sr;
5767 if ((sr = s.result) != null)
5768 t.result = (((tr = t.result) == null) ? sr :
5769 reducer.apply(tr, sr));
5770 s = t.rights = s.nextRight;
5771 }
5772 }
5773 }
5774 }
5775 }
5776
5777 @SuppressWarnings("serial")
5778 static final class MapReduceKeysToDoubleTask<K,V>
5779 extends BulkTask<K,V,Double> {
5780 final ToDoubleFunction<? super K> transformer;
5781 final DoubleBinaryOperator reducer;
5782 final double basis;
5783 double result;
5784 MapReduceKeysToDoubleTask<K,V> rights, nextRight;
5785 MapReduceKeysToDoubleTask
5786 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5787 MapReduceKeysToDoubleTask<K,V> nextRight,
5788 ToDoubleFunction<? super K> transformer,
5789 double basis,
5790 DoubleBinaryOperator reducer) {
5791 super(p, b, i, f, t); this.nextRight = nextRight;
5792 this.transformer = transformer;
5793 this.basis = basis; this.reducer = reducer;
5794 }
5795 public final Double getRawResult() { return result; }
5796 public final void compute() {
5797 final ToDoubleFunction<? super K> transformer;
5798 final DoubleBinaryOperator reducer;
5799 if ((transformer = this.transformer) != null &&
5800 (reducer = this.reducer) != null) {
5801 double r = this.basis;
5802 for (int i = baseIndex, f, h; batch > 0 &&
5803 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5804 addToPendingCount(1);
5805 (rights = new MapReduceKeysToDoubleTask<K,V>
5806 (this, batch >>>= 1, baseLimit = h, f, tab,
5807 rights, transformer, r, reducer)).fork();
5808 }
5809 for (Node<K,V> p; (p = advance()) != null; )
5810 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
5811 result = r;
5812 CountedCompleter<?> c;
5813 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5814 @SuppressWarnings("unchecked")
5815 MapReduceKeysToDoubleTask<K,V>
5816 t = (MapReduceKeysToDoubleTask<K,V>)c,
5817 s = t.rights;
5818 while (s != null) {
5819 t.result = reducer.applyAsDouble(t.result, s.result);
5820 s = t.rights = s.nextRight;
5821 }
5822 }
5823 }
5824 }
5825 }
5826
5827 @SuppressWarnings("serial")
5828 static final class MapReduceValuesToDoubleTask<K,V>
5829 extends BulkTask<K,V,Double> {
5830 final ToDoubleFunction<? super V> transformer;
5831 final DoubleBinaryOperator reducer;
5832 final double basis;
5833 double result;
5834 MapReduceValuesToDoubleTask<K,V> rights, nextRight;
5835 MapReduceValuesToDoubleTask
5836 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5837 MapReduceValuesToDoubleTask<K,V> nextRight,
5838 ToDoubleFunction<? super V> transformer,
5839 double basis,
5840 DoubleBinaryOperator reducer) {
5841 super(p, b, i, f, t); this.nextRight = nextRight;
5842 this.transformer = transformer;
5843 this.basis = basis; this.reducer = reducer;
5844 }
5845 public final Double getRawResult() { return result; }
5846 public final void compute() {
5847 final ToDoubleFunction<? super V> transformer;
5848 final DoubleBinaryOperator reducer;
5849 if ((transformer = this.transformer) != null &&
5850 (reducer = this.reducer) != null) {
5851 double r = this.basis;
5852 for (int i = baseIndex, f, h; batch > 0 &&
5853 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5854 addToPendingCount(1);
5855 (rights = new MapReduceValuesToDoubleTask<K,V>
5856 (this, batch >>>= 1, baseLimit = h, f, tab,
5857 rights, transformer, r, reducer)).fork();
5858 }
5859 for (Node<K,V> p; (p = advance()) != null; )
5860 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
5861 result = r;
5862 CountedCompleter<?> c;
5863 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5864 @SuppressWarnings("unchecked")
5865 MapReduceValuesToDoubleTask<K,V>
5866 t = (MapReduceValuesToDoubleTask<K,V>)c,
5867 s = t.rights;
5868 while (s != null) {
5869 t.result = reducer.applyAsDouble(t.result, s.result);
5870 s = t.rights = s.nextRight;
5871 }
5872 }
5873 }
5874 }
5875 }
5876
5877 @SuppressWarnings("serial")
5878 static final class MapReduceEntriesToDoubleTask<K,V>
5879 extends BulkTask<K,V,Double> {
5880 final ToDoubleFunction<Map.Entry<K,V>> transformer;
5881 final DoubleBinaryOperator reducer;
5882 final double basis;
5883 double result;
5884 MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
5885 MapReduceEntriesToDoubleTask
5886 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5887 MapReduceEntriesToDoubleTask<K,V> nextRight,
5888 ToDoubleFunction<Map.Entry<K,V>> transformer,
5889 double basis,
5890 DoubleBinaryOperator reducer) {
5891 super(p, b, i, f, t); this.nextRight = nextRight;
5892 this.transformer = transformer;
5893 this.basis = basis; this.reducer = reducer;
5894 }
5895 public final Double getRawResult() { return result; }
5896 public final void compute() {
5897 final ToDoubleFunction<Map.Entry<K,V>> transformer;
5898 final DoubleBinaryOperator reducer;
5899 if ((transformer = this.transformer) != null &&
5900 (reducer = this.reducer) != null) {
5901 double r = this.basis;
5902 for (int i = baseIndex, f, h; batch > 0 &&
5903 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5904 addToPendingCount(1);
5905 (rights = new MapReduceEntriesToDoubleTask<K,V>
5906 (this, batch >>>= 1, baseLimit = h, f, tab,
5907 rights, transformer, r, reducer)).fork();
5908 }
5909 for (Node<K,V> p; (p = advance()) != null; )
5910 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
5911 result = r;
5912 CountedCompleter<?> c;
5913 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5914 @SuppressWarnings("unchecked")
5915 MapReduceEntriesToDoubleTask<K,V>
5916 t = (MapReduceEntriesToDoubleTask<K,V>)c,
5917 s = t.rights;
5918 while (s != null) {
5919 t.result = reducer.applyAsDouble(t.result, s.result);
5920 s = t.rights = s.nextRight;
5921 }
5922 }
5923 }
5924 }
5925 }
5926
5927 @SuppressWarnings("serial")
5928 static final class MapReduceMappingsToDoubleTask<K,V>
5929 extends BulkTask<K,V,Double> {
5930 final ToDoubleBiFunction<? super K, ? super V> transformer;
5931 final DoubleBinaryOperator reducer;
5932 final double basis;
5933 double result;
5934 MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
5935 MapReduceMappingsToDoubleTask
5936 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5937 MapReduceMappingsToDoubleTask<K,V> nextRight,
5938 ToDoubleBiFunction<? super K, ? super V> transformer,
5939 double basis,
5940 DoubleBinaryOperator reducer) {
5941 super(p, b, i, f, t); this.nextRight = nextRight;
5942 this.transformer = transformer;
5943 this.basis = basis; this.reducer = reducer;
5944 }
5945 public final Double getRawResult() { return result; }
5946 public final void compute() {
5947 final ToDoubleBiFunction<? super K, ? super V> transformer;
5948 final DoubleBinaryOperator reducer;
5949 if ((transformer = this.transformer) != null &&
5950 (reducer = this.reducer) != null) {
5951 double r = this.basis;
5952 for (int i = baseIndex, f, h; batch > 0 &&
5953 (h = ((f = baseLimit) + i) >>> 1) > i;) {
5954 addToPendingCount(1);
5955 (rights = new MapReduceMappingsToDoubleTask<K,V>
5956 (this, batch >>>= 1, baseLimit = h, f, tab,
5957 rights, transformer, r, reducer)).fork();
5958 }
5959 for (Node<K,V> p; (p = advance()) != null; )
5960 r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
5961 result = r;
5962 CountedCompleter<?> c;
5963 for (c = firstComplete(); c != null; c = c.nextComplete()) {
5964 @SuppressWarnings("unchecked")
5965 MapReduceMappingsToDoubleTask<K,V>
5966 t = (MapReduceMappingsToDoubleTask<K,V>)c,
5967 s = t.rights;
5968 while (s != null) {
5969 t.result = reducer.applyAsDouble(t.result, s.result);
5970 s = t.rights = s.nextRight;
5971 }
5972 }
5973 }
5974 }
5975 }
5976
5977 @SuppressWarnings("serial")
5978 static final class MapReduceKeysToLongTask<K,V>
5979 extends BulkTask<K,V,Long> {
5980 final ToLongFunction<? super K> transformer;
5981 final LongBinaryOperator reducer;
5982 final long basis;
5983 long result;
5984 MapReduceKeysToLongTask<K,V> rights, nextRight;
5985 MapReduceKeysToLongTask
5986 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
5987 MapReduceKeysToLongTask<K,V> nextRight,
5988 ToLongFunction<? super K> transformer,
5989 long basis,
5990 LongBinaryOperator reducer) {
5991 super(p, b, i, f, t); this.nextRight = nextRight;
5992 this.transformer = transformer;
5993 this.basis = basis; this.reducer = reducer;
5994 }
5995 public final Long getRawResult() { return result; }
5996 public final void compute() {
5997 final ToLongFunction<? super K> transformer;
5998 final LongBinaryOperator reducer;
5999 if ((transformer = this.transformer) != null &&
6000 (reducer = this.reducer) != null) {
6001 long r = this.basis;
6002 for (int i = baseIndex, f, h; batch > 0 &&
6003 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6004 addToPendingCount(1);
6005 (rights = new MapReduceKeysToLongTask<K,V>
6006 (this, batch >>>= 1, baseLimit = h, f, tab,
6007 rights, transformer, r, reducer)).fork();
6008 }
6009 for (Node<K,V> p; (p = advance()) != null; )
6010 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
6011 result = r;
6012 CountedCompleter<?> c;
6013 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6014 @SuppressWarnings("unchecked")
6015 MapReduceKeysToLongTask<K,V>
6016 t = (MapReduceKeysToLongTask<K,V>)c,
6017 s = t.rights;
6018 while (s != null) {
6019 t.result = reducer.applyAsLong(t.result, s.result);
6020 s = t.rights = s.nextRight;
6021 }
6022 }
6023 }
6024 }
6025 }
6026
6027 @SuppressWarnings("serial")
6028 static final class MapReduceValuesToLongTask<K,V>
6029 extends BulkTask<K,V,Long> {
6030 final ToLongFunction<? super V> transformer;
6031 final LongBinaryOperator reducer;
6032 final long basis;
6033 long result;
6034 MapReduceValuesToLongTask<K,V> rights, nextRight;
6035 MapReduceValuesToLongTask
6036 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6037 MapReduceValuesToLongTask<K,V> nextRight,
6038 ToLongFunction<? super V> transformer,
6039 long basis,
6040 LongBinaryOperator reducer) {
6041 super(p, b, i, f, t); this.nextRight = nextRight;
6042 this.transformer = transformer;
6043 this.basis = basis; this.reducer = reducer;
6044 }
6045 public final Long getRawResult() { return result; }
6046 public final void compute() {
6047 final ToLongFunction<? super V> transformer;
6048 final LongBinaryOperator reducer;
6049 if ((transformer = this.transformer) != null &&
6050 (reducer = this.reducer) != null) {
6051 long r = this.basis;
6052 for (int i = baseIndex, f, h; batch > 0 &&
6053 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6054 addToPendingCount(1);
6055 (rights = new MapReduceValuesToLongTask<K,V>
6056 (this, batch >>>= 1, baseLimit = h, f, tab,
6057 rights, transformer, r, reducer)).fork();
6058 }
6059 for (Node<K,V> p; (p = advance()) != null; )
6060 r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
6061 result = r;
6062 CountedCompleter<?> c;
6063 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6064 @SuppressWarnings("unchecked")
6065 MapReduceValuesToLongTask<K,V>
6066 t = (MapReduceValuesToLongTask<K,V>)c,
6067 s = t.rights;
6068 while (s != null) {
6069 t.result = reducer.applyAsLong(t.result, s.result);
6070 s = t.rights = s.nextRight;
6071 }
6072 }
6073 }
6074 }
6075 }
6076
6077 @SuppressWarnings("serial")
6078 static final class MapReduceEntriesToLongTask<K,V>
6079 extends BulkTask<K,V,Long> {
6080 final ToLongFunction<Map.Entry<K,V>> transformer;
6081 final LongBinaryOperator reducer;
6082 final long basis;
6083 long result;
6084 MapReduceEntriesToLongTask<K,V> rights, nextRight;
6085 MapReduceEntriesToLongTask
6086 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6087 MapReduceEntriesToLongTask<K,V> nextRight,
6088 ToLongFunction<Map.Entry<K,V>> transformer,
6089 long basis,
6090 LongBinaryOperator reducer) {
6091 super(p, b, i, f, t); this.nextRight = nextRight;
6092 this.transformer = transformer;
6093 this.basis = basis; this.reducer = reducer;
6094 }
6095 public final Long getRawResult() { return result; }
6096 public final void compute() {
6097 final ToLongFunction<Map.Entry<K,V>> transformer;
6098 final LongBinaryOperator reducer;
6099 if ((transformer = this.transformer) != null &&
6100 (reducer = this.reducer) != null) {
6101 long r = this.basis;
6102 for (int i = baseIndex, f, h; batch > 0 &&
6103 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6104 addToPendingCount(1);
6105 (rights = new MapReduceEntriesToLongTask<K,V>
6106 (this, batch >>>= 1, baseLimit = h, f, tab,
6107 rights, transformer, r, reducer)).fork();
6108 }
6109 for (Node<K,V> p; (p = advance()) != null; )
6110 r = reducer.applyAsLong(r, transformer.applyAsLong(p));
6111 result = r;
6112 CountedCompleter<?> c;
6113 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6114 @SuppressWarnings("unchecked")
6115 MapReduceEntriesToLongTask<K,V>
6116 t = (MapReduceEntriesToLongTask<K,V>)c,
6117 s = t.rights;
6118 while (s != null) {
6119 t.result = reducer.applyAsLong(t.result, s.result);
6120 s = t.rights = s.nextRight;
6121 }
6122 }
6123 }
6124 }
6125 }
6126
6127 @SuppressWarnings("serial")
6128 static final class MapReduceMappingsToLongTask<K,V>
6129 extends BulkTask<K,V,Long> {
6130 final ToLongBiFunction<? super K, ? super V> transformer;
6131 final LongBinaryOperator reducer;
6132 final long basis;
6133 long result;
6134 MapReduceMappingsToLongTask<K,V> rights, nextRight;
6135 MapReduceMappingsToLongTask
6136 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6137 MapReduceMappingsToLongTask<K,V> nextRight,
6138 ToLongBiFunction<? super K, ? super V> transformer,
6139 long basis,
6140 LongBinaryOperator reducer) {
6141 super(p, b, i, f, t); this.nextRight = nextRight;
6142 this.transformer = transformer;
6143 this.basis = basis; this.reducer = reducer;
6144 }
6145 public final Long getRawResult() { return result; }
6146 public final void compute() {
6147 final ToLongBiFunction<? super K, ? super V> transformer;
6148 final LongBinaryOperator reducer;
6149 if ((transformer = this.transformer) != null &&
6150 (reducer = this.reducer) != null) {
6151 long r = this.basis;
6152 for (int i = baseIndex, f, h; batch > 0 &&
6153 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6154 addToPendingCount(1);
6155 (rights = new MapReduceMappingsToLongTask<K,V>
6156 (this, batch >>>= 1, baseLimit = h, f, tab,
6157 rights, transformer, r, reducer)).fork();
6158 }
6159 for (Node<K,V> p; (p = advance()) != null; )
6160 r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
6161 result = r;
6162 CountedCompleter<?> c;
6163 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6164 @SuppressWarnings("unchecked")
6165 MapReduceMappingsToLongTask<K,V>
6166 t = (MapReduceMappingsToLongTask<K,V>)c,
6167 s = t.rights;
6168 while (s != null) {
6169 t.result = reducer.applyAsLong(t.result, s.result);
6170 s = t.rights = s.nextRight;
6171 }
6172 }
6173 }
6174 }
6175 }
6176
6177 @SuppressWarnings("serial")
6178 static final class MapReduceKeysToIntTask<K,V>
6179 extends BulkTask<K,V,Integer> {
6180 final ToIntFunction<? super K> transformer;
6181 final IntBinaryOperator reducer;
6182 final int basis;
6183 int result;
6184 MapReduceKeysToIntTask<K,V> rights, nextRight;
6185 MapReduceKeysToIntTask
6186 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6187 MapReduceKeysToIntTask<K,V> nextRight,
6188 ToIntFunction<? super K> transformer,
6189 int basis,
6190 IntBinaryOperator reducer) {
6191 super(p, b, i, f, t); this.nextRight = nextRight;
6192 this.transformer = transformer;
6193 this.basis = basis; this.reducer = reducer;
6194 }
6195 public final Integer getRawResult() { return result; }
6196 public final void compute() {
6197 final ToIntFunction<? super K> transformer;
6198 final IntBinaryOperator reducer;
6199 if ((transformer = this.transformer) != null &&
6200 (reducer = this.reducer) != null) {
6201 int r = this.basis;
6202 for (int i = baseIndex, f, h; batch > 0 &&
6203 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6204 addToPendingCount(1);
6205 (rights = new MapReduceKeysToIntTask<K,V>
6206 (this, batch >>>= 1, baseLimit = h, f, tab,
6207 rights, transformer, r, reducer)).fork();
6208 }
6209 for (Node<K,V> p; (p = advance()) != null; )
6210 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
6211 result = r;
6212 CountedCompleter<?> c;
6213 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6214 @SuppressWarnings("unchecked")
6215 MapReduceKeysToIntTask<K,V>
6216 t = (MapReduceKeysToIntTask<K,V>)c,
6217 s = t.rights;
6218 while (s != null) {
6219 t.result = reducer.applyAsInt(t.result, s.result);
6220 s = t.rights = s.nextRight;
6221 }
6222 }
6223 }
6224 }
6225 }
6226
6227 @SuppressWarnings("serial")
6228 static final class MapReduceValuesToIntTask<K,V>
6229 extends BulkTask<K,V,Integer> {
6230 final ToIntFunction<? super V> transformer;
6231 final IntBinaryOperator reducer;
6232 final int basis;
6233 int result;
6234 MapReduceValuesToIntTask<K,V> rights, nextRight;
6235 MapReduceValuesToIntTask
6236 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6237 MapReduceValuesToIntTask<K,V> nextRight,
6238 ToIntFunction<? super V> transformer,
6239 int basis,
6240 IntBinaryOperator reducer) {
6241 super(p, b, i, f, t); this.nextRight = nextRight;
6242 this.transformer = transformer;
6243 this.basis = basis; this.reducer = reducer;
6244 }
6245 public final Integer getRawResult() { return result; }
6246 public final void compute() {
6247 final ToIntFunction<? super V> transformer;
6248 final IntBinaryOperator reducer;
6249 if ((transformer = this.transformer) != null &&
6250 (reducer = this.reducer) != null) {
6251 int r = this.basis;
6252 for (int i = baseIndex, f, h; batch > 0 &&
6253 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6254 addToPendingCount(1);
6255 (rights = new MapReduceValuesToIntTask<K,V>
6256 (this, batch >>>= 1, baseLimit = h, f, tab,
6257 rights, transformer, r, reducer)).fork();
6258 }
6259 for (Node<K,V> p; (p = advance()) != null; )
6260 r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
6261 result = r;
6262 CountedCompleter<?> c;
6263 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6264 @SuppressWarnings("unchecked")
6265 MapReduceValuesToIntTask<K,V>
6266 t = (MapReduceValuesToIntTask<K,V>)c,
6267 s = t.rights;
6268 while (s != null) {
6269 t.result = reducer.applyAsInt(t.result, s.result);
6270 s = t.rights = s.nextRight;
6271 }
6272 }
6273 }
6274 }
6275 }
6276
6277 @SuppressWarnings("serial")
6278 static final class MapReduceEntriesToIntTask<K,V>
6279 extends BulkTask<K,V,Integer> {
6280 final ToIntFunction<Map.Entry<K,V>> transformer;
6281 final IntBinaryOperator reducer;
6282 final int basis;
6283 int result;
6284 MapReduceEntriesToIntTask<K,V> rights, nextRight;
6285 MapReduceEntriesToIntTask
6286 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6287 MapReduceEntriesToIntTask<K,V> nextRight,
6288 ToIntFunction<Map.Entry<K,V>> transformer,
6289 int basis,
6290 IntBinaryOperator reducer) {
6291 super(p, b, i, f, t); this.nextRight = nextRight;
6292 this.transformer = transformer;
6293 this.basis = basis; this.reducer = reducer;
6294 }
6295 public final Integer getRawResult() { return result; }
6296 public final void compute() {
6297 final ToIntFunction<Map.Entry<K,V>> transformer;
6298 final IntBinaryOperator reducer;
6299 if ((transformer = this.transformer) != null &&
6300 (reducer = this.reducer) != null) {
6301 int r = this.basis;
6302 for (int i = baseIndex, f, h; batch > 0 &&
6303 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6304 addToPendingCount(1);
6305 (rights = new MapReduceEntriesToIntTask<K,V>
6306 (this, batch >>>= 1, baseLimit = h, f, tab,
6307 rights, transformer, r, reducer)).fork();
6308 }
6309 for (Node<K,V> p; (p = advance()) != null; )
6310 r = reducer.applyAsInt(r, transformer.applyAsInt(p));
6311 result = r;
6312 CountedCompleter<?> c;
6313 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6314 @SuppressWarnings("unchecked")
6315 MapReduceEntriesToIntTask<K,V>
6316 t = (MapReduceEntriesToIntTask<K,V>)c,
6317 s = t.rights;
6318 while (s != null) {
6319 t.result = reducer.applyAsInt(t.result, s.result);
6320 s = t.rights = s.nextRight;
6321 }
6322 }
6323 }
6324 }
6325 }
6326
6327 @SuppressWarnings("serial")
6328 static final class MapReduceMappingsToIntTask<K,V>
6329 extends BulkTask<K,V,Integer> {
6330 final ToIntBiFunction<? super K, ? super V> transformer;
6331 final IntBinaryOperator reducer;
6332 final int basis;
6333 int result;
6334 MapReduceMappingsToIntTask<K,V> rights, nextRight;
6335 MapReduceMappingsToIntTask
6336 (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
6337 MapReduceMappingsToIntTask<K,V> nextRight,
6338 ToIntBiFunction<? super K, ? super V> transformer,
6339 int basis,
6340 IntBinaryOperator reducer) {
6341 super(p, b, i, f, t); this.nextRight = nextRight;
6342 this.transformer = transformer;
6343 this.basis = basis; this.reducer = reducer;
6344 }
6345 public final Integer getRawResult() { return result; }
6346 public final void compute() {
6347 final ToIntBiFunction<? super K, ? super V> transformer;
6348 final IntBinaryOperator reducer;
6349 if ((transformer = this.transformer) != null &&
6350 (reducer = this.reducer) != null) {
6351 int r = this.basis;
6352 for (int i = baseIndex, f, h; batch > 0 &&
6353 (h = ((f = baseLimit) + i) >>> 1) > i;) {
6354 addToPendingCount(1);
6355 (rights = new MapReduceMappingsToIntTask<K,V>
6356 (this, batch >>>= 1, baseLimit = h, f, tab,
6357 rights, transformer, r, reducer)).fork();
6358 }
6359 for (Node<K,V> p; (p = advance()) != null; )
6360 r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
6361 result = r;
6362 CountedCompleter<?> c;
6363 for (c = firstComplete(); c != null; c = c.nextComplete()) {
6364 @SuppressWarnings("unchecked")
6365 MapReduceMappingsToIntTask<K,V>
6366 t = (MapReduceMappingsToIntTask<K,V>)c,
6367 s = t.rights;
6368 while (s != null) {
6369 t.result = reducer.applyAsInt(t.result, s.result);
6370 s = t.rights = s.nextRight;
6371 }
6372 }
6373 }
6374 }
6375 }
6376
6377 // Unsafe mechanics
6378 private static final Unsafe U = Unsafe.getUnsafe();
6379 private static final long SIZECTL
6380 = U.objectFieldOffset(ConcurrentHashMap.class, "sizeCtl");
6381 private static final long TRANSFERINDEX
6382 = U.objectFieldOffset(ConcurrentHashMap.class, "transferIndex");
6383 private static final long BASECOUNT
6384 = U.objectFieldOffset(ConcurrentHashMap.class, "baseCount");
6385 private static final long CELLSBUSY
6386 = U.objectFieldOffset(ConcurrentHashMap.class, "cellsBusy");
6387 private static final long CELLVALUE
6388 = U.objectFieldOffset(CounterCell.class, "value");
6389 private static final long ABASE = U.arrayBaseOffset(Node[].class);
6390 private static final int ASHIFT;
6391
6392 static {
6393 int scale = U.arrayIndexScale(Node[].class);
6394 if ((scale & (scale - 1)) != 0)
6395 throw new ExceptionInInitializerError("array index scale not a power of two");
6396 ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
6397
6398 // Reduce the risk of rare disastrous classloading in first call to
6399 // LockSupport.park: https://bugs.openjdk.org/browse/JDK-8074773
6400 Class<?> ensureLoaded = LockSupport.class;
6401
6402 // Eager class load observed to help JIT during startup
6403 ensureLoaded = ReservationNode.class;
6404 }
6405 }