1 /*
   2  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.  Oracle designates this
   7  * particular file as subject to the "Classpath" exception as provided
   8  * by Oracle in the LICENSE file that accompanied this code.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  */
  24 
  25 /*
  26  * This file is available under and governed by the GNU General Public
  27  * License version 2 only, as published by the Free Software Foundation.
  28  * However, the following notice accompanied the original version of this
  29  * file:
  30  *
  31  * Written by Doug Lea with assistance from members of JCP JSR-166
  32  * Expert Group and released to the public domain, as explained at
  33  * http://creativecommons.org/publicdomain/zero/1.0/
  34  */
  35 
  36 package java.util.concurrent;
  37 
  38 import java.io.ObjectStreamField;
  39 import java.io.Serializable;
  40 import java.lang.reflect.ParameterizedType;
  41 import java.lang.reflect.Type;
  42 import java.util.AbstractMap;
  43 import java.util.Arrays;
  44 import java.util.Collection;
  45 import java.util.Enumeration;
  46 import java.util.HashMap;
  47 import java.util.Hashtable;
  48 import java.util.Iterator;
  49 import java.util.Map;
  50 import java.util.NoSuchElementException;
  51 import java.util.Objects;
  52 import java.util.Set;
  53 import java.util.Spliterator;
  54 import java.util.concurrent.atomic.AtomicReference;
  55 import java.util.concurrent.locks.LockSupport;
  56 import java.util.concurrent.locks.ReentrantLock;
  57 import java.util.function.BiConsumer;
  58 import java.util.function.BiFunction;
  59 import java.util.function.Consumer;
  60 import java.util.function.DoubleBinaryOperator;
  61 import java.util.function.Function;
  62 import java.util.function.IntBinaryOperator;
  63 import java.util.function.LongBinaryOperator;
  64 import java.util.function.Predicate;
  65 import java.util.function.ToDoubleBiFunction;
  66 import java.util.function.ToDoubleFunction;
  67 import java.util.function.ToIntBiFunction;
  68 import java.util.function.ToIntFunction;
  69 import java.util.function.ToLongBiFunction;
  70 import java.util.function.ToLongFunction;
  71 import java.util.stream.Stream;
  72 import jdk.internal.misc.Unsafe;
  73 import jdk.internal.util.ArraysSupport;
  74 import jdk.internal.vm.annotation.AOTRuntimeSetup;
  75 import jdk.internal.vm.annotation.AOTSafeClassInitializer;
  76 import jdk.internal.vm.annotation.Stable;
  77 
  78 /**
  79  * A hash table supporting full concurrency of retrievals and
  80  * high expected concurrency for updates. This class obeys the
  81  * same functional specification as {@link java.util.Hashtable}, and
  82  * includes versions of methods corresponding to each method of
  83  * {@code Hashtable}. However, even though all operations are
  84  * thread-safe, retrieval operations do <em>not</em> entail locking,
  85  * and there is <em>not</em> any support for locking the entire table
  86  * in a way that prevents all access.  This class is fully
  87  * interoperable with {@code Hashtable} in programs that rely on its
  88  * thread safety but not on its synchronization details.
  89  *
  90  * <p>Retrieval operations (including {@code get}) generally do not
  91  * block, so may overlap with update operations (including {@code put}
  92  * and {@code remove}). Retrievals reflect the results of the most
  93  * recently <em>completed</em> update operations holding upon their
  94  * onset. (More formally, an update operation for a given key bears a
  95  * <em>happens-before</em> relation with any (non-null) retrieval for
  96  * that key reporting the updated value.)  For aggregate operations
  97  * such as {@code putAll} and {@code clear}, concurrent retrievals may
  98  * reflect insertion or removal of only some entries.  Similarly,
  99  * Iterators, Spliterators and Enumerations return elements reflecting the
 100  * state of the hash table at some point at or since the creation of the
 101  * iterator/enumeration.  They do <em>not</em> throw {@link
 102  * java.util.ConcurrentModificationException ConcurrentModificationException}.
 103  * However, iterators are designed to be used by only one thread at a time.
 104  * Bear in mind that the results of aggregate status methods including
 105  * {@code size}, {@code isEmpty}, and {@code containsValue} are typically
 106  * useful only when a map is not undergoing concurrent updates in other threads.
 107  * Otherwise the results of these methods reflect transient states
 108  * that may be adequate for monitoring or estimation purposes, but not
 109  * for program control.
 110  *
 111  * <p>The table is dynamically expanded when there are too many
 112  * collisions (i.e., keys that have distinct hash codes but fall into
 113  * the same slot modulo the table size), with the expected average
 114  * effect of maintaining roughly two bins per mapping (corresponding
 115  * to a 0.75 load factor threshold for resizing). There may be much
 116  * variance around this average as mappings are added and removed, but
 117  * overall, this maintains a commonly accepted time/space tradeoff for
 118  * hash tables.  However, resizing this or any other kind of hash
 119  * table may be a relatively slow operation. When possible, it is a
 120  * good idea to provide a size estimate as an optional {@code
 121  * initialCapacity} constructor argument. An additional optional
 122  * {@code loadFactor} constructor argument provides a further means of
 123  * customizing initial table capacity by specifying the table density
 124  * to be used in calculating the amount of space to allocate for the
 125  * given number of elements.  Also, for compatibility with previous
 126  * versions of this class, constructors may optionally specify an
 127  * expected {@code concurrencyLevel} as an additional hint for
 128  * internal sizing.  Note that using many keys with exactly the same
 129  * {@code hashCode()} is a sure way to slow down performance of any
 130  * hash table. To ameliorate impact, when keys are {@link Comparable},
 131  * this class may use comparison order among keys to help break ties.
 132  *
 133  * <p>A {@link Set} projection of a ConcurrentHashMap may be created
 134  * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 135  * (using {@link #keySet(Object)}) when only keys are of interest, and the
 136  * mapped values are (perhaps transiently) not used or all take the
 137  * same mapping value.
 138  *
 139  * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
 140  * form of histogram or multiset) by using {@link
 141  * java.util.concurrent.atomic.LongAdder} values and initializing via
 142  * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
 143  * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
 144  * {@code freqs.computeIfAbsent(key, k -> new LongAdder()).increment();}
 145  *
 146  * <p>This class and its views and iterators implement all of the
 147  * <em>optional</em> methods of the {@link Map} and {@link Iterator}
 148  * interfaces.
 149  *
 150  * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
 151  * does <em>not</em> allow {@code null} to be used as a key or value.
 152  *
 153  * <p id="Bulk">ConcurrentHashMaps support a set of sequential and parallel bulk
 154  * operations that, unlike most {@link Stream} methods, are designed
 155  * to be safely, and often sensibly, applied even with maps that are
 156  * being concurrently updated by other threads; for example, when
 157  * computing a snapshot summary of the values in a shared registry.
 158  * There are three kinds of operation, each with four forms, accepting
 159  * functions with keys, values, entries, and (key, value) pairs as
 160  * arguments and/or return values. Because the elements of a
 161  * ConcurrentHashMap are not ordered in any particular way, and may be
 162  * processed in different orders in different parallel executions, the
 163  * correctness of supplied functions should not depend on any
 164  * ordering, or on any other objects or values that may transiently
 165  * change while computation is in progress; and except for forEach
 166  * actions, should ideally be side-effect-free. Bulk operations on
 167  * {@link Map.Entry} objects do not support method {@code setValue}.
 168  *
 169  * <ul>
 170  * <li>forEach: Performs a given action on each element.
 171  * A variant form applies a given transformation on each element
 172  * before performing the action.
 173  *
 174  * <li>search: Returns the first available non-null result of
 175  * applying a given function on each element; skipping further
 176  * search when a result is found.
 177  *
 178  * <li>reduce: Accumulates each element.  The supplied reduction
 179  * function cannot rely on ordering (more formally, it should be
 180  * both associative and commutative).  There are five variants:
 181  *
 182  * <ul>
 183  *
 184  * <li>Plain reductions. (There is not a form of this method for
 185  * (key, value) function arguments since there is no corresponding
 186  * return type.)
 187  *
 188  * <li>Mapped reductions that accumulate the results of a given
 189  * function applied to each element.
 190  *
 191  * <li>Reductions to scalar doubles, longs, and ints, using a
 192  * given basis value.
 193  *
 194  * </ul>
 195  * </ul>
 196  *
 197  * <p>These bulk operations accept a {@code parallelismThreshold}
 198  * argument. Methods proceed sequentially if the current map size is
 199  * estimated to be less than the given threshold. Using a value of
 200  * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
 201  * of {@code 1} results in maximal parallelism by partitioning into
 202  * enough subtasks to fully utilize the {@link
 203  * ForkJoinPool#commonPool()} that is used for all parallel
 204  * computations. Normally, you would initially choose one of these
 205  * extreme values, and then measure performance of using in-between
 206  * values that trade off overhead versus throughput.
 207  *
 208  * <p>The concurrency properties of bulk operations follow
 209  * from those of ConcurrentHashMap: Any non-null result returned
 210  * from {@code get(key)} and related access methods bears a
 211  * happens-before relation with the associated insertion or
 212  * update.  The result of any bulk operation reflects the
 213  * composition of these per-element relations (but is not
 214  * necessarily atomic with respect to the map as a whole unless it
 215  * is somehow known to be quiescent).  Conversely, because keys
 216  * and values in the map are never null, null serves as a reliable
 217  * atomic indicator of the current lack of any result.  To
 218  * maintain this property, null serves as an implicit basis for
 219  * all non-scalar reduction operations. For the double, long, and
 220  * int versions, the basis should be one that, when combined with
 221  * any other value, returns that other value (more formally, it
 222  * should be the identity element for the reduction). Most common
 223  * reductions have these properties; for example, computing a sum
 224  * with basis 0 or a minimum with basis MAX_VALUE.
 225  *
 226  * <p>Search and transformation functions provided as arguments
 227  * should similarly return null to indicate the lack of any result
 228  * (in which case it is not used). In the case of mapped
 229  * reductions, this also enables transformations to serve as
 230  * filters, returning null (or, in the case of primitive
 231  * specializations, the identity basis) if the element should not
 232  * be combined. You can create compound transformations and
 233  * filterings by composing them yourself under this "null means
 234  * there is nothing there now" rule before using them in search or
 235  * reduce operations.
 236  *
 237  * <p>Methods accepting and/or returning Entry arguments maintain
 238  * key-value associations. They may be useful for example when
 239  * finding the key for the greatest value. Note that "plain" Entry
 240  * arguments can be supplied using {@code new
 241  * AbstractMap.SimpleEntry(k,v)}.
 242  *
 243  * <p>Bulk operations may complete abruptly, throwing an
 244  * exception encountered in the application of a supplied
 245  * function. Bear in mind when handling such exceptions that other
 246  * concurrently executing functions could also have thrown
 247  * exceptions, or would have done so if the first exception had
 248  * not occurred.
 249  *
 250  * <p>Speedups for parallel compared to sequential forms are common
 251  * but not guaranteed.  Parallel operations involving brief functions
 252  * on small maps may execute more slowly than sequential forms if the
 253  * underlying work to parallelize the computation is more expensive
 254  * than the computation itself.  Similarly, parallelization may not
 255  * lead to much actual parallelism if all processors are busy
 256  * performing unrelated tasks.
 257  *
 258  * <p>All arguments to all task methods must be non-null.
 259  *
 260  * <p>This class is a member of the
 261  * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework">
 262  * Java Collections Framework</a>.
 263  *
 264  * @since 1.5
 265  * @author Doug Lea
 266  * @param <K> the type of keys maintained by this map
 267  * @param <V> the type of mapped values
 268  */
 269 @AOTSafeClassInitializer
 270 public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
 271     implements ConcurrentMap<K,V>, Serializable {
 272     private static final long serialVersionUID = 7249069246763182397L;
 273 
 274     /*
 275      * Overview:
 276      *
 277      * The primary design goal of this hash table is to maintain
 278      * concurrent readability (typically method get(), but also
 279      * iterators and related methods) while minimizing update
 280      * contention. Secondary goals are to keep space consumption about
 281      * the same or better than java.util.HashMap, and to support high
 282      * initial insertion rates on an empty table by many threads.
 283      *
 284      * This map usually acts as a binned (bucketed) hash table.  Each
 285      * key-value mapping is held in a Node.  Most nodes are instances
 286      * of the basic Node class with hash, key, value, and next
 287      * fields. However, various subclasses exist: TreeNodes are
 288      * arranged in balanced trees, not lists.  TreeBins hold the roots
 289      * of sets of TreeNodes. ForwardingNodes are placed at the heads
 290      * of bins during resizing. ReservationNodes are used as
 291      * placeholders while establishing values in computeIfAbsent and
 292      * related methods.  The types TreeBin, ForwardingNode, and
 293      * ReservationNode do not hold normal user keys, values, or
 294      * hashes, and are readily distinguishable during search etc
 295      * because they have negative hash fields and null key and value
 296      * fields. (These special nodes are either uncommon or transient,
 297      * so the impact of carrying around some unused fields is
 298      * insignificant.)
 299      *
 300      * The table is lazily initialized to a power-of-two size upon the
 301      * first insertion.  Each bin in the table normally contains a
 302      * list of Nodes (most often, the list has only zero or one Node).
 303      * Table accesses require volatile/atomic reads, writes, and
 304      * CASes.  Because there is no other way to arrange this without
 305      * adding further indirections, we use intrinsics
 306      * (jdk.internal.misc.Unsafe) operations.
 307      *
 308      * We use the top (sign) bit of Node hash fields for control
 309      * purposes -- it is available anyway because of addressing
 310      * constraints.  Nodes with negative hash fields are specially
 311      * handled or ignored in map methods.
 312      *
 313      * Insertion (via put or its variants) of the first node in an
 314      * empty bin is performed by just CASing it to the bin.  This is
 315      * by far the most common case for put operations under most
 316      * key/hash distributions.  Other update operations (insert,
 317      * delete, and replace) require locks.  We do not want to waste
 318      * the space required to associate a distinct lock object with
 319      * each bin, so instead use the first node of a bin list itself as
 320      * a lock. Locking support for these locks relies on builtin
 321      * "synchronized" monitors.
 322      *
 323      * Using the first node of a list as a lock does not by itself
 324      * suffice though: When a node is locked, any update must first
 325      * validate that it is still the first node after locking it, and
 326      * retry if not. Because new nodes are always appended to lists,
 327      * once a node is first in a bin, it remains first until deleted
 328      * or the bin becomes invalidated (upon resizing).
 329      *
 330      * The main disadvantage of per-bin locks is that other update
 331      * operations on other nodes in a bin list protected by the same
 332      * lock can stall, for example when user equals() or mapping
 333      * functions take a long time.  However, statistically, under
 334      * random hash codes, this is not a common problem.  Ideally, the
 335      * frequency of nodes in bins follows a Poisson distribution
 336      * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
 337      * parameter of about 0.5 on average, given the resizing threshold
 338      * of 0.75, although with a large variance because of resizing
 339      * granularity. Ignoring variance, the expected occurrences of
 340      * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
 341      * first values are:
 342      *
 343      * 0:    0.60653066
 344      * 1:    0.30326533
 345      * 2:    0.07581633
 346      * 3:    0.01263606
 347      * 4:    0.00157952
 348      * 5:    0.00015795
 349      * 6:    0.00001316
 350      * 7:    0.00000094
 351      * 8:    0.00000006
 352      * more: less than 1 in ten million
 353      *
 354      * Lock contention probability for two threads accessing distinct
 355      * elements is roughly 1 / (8 * #elements) under random hashes.
 356      *
 357      * Actual hash code distributions encountered in practice
 358      * sometimes deviate significantly from uniform randomness.  This
 359      * includes the case when N > (1<<30), so some keys MUST collide.
 360      * Similarly for dumb or hostile usages in which multiple keys are
 361  * designed to have identical hash codes or ones that differ only
 362      * in masked-out high bits. So we use a secondary strategy that
 363      * applies when the number of nodes in a bin exceeds a
 364      * threshold. These TreeBins use a balanced tree to hold nodes (a
 365      * specialized form of red-black trees), bounding search time to
 366      * O(log N).  Each search step in a TreeBin is at least twice as
 367      * slow as in a regular list, but given that N cannot exceed
 368      * (1<<64) (before running out of addresses) this bounds search
 369      * steps, lock hold times, etc, to reasonable constants (roughly
 370      * 100 nodes inspected per operation worst case) so long as keys
 371      * are Comparable (which is very common -- String, Long, etc).
 372      * TreeBin nodes (TreeNodes) also maintain the same "next"
 373      * traversal pointers as regular nodes, so can be traversed in
 374      * iterators in the same way.
 375      *
 376      * The table is resized when occupancy exceeds a percentage
 377      * threshold (nominally, 0.75, but see below).  Any thread
 378      * noticing an overfull bin may assist in resizing after the
 379      * initiating thread allocates and sets up the replacement array.
 380      * However, rather than stalling, these other threads may proceed
 381      * with insertions etc.  The use of TreeBins shields us from the
 382      * worst case effects of overfilling while resizes are in
 383      * progress.  Resizing proceeds by transferring bins, one by one,
 384      * from the table to the next table. However, threads claim small
 385      * blocks of indices to transfer (via field transferIndex) before
 386      * doing so, reducing contention.  A generation stamp in field
 387      * sizeCtl ensures that resizings do not overlap. Because we are
 388      * using power-of-two expansion, the elements from each bin must
 389      * either stay at same index, or move with a power of two
 390      * offset. We eliminate unnecessary node creation by catching
 391      * cases where old nodes can be reused because their next fields
 392      * won't change.  On average, only about one-sixth of them need
 393      * cloning when a table doubles. The nodes they replace will be
 394      * garbage collectible as soon as they are no longer referenced by
 395      * any reader thread that may be in the midst of concurrently
 396      * traversing table.  Upon transfer, the old table bin contains
 397      * only a special forwarding node (with hash field "MOVED") that
 398      * contains the next table as its key. On encountering a
 399      * forwarding node, access and update operations restart, using
 400      * the new table.
 401      *
 402      * Each bin transfer requires its bin lock, which can stall
 403      * waiting for locks while resizing. However, because other
 404      * threads can join in and help resize rather than contend for
 405      * locks, average aggregate waits become shorter as resizing
 406      * progresses.  The transfer operation must also ensure that all
 407      * accessible bins in both the old and new table are usable by any
 408      * traversal.  This is arranged in part by proceeding from the
 409      * last bin (table.length - 1) up towards the first.  Upon seeing
 410      * a forwarding node, traversals (see class Traverser) arrange to
 411      * move to the new table without revisiting nodes.  To ensure that
 412      * no intervening nodes are skipped even when moved out of order,
 413      * a stack (see class TableStack) is created on first encounter of
 414      * a forwarding node during a traversal, to maintain its place if
 415      * later processing the current table. The need for these
 416      * save/restore mechanics is relatively rare, but when one
 417      * forwarding node is encountered, typically many more will be.
 418      * So Traversers use a simple caching scheme to avoid creating so
 419      * many new TableStack nodes. (Thanks to Peter Levart for
 420      * suggesting use of a stack here.)
 421      *
 422      * The traversal scheme also applies to partial traversals of
 423      * ranges of bins (via an alternate Traverser constructor)
 424      * to support partitioned aggregate operations.  Also, read-only
 425      * operations give up if ever forwarded to a null table, which
 426      * provides support for shutdown-style clearing, which is also not
 427      * currently implemented.
 428      *
 429      * Lazy table initialization minimizes footprint until first use,
 430      * and also avoids resizings when the first operation is from a
 431      * putAll, constructor with map argument, or deserialization.
 432      * These cases attempt to override the initial capacity settings,
 433      * but harmlessly fail to take effect in cases of races.
 434      *
 435      * The element count is maintained using a specialization of
 436      * LongAdder. We need to incorporate a specialization rather than
 437      * just use a LongAdder in order to access implicit
 438      * contention-sensing that leads to creation of multiple
 439      * CounterCells.  The counter mechanics avoid contention on
 440      * updates but can encounter cache thrashing if read too
 441      * frequently during concurrent access. To avoid reading so often,
 442      * resizing under contention is attempted only upon adding to a
 443      * bin already holding two or more nodes. Under uniform hash
 444      * distributions, the probability of this occurring at threshold
 445      * is around 13%, meaning that only about 1 in 8 puts check
 446      * threshold (and after resizing, many fewer do so).
 447      *
 448      * TreeBins use a special form of comparison for search and
 449      * related operations (which is the main reason we cannot use
 450      * existing collections such as TreeMaps). TreeBins contain
 451      * Comparable elements, but may contain others, as well as
 452      * elements that are Comparable but not necessarily Comparable for
 453      * the same T, so we cannot invoke compareTo among them. To handle
 454      * this, the tree is ordered primarily by hash value, then by
 455      * Comparable.compareTo order if applicable.  On lookup at a node,
 456      * if elements are not comparable or compare as 0 then both left
 457      * and right children may need to be searched in the case of tied
 458      * hash values. (This corresponds to the full list search that
 459      * would be necessary if all elements were non-Comparable and had
 460      * tied hashes.) On insertion, to keep a total ordering (or as
 461      * close as is required here) across rebalancings, we compare
 462      * classes and identityHashCodes as tie-breakers. The red-black
 463      * balancing code is updated from pre-jdk-collections
 464      * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
 465      * based in turn on Cormen, Leiserson, and Rivest "Introduction to
 466      * Algorithms" (CLR).
 467      *
 468      * TreeBins also require an additional locking mechanism.  While
 469      * list traversal is always possible by readers even during
 470      * updates, tree traversal is not, mainly because of tree-rotations
 471      * that may change the root node and/or its linkages.  TreeBins
 472      * include a simple read-write lock mechanism parasitic on the
 473      * main bin-synchronization strategy: Structural adjustments
 474      * associated with an insertion or removal are already bin-locked
 475      * (and so cannot conflict with other writers) but must wait for
 476      * ongoing readers to finish. Since there can be only one such
 477      * waiter, we use a simple scheme using a single "waiter" field to
 478      * block writers.  However, readers need never block.  If the root
 479      * lock is held, they proceed along the slow traversal path (via
 480      * next-pointers) until the lock becomes available or the list is
 481      * exhausted, whichever comes first. These cases are not fast, but
 482      * maximize aggregate expected throughput.
 483      *
 484      * Maintaining API and serialization compatibility with previous
 485      * versions of this class introduces several oddities. Mainly: We
 486      * leave untouched but unused constructor arguments referring to
 487      * concurrencyLevel. We accept a loadFactor constructor argument,
 488      * but apply it only to initial table capacity (which is the only
 489      * time that we can guarantee to honor it.) We also declare an
 490      * unused "Segment" class that is instantiated in minimal form
 491      * only when serializing.
 492      *
 493      * Also, solely for compatibility with previous versions of this
 494      * class, it extends AbstractMap, even though all of its methods
 495      * are overridden, so it is just useless baggage.
 496      *
 497      * This file is organized to make things a little easier to follow
 498      * while reading than they might otherwise: First the main static
 499      * declarations and utilities, then fields, then main public
 500      * methods (with a few factorings of multiple public methods into
 501      * internal ones), then sizing methods, trees, traversers, and
 502      * bulk operations.
 503      */
 504 
 505     /* ---------------- Constants -------------- */
 506 
 507     /**
 508      * The largest possible table capacity.  This value must be
 509      * exactly 1<<30 to stay within Java array allocation and indexing
 510      * bounds for power of two table sizes, and is further required
 511      * because the top two bits of 32-bit hash fields are used for
 512      * control purposes.
 513      */
 514     private static final int MAXIMUM_CAPACITY = 1 << 30;
 515 
 516     /**
 517      * The default initial table capacity.  Must be a power of 2
 518      * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
 519      */
 520     private static final int DEFAULT_CAPACITY = 16;
 521 
 522     /**
 523      * The largest possible (non-power of two) array size.
 524      * Needed by toArray and related methods.
 525      */
 526     static final int MAX_ARRAY_SIZE = ArraysSupport.SOFT_MAX_ARRAY_LENGTH;
 527 
 528     /**
 529      * The default concurrency level for this table. Unused but
 530      * defined for compatibility with previous versions of this class.
 531      */
 532     private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
 533 
 534     /**
 535      * The load factor for this table. Overrides of this value in
 536      * constructors affect only the initial table capacity.  The
 537      * actual floating point value isn't normally used -- it is
 538      * simpler to use expressions such as {@code n - (n >>> 2)} for
 539      * the associated resizing threshold.
 540      */
 541     private static final float LOAD_FACTOR = 0.75f;
 542 
 543     /**
 544      * The bin count threshold for using a tree rather than list for a
 545      * bin.  Bins are converted to trees when adding an element to a
 546      * bin with at least this many nodes. The value must be greater
 547      * than 2, and should be at least 8 to mesh with assumptions in
 548      * tree removal about conversion back to plain bins upon
 549      * shrinkage.
 550      */
 551     static final int TREEIFY_THRESHOLD = 8;
 552 
 553     /**
 554      * The bin count threshold for untreeifying a (split) bin during a
 555      * resize operation. Should be less than TREEIFY_THRESHOLD, and at
 556      * most 6 to mesh with shrinkage detection under removal.
 557      */
 558     static final int UNTREEIFY_THRESHOLD = 6;
 559 
 560     /**
 561      * The smallest table capacity for which bins may be treeified.
 562      * (Otherwise the table is resized if too many nodes in a bin.)
 563      * The value should be at least 4 * TREEIFY_THRESHOLD to avoid
 564      * conflicts between resizing and treeification thresholds.
 565      */
 566     static final int MIN_TREEIFY_CAPACITY = 64;
 567 
 568     /**
 569      * Minimum number of rebinnings per transfer step. Ranges are
 570      * subdivided to allow multiple resizer threads.  This value
 571      * serves as a lower bound to avoid resizers encountering
 572      * excessive memory contention.  The value should be at least
 573      * DEFAULT_CAPACITY.
 574      */
 575     private static final int MIN_TRANSFER_STRIDE = 16;
 576 
 577     /**
 578      * The number of bits used for generation stamp in sizeCtl.
 579      * Must be at least 6 for 32bit arrays.
 580      */
 581     private static final int RESIZE_STAMP_BITS = 16;
 582 
 583     /**
 584      * The maximum number of threads that can help resize.
 585      * Must fit in 32 - RESIZE_STAMP_BITS bits.
 586      */
 587     private static final int MAX_RESIZERS = (1 << (32 - RESIZE_STAMP_BITS)) - 1;
 588 
 589     /**
 590      * The bit shift for recording size stamp in sizeCtl.
 591      */
 592     private static final int RESIZE_STAMP_SHIFT = 32 - RESIZE_STAMP_BITS;
 593 
 594     /*
 595      * Encodings for Node hash fields. See above for explanation.
 596      */
 597     static final int MOVED     = -1; // hash for forwarding nodes
 598     static final int TREEBIN   = -2; // hash for roots of trees
 599     static final int RESERVED  = -3; // hash for transient reservations
 600     static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
 601 
    /** Number of CPUS, to place bounds on some sizings */
    // Written exactly once, in runtimeSetup(); @Stable is a JDK-internal
    // hint that the value is constant after initialization.
    static @Stable int NCPU;

    static {
        runtimeSetup();   // initialize runtime-dependent state at class load
    }

    // NOTE(review): @AOTRuntimeSetup presumably re-runs this during
    // AOT-cache startup so NCPU reflects the deployment machine rather
    // than the training machine — confirm against the annotation's contract.
    @AOTRuntimeSetup
    private static void runtimeSetup() {
        NCPU = Runtime.getRuntime().availableProcessors();
    }
 613 
 614     /**
 615      * Serialized pseudo-fields, provided only for jdk7 compatibility.
 616      * @serialField segments Segment[]
 617      *   The segments, each of which is a specialized hash table.
 618      * @serialField segmentMask int
 619      *   Mask value for indexing into segments. The upper bits of a
 620      *   key's hash code are used to choose the segment.
 621      * @serialField segmentShift int
 622      *   Shift value for indexing within segments.
 623      */
 624     private static final ObjectStreamField[] serialPersistentFields = {
 625         new ObjectStreamField("segments", Segment[].class),
 626         new ObjectStreamField("segmentMask", Integer.TYPE),
 627         new ObjectStreamField("segmentShift", Integer.TYPE),
 628     };
 629 
 630     /* ---------------- Nodes -------------- */
 631 
 632     /**
 633      * Key-value entry.  This class is never exported out as a
 634      * user-mutable Map.Entry (i.e., one supporting setValue; see
 635      * MapEntry below), but can be used for read-only traversals used
 636      * in bulk tasks.  Subclasses of Node with a negative hash field
 637      * are special, and contain null keys and values (but are never
 638      * exported).  Otherwise, keys and vals are never null.
 639      */
 640     static class Node<K,V> implements Map.Entry<K,V> {
 641         final int hash;
 642         final K key;
 643         volatile V val;
 644         volatile Node<K,V> next;
 645 
 646         Node(int hash, K key, V val) {
 647             this.hash = hash;
 648             this.key = key;
 649             this.val = val;
 650         }
 651 
 652         Node(int hash, K key, V val, Node<K,V> next) {
 653             this(hash, key, val);
 654             this.next = next;
 655         }
 656 
 657         public final K getKey()     { return key; }
 658         public final V getValue()   { return val; }
 659         public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
 660         public final String toString() {
 661             return Helpers.mapEntryToString(key, val);
 662         }
 663         public final V setValue(V value) {
 664             throw new UnsupportedOperationException();
 665         }
 666 
 667         public final boolean equals(Object o) {
 668             Object k, v, u; Map.Entry<?,?> e;
 669             return ((o instanceof Map.Entry) &&
 670                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
 671                     (v = e.getValue()) != null &&
 672                     (Objects.equals(k, key)) &&
 673                     v.equals(val));
 674         }
 675 
 676         /**
 677          * Virtualized support for map.get(); overridden in subclasses.
 678          */
 679         Node<K,V> find(int h, Object k) {
 680             Node<K,V> e = this;
 681             if (k != null) {
 682                 do {
 683                     K ek;
 684                     if (e.hash == h &&
 685                         (ek = e.key) != null && Objects.equals(k, ek))
 686                         return e;
 687                 } while ((e = e.next) != null);
 688             }
 689             return null;
 690         }
 691     }
 692 
 693     /* ---------------- Static utilities -------------- */
 694 
 695     /**
 696      * Spreads (XORs) higher bits of hash to lower and also forces top
 697      * bit to 0. Because the table uses power-of-two masking, sets of
 698      * hashes that vary only in bits above the current mask will
 699      * always collide. (Among known examples are sets of Float keys
 700      * holding consecutive whole numbers in small tables.)  So we
 701      * apply a transform that spreads the impact of higher bits
 702      * downward. There is a tradeoff between speed, utility, and
 703      * quality of bit-spreading. Because many common sets of hashes
 704      * are already reasonably distributed (so don't benefit from
 705      * spreading), and because we use trees to handle large sets of
 706      * collisions in bins, we just XOR some shifted bits in the
 707      * cheapest possible way to reduce systematic lossage, as well as
 708      * to incorporate impact of the highest bits that would otherwise
 709      * never be used in index calculations because of table bounds.
 710      */
 711     static final int spread(int h) {
 712         return (h ^ (h >>> 16)) & HASH_BITS;
 713     }
 714 
 715     /**
 716      * Returns a power of two table size for the given desired capacity.
 717      * See Hackers Delight, sec 3.2
 718      */
 719     private static final int tableSizeFor(int c) {
 720         int n = -1 >>> Integer.numberOfLeadingZeros(c - 1);
 721         return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
 722     }
 723 
 724     /**
 725      * Returns x's Class if it is of the form "class C implements
 726      * Comparable<C>", else null.
 727      */
 728     static Class<?> comparableClassFor(Object x) {
 729         if (x instanceof Comparable) {
 730             Class<?> c; Type[] ts, as; ParameterizedType p;
 731             if ((c = x.getClass()) == String.class) // bypass checks
 732                 return c;
 733             if ((ts = c.getGenericInterfaces()) != null) {
 734                 for (Type t : ts) {
 735                     if ((t instanceof ParameterizedType) &&
 736                         ((p = (ParameterizedType)t).getRawType() ==
 737                          Comparable.class) &&
 738                         (as = p.getActualTypeArguments()) != null &&
 739                         as.length == 1 && as[0] == c) // type arg is c
 740                         return c;
 741                 }
 742             }
 743         }
 744         return null;
 745     }
 746 
 747     /**
 748      * Returns k.compareTo(x) if x matches kc (k's screened comparable
 749      * class), else 0.
 750      */
 751     @SuppressWarnings("unchecked") // for cast to Comparable
 752     static int compareComparables(Class<?> kc, Object k, Object x) {
 753         return (x == null || x.getClass() != kc ? 0 :
 754                 ((Comparable)k).compareTo(x));
 755     }
 756 
 757     /* ---------------- Table element access -------------- */
 758 
 759     /*
 760      * Atomic access methods are used for table elements as well as
 761      * elements of in-progress next table while resizing.  All uses of
 762      * the tab arguments must be null checked by callers.  All callers
 763      * also paranoically precheck that tab's length is not zero (or an
 764      * equivalent check), thus ensuring that any index argument taking
 765      * the form of a hash value anded with (length - 1) is a valid
 766      * index.  Note that, to be correct wrt arbitrary concurrency
 767      * errors by users, these checks must operate on local variables,
 768      * which accounts for some odd-looking inline assignments below.
 769      * Note that calls to setTabAt always occur within locked regions,
 770      * and so require only release ordering.
 771      */
 772 
    // Acquire-mode volatile read of tab[i]; pairs with the CAS in
    // casTabAt and the release write in setTabAt.
    @SuppressWarnings("unchecked")
    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getReferenceAcquire(tab, ((long)i << ASHIFT) + ABASE);
    }
 777 
    // Atomically installs v into tab[i] only if it currently holds c;
    // used lock-free, e.g. when inserting the first node of a bin.
    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSetReference(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }
 782 
    // Release-mode write of tab[i]. Callers always hold the bin lock
    // (see the access-methods note above), so release ordering suffices.
    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putReferenceRelease(tab, ((long)i << ASHIFT) + ABASE, v);
    }
 786 
 787     /* ---------------- Fields -------------- */
 788 
 789     /**
 790      * The array of bins. Lazily initialized upon first insertion.
 791      * Size is always a power of two. Accessed directly by iterators.
 792      */
 793     transient volatile Node<K,V>[] table;
 794 
 795     /**
 796      * The next table to use; non-null only while resizing.
 797      */
 798     private transient volatile Node<K,V>[] nextTable;
 799 
 800     /**
 801      * Base counter value, used mainly when there is no contention,
 802      * but also as a fallback during table initialization
 803      * races. Updated via CAS.
 804      */
 805     private transient volatile long baseCount;
 806 
 807     /**
 808      * Table initialization and resizing control.  When negative, the
 809      * table is being initialized or resized: -1 for initialization,
 810      * else -(1 + the number of active resizing threads).  Otherwise,
 811      * when table is null, holds the initial table size to use upon
 812      * creation, or 0 for default. After initialization, holds the
 813      * next element count value upon which to resize the table.
 814      */
 815     private transient volatile int sizeCtl;
 816 
 817     /**
 818      * The next table index (plus one) to split while resizing.
 819      */
 820     private transient volatile int transferIndex;
 821 
 822     /**
 823      * Spinlock (locked via CAS) used when resizing and/or creating CounterCells.
 824      */
 825     private transient volatile int cellsBusy;
 826 
 827     /**
 828      * Table of counter cells. When non-null, size is a power of 2.
 829      */
 830     private transient volatile CounterCell[] counterCells;
 831 
 832     // views
 833     private transient KeySetView<K,V> keySet;
 834     private transient ValuesView<K,V> values;
 835     private transient EntrySetView<K,V> entrySet;
 836 
 837 
 838     /* ---------------- Public operations -------------- */
 839 
 840     /**
 841      * Creates a new, empty map with the default initial table size (16).
 842      */
 843     public ConcurrentHashMap() {
 844     }
 845 
 846     /**
 847      * Creates a new, empty map with an initial table size
 848      * accommodating the specified number of elements without the need
 849      * to dynamically resize.
 850      *
 851      * @param initialCapacity The implementation performs internal
 852      * sizing to accommodate this many elements.
 853      * @throws IllegalArgumentException if the initial capacity of
 854      * elements is negative
 855      */
 856     public ConcurrentHashMap(int initialCapacity) {
 857         this(initialCapacity, LOAD_FACTOR, 1);
 858     }
 859 
 860     /**
 861      * Creates a new map with the same mappings as the given map.
 862      *
 863      * @param m the map
 864      */
 865     @SuppressWarnings("this-escape")
 866     public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
 867         this(m.size());
 868         putAll(m);
 869     }
 870 
 871     /**
 872      * Creates a new, empty map with an initial table size based on
 873      * the given number of elements ({@code initialCapacity}) and
 874      * initial table density ({@code loadFactor}).
 875      *
 876      * @param initialCapacity the initial capacity. The implementation
 877      * performs internal sizing to accommodate this many elements,
 878      * given the specified load factor.
 879      * @param loadFactor the load factor (table density) for
 880      * establishing the initial table size
 881      * @throws IllegalArgumentException if the initial capacity of
 882      * elements is negative or the load factor is nonpositive
 883      *
 884      * @since 1.6
 885      */
 886     public ConcurrentHashMap(int initialCapacity, float loadFactor) {
 887         this(initialCapacity, loadFactor, 1);
 888     }
 889 
 890     /**
 891      * Creates a new, empty map with an initial table size based on
 892      * the given number of elements ({@code initialCapacity}), initial
 893      * table density ({@code loadFactor}), and number of concurrently
 894      * updating threads ({@code concurrencyLevel}).
 895      *
 896      * @param initialCapacity the initial capacity. The implementation
 897      * performs internal sizing to accommodate this many elements,
 898      * given the specified load factor.
 899      * @param loadFactor the load factor (table density) for
 900      * establishing the initial table size
 901      * @param concurrencyLevel the estimated number of concurrently
 902      * updating threads. The implementation may use this value as
 903      * a sizing hint.
 904      * @throws IllegalArgumentException if the initial capacity is
 905      * negative or the load factor or concurrencyLevel are
 906      * nonpositive
 907      */
 908     public ConcurrentHashMap(int initialCapacity,
 909                              float loadFactor, int concurrencyLevel) {
 910         if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
 911             throw new IllegalArgumentException();
 912         if (initialCapacity < concurrencyLevel)   // Use at least as many bins
 913             initialCapacity = concurrencyLevel;   // as estimated threads
 914         long size = (long)(1.0 + (long)initialCapacity / loadFactor);
 915         int cap = (size >= (long)MAXIMUM_CAPACITY) ?
 916             MAXIMUM_CAPACITY : tableSizeFor((int)size);
 917         this.sizeCtl = cap;
 918     }
 919 
 920     // Original (since JDK1.2) Map methods
 921 
 922     /**
 923      * {@inheritDoc}
 924      */
 925     public int size() {
 926         long n = sumCount();
 927         return ((n < 0L) ? 0 :
 928                 (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
 929                 (int)n);
 930     }
 931 
 932     /**
 933      * {@inheritDoc}
 934      */
 935     public boolean isEmpty() {
 936         return sumCount() <= 0L; // ignore transient negative values
 937     }
 938 
 939     /**
 940      * Returns the value to which the specified key is mapped,
 941      * or {@code null} if this map contains no mapping for the key.
 942      *
 943      * <p>More formally, if this map contains a mapping from a key
 944      * {@code k} to a value {@code v} such that {@code key.equals(k)},
 945      * then this method returns {@code v}; otherwise it returns
 946      * {@code null}.  (There can be at most one such mapping.)
 947      *
 948      * @throws NullPointerException if the specified key is null
 949      */
 950     public V get(Object key) {
 951         Node<K,V>[] tab; Node<K,V> e, p; int n, eh; K ek;
 952         int h = spread(key.hashCode());
 953         if ((tab = table) != null && (n = tab.length) > 0 &&
 954             (e = tabAt(tab, (n - 1) & h)) != null) {
 955             if ((eh = e.hash) == h) {
 956                 if ((ek = e.key) != null && Objects.equals(key, ek))
 957                     return e.val;
 958             }
 959             else if (eh < 0)
 960                 return (p = e.find(h, key)) != null ? p.val : null;
 961             while ((e = e.next) != null) {
 962                 if (e.hash == h &&
 963                     ((ek = e.key) != null && Objects.equals(key, ek)))
 964                     return e.val;
 965             }
 966         }
 967         return null;
 968     }
 969 
 970     /**
 971      * Tests if the specified object is a key in this table.
 972      *
 973      * @param  key possible key
 974      * @return {@code true} if and only if the specified object
 975      *         is a key in this table, as determined by the
 976      *         {@code equals} method; {@code false} otherwise
 977      * @throws NullPointerException if the specified key is null
 978      */
 979     public boolean containsKey(Object key) {
 980         return get(key) != null;
 981     }
 982 
 983     /**
 984      * Returns {@code true} if this map maps one or more keys to the
 985      * specified value. Note: This method may require a full traversal
 986      * of the map, and is much slower than method {@code containsKey}.
 987      *
 988      * @param value value whose presence in this map is to be tested
 989      * @return {@code true} if this map maps one or more keys to the
 990      *         specified value
 991      * @throws NullPointerException if the specified value is null
 992      */
 993     public boolean containsValue(Object value) {
 994         if (value == null)
 995             throw new NullPointerException();
 996         Node<K,V>[] t;
 997         if ((t = table) != null) {
 998             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
 999             for (Node<K,V> p; (p = it.advance()) != null; ) {
1000                 V v;
1001                 if ((v = p.val) != null && Objects.equals(value, v))
1002                     return true;
1003             }
1004         }
1005         return false;
1006     }
1007 
1008     /**
1009      * Maps the specified key to the specified value in this table.
1010      * Neither the key nor the value can be null.
1011      *
1012      * <p>The value can be retrieved by calling the {@code get} method
1013      * with a key that is equal to the original key.
1014      *
1015      * @param key key with which the specified value is to be associated
1016      * @param value value to be associated with the specified key
1017      * @return the previous value associated with {@code key}, or
1018      *         {@code null} if there was no mapping for {@code key}
1019      * @throws NullPointerException if the specified key or value is null
1020      */
1021     public V put(K key, V value) {
1022         return putVal(key, value, false);
1023     }
1024 
    /** Implementation for put and putIfAbsent */
    final V putVal(K key, V value, boolean onlyIfAbsent) {
        if (key == null || value == null) throw new NullPointerException();
        int hash = spread(key.hashCode());
        int binCount = 0;   // nodes observed in the bin; gates treeification below
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int n, i, fh; K fk; V fv;
            if (tab == null || (n = tab.length) == 0)
                tab = initTable();   // lazy table creation, then retry
            else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(hash, key, value)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) == MOVED)
                tab = helpTransfer(tab, f);  // resize in progress: assist, then retry
            else if (onlyIfAbsent // check first node without acquiring lock
                     && fh == hash
                     && (fk = f.key) != null && Objects.equals(key, fk)
                     && (fv = f.val) != null)
                return fv;
            else {
                V oldVal = null;
                synchronized (f) {   // lock on the bin's first node
                    if (tabAt(tab, i) == f) {   // recheck: bin head still current
                        if (fh >= 0) {   // non-negative hash: plain linked-list bin
                            binCount = 1;
                            for (Node<K,V> e = f;; ++binCount) {
                                K ek;
                                if (e.hash == hash &&
                                    (ek = e.key) != null && Objects.equals(key, ek)) {
                                    oldVal = e.val;
                                    if (!onlyIfAbsent)
                                        e.val = value;
                                    break;
                                }
                                Node<K,V> pred = e;
                                if ((e = e.next) == null) {
                                    // Key absent: append at list tail.
                                    pred.next = new Node<K,V>(hash, key, value);
                                    break;
                                }
                            }
                        }
                        else if (f instanceof TreeBin) {
                            Node<K,V> p;
                            binCount = 2;   // nonzero so post-processing below runs
                            if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                           value)) != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = value;
                            }
                        }
                        else if (f instanceof ReservationNode)
                            throw new IllegalStateException("Recursive update");
                    }
                }
                if (binCount != 0) {
                    if (binCount >= TREEIFY_THRESHOLD)
                        treeifyBin(tab, i);
                    if (oldVal != null)
                        return oldVal;   // replaced existing mapping; count unchanged
                    break;               // inserted a new mapping
                }
            }
        }
        addCount(1L, binCount);   // commit size change (addCount defined elsewhere)
        return null;
    }
1093 
1094     /**
1095      * Copies all of the mappings from the specified map to this one.
1096      * These mappings replace any mappings that this map had for any of the
1097      * keys currently in the specified map.
1098      *
1099      * @param m mappings to be stored in this map
1100      */
1101     public void putAll(Map<? extends K, ? extends V> m) {
1102         if (table != null) {
1103             tryPresize(size() + m.size());
1104         }
1105         for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
1106             putVal(e.getKey(), e.getValue(), false);
1107     }
1108 
1109     /**
1110      * Removes the key (and its corresponding value) from this map.
1111      * This method does nothing if the key is not in the map.
1112      *
1113      * @param  key the key that needs to be removed
1114      * @return the previous value associated with {@code key}, or
1115      *         {@code null} if there was no mapping for {@code key}
1116      * @throws NullPointerException if the specified key is null
1117      */
1118     public V remove(Object key) {
1119         return replaceNode(key, null, null);
1120     }
1121 
1122     /**
1123      * Implementation for the four public remove/replace methods:
1124      * Replaces node value with v, conditional upon match of cv if
1125      * non-null.  If resulting value is null, delete.
1126      */
1127     final V replaceNode(Object key, V value, Object cv) {
1128         int hash = spread(key.hashCode());
1129         for (Node<K,V>[] tab = table;;) {
1130             Node<K,V> f; int n, i, fh;
1131             if (tab == null || (n = tab.length) == 0 ||
1132                 (f = tabAt(tab, i = (n - 1) & hash)) == null)
1133                 break;
1134             else if ((fh = f.hash) == MOVED)
1135                 tab = helpTransfer(tab, f);
1136             else {
1137                 V oldVal = null;
1138                 boolean validated = false;
1139                 synchronized (f) {
1140                     if (tabAt(tab, i) == f) {
1141                         if (fh >= 0) {
1142                             validated = true;
1143                             for (Node<K,V> e = f, pred = null;;) {
1144                                 K ek;
1145                                 if (e.hash == hash &&
1146                                     ((ek = e.key) != null && Objects.equals(key, ek))) {
1147                                     V ev = e.val;
1148                                     if (cv == null ||
1149                                         (ev != null && Objects.equals(cv, ev))) {
1150                                         oldVal = ev;
1151                                         if (value != null)
1152                                             e.val = value;
1153                                         else if (pred != null)
1154                                             pred.next = e.next;
1155                                         else
1156                                             setTabAt(tab, i, e.next);
1157                                     }
1158                                     break;
1159                                 }
1160                                 pred = e;
1161                                 if ((e = e.next) == null)
1162                                     break;
1163                             }
1164                         }
1165                         else if (f instanceof TreeBin) {
1166                             validated = true;
1167                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1168                             TreeNode<K,V> r, p;
1169                             if ((r = t.root) != null &&
1170                                 (p = r.findTreeNode(hash, key, null)) != null) {
1171                                 V pv = p.val;
1172                                 if (cv == null ||
1173                                     (pv != null && Objects.equals(cv, pv))) {
1174                                     oldVal = pv;
1175                                     if (value != null)
1176                                         p.val = value;
1177                                     else if (t.removeTreeNode(p))
1178                                         setTabAt(tab, i, untreeify(t.first));
1179                                 }
1180                             }
1181                         }
1182                         else if (f instanceof ReservationNode)
1183                             throw new IllegalStateException("Recursive update");
1184                     }
1185                 }
1186                 if (validated) {
1187                     if (oldVal != null) {
1188                         if (value == null)
1189                             addCount(-1L, -1);
1190                         return oldVal;
1191                     }
1192                     break;
1193                 }
1194             }
1195         }
1196         return null;
1197     }
1198 
1199     /**
1200      * Removes all of the mappings from this map.
1201      */
1202     public void clear() {
1203         long delta = 0L; // negative number of deletions
1204         int i = 0;
1205         Node<K,V>[] tab = table;
1206         while (tab != null && i < tab.length) {
1207             int fh;
1208             Node<K,V> f = tabAt(tab, i);
1209             if (f == null)
1210                 ++i;
1211             else if ((fh = f.hash) == MOVED) {
1212                 tab = helpTransfer(tab, f);
1213                 i = 0; // restart
1214             }
1215             else {
1216                 synchronized (f) {
1217                     if (tabAt(tab, i) == f) {
1218                         Node<K,V> p = (fh >= 0 ? f :
1219                                        (f instanceof TreeBin) ?
1220                                        ((TreeBin<K,V>)f).first : null);
1221                         while (p != null) {
1222                             --delta;
1223                             p = p.next;
1224                         }
1225                         setTabAt(tab, i++, null);
1226                     }
1227                 }
1228             }
1229         }
1230         if (delta != 0L)
1231             addCount(delta, -1);
1232     }
1233 
1234     /**
1235      * Returns a {@link Set} view of the keys contained in this map.
1236      * The set is backed by the map, so changes to the map are
1237      * reflected in the set, and vice-versa. The set supports element
1238      * removal, which removes the corresponding mapping from this map,
1239      * via the {@code Iterator.remove}, {@code Set.remove},
1240      * {@code removeAll}, {@code retainAll}, and {@code clear}
1241      * operations.  It does not support the {@code add} or
1242      * {@code addAll} operations.
1243      *
1244      * <p>The view's iterators and spliterators are
1245      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1246      *
1247      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1248      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1249      *
1250      * @return the set view
1251      */
1252     public KeySetView<K,V> keySet() {
1253         KeySetView<K,V> ks;
1254         if ((ks = keySet) != null) return ks;
1255         return keySet = new KeySetView<K,V>(this, null);
1256     }
1257 
1258     /**
1259      * Returns a {@link Collection} view of the values contained in this map.
1260      * The collection is backed by the map, so changes to the map are
1261      * reflected in the collection, and vice-versa.  The collection
1262      * supports element removal, which removes the corresponding
1263      * mapping from this map, via the {@code Iterator.remove},
1264      * {@code Collection.remove}, {@code removeAll},
1265      * {@code retainAll}, and {@code clear} operations.  It does not
1266      * support the {@code add} or {@code addAll} operations.
1267      *
1268      * <p>The view's iterators and spliterators are
1269      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1270      *
1271      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT}
1272      * and {@link Spliterator#NONNULL}.
1273      *
1274      * @return the collection view
1275      */
1276     public Collection<V> values() {
1277         ValuesView<K,V> vs;
1278         if ((vs = values) != null) return vs;
1279         return values = new ValuesView<K,V>(this);
1280     }
1281 
1282     /**
1283      * Returns a {@link Set} view of the mappings contained in this map.
1284      * The set is backed by the map, so changes to the map are
1285      * reflected in the set, and vice-versa.  The set supports element
1286      * removal, which removes the corresponding mapping from the map,
1287      * via the {@code Iterator.remove}, {@code Set.remove},
1288      * {@code removeAll}, {@code retainAll}, and {@code clear}
1289      * operations.
1290      *
1291      * <p>The view's iterators and spliterators are
1292      * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
1293      *
1294      * <p>The view's {@code spliterator} reports {@link Spliterator#CONCURRENT},
1295      * {@link Spliterator#DISTINCT}, and {@link Spliterator#NONNULL}.
1296      *
1297      * @return the set view
1298      */
1299     public Set<Map.Entry<K,V>> entrySet() {
1300         EntrySetView<K,V> es;
1301         if ((es = entrySet) != null) return es;
1302         return entrySet = new EntrySetView<K,V>(this);
1303     }
1304 
1305     /**
1306      * Returns the hash code value for this {@link Map}, i.e.,
1307      * the sum of, for each key-value pair in the map,
1308      * {@code key.hashCode() ^ value.hashCode()}.
1309      *
1310      * @return the hash code value for this map
1311      */
1312     public int hashCode() {
1313         int h = 0;
1314         Node<K,V>[] t;
1315         if ((t = table) != null) {
1316             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1317             for (Node<K,V> p; (p = it.advance()) != null; )
1318                 h += p.key.hashCode() ^ p.val.hashCode();
1319         }
1320         return h;
1321     }
1322 
1323     /**
1324      * Returns a string representation of this map.  The string
1325      * representation consists of a list of key-value mappings (in no
1326      * particular order) enclosed in braces ("{@code {}}").  Adjacent
1327      * mappings are separated by the characters {@code ", "} (comma
1328      * and space).  Each key-value mapping is rendered as the key
1329      * followed by an equals sign ("{@code =}") followed by the
1330      * associated value.
1331      *
1332      * @return a string representation of this map
1333      */
1334     public String toString() {
1335         Node<K,V>[] t;
1336         int f = (t = table) == null ? 0 : t.length;
1337         Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1338         StringBuilder sb = new StringBuilder();
1339         sb.append('{');
1340         Node<K,V> p;
1341         if ((p = it.advance()) != null) {
1342             for (;;) {
1343                 K k = p.key;
1344                 V v = p.val;
1345                 sb.append(k == this ? "(this Map)" : k);
1346                 sb.append('=');
1347                 sb.append(v == this ? "(this Map)" : v);
1348                 if ((p = it.advance()) == null)
1349                     break;
1350                 sb.append(',').append(' ');
1351             }
1352         }
1353         return sb.append('}').toString();
1354     }
1355 
1356     /**
1357      * Compares the specified object with this map for equality.
1358      * Returns {@code true} if the given object is a map with the same
1359      * mappings as this map.  This operation may return misleading
1360      * results if either map is concurrently modified during execution
1361      * of this method.
1362      *
1363      * @param o object to be compared for equality with this map
1364      * @return {@code true} if the specified object is equal to this map
1365      */
1366     public boolean equals(Object o) {
1367         if (o != this) {
1368             if (!(o instanceof Map))
1369                 return false;
1370             Map<?,?> m = (Map<?,?>) o;
1371             Node<K,V>[] t;
1372             int f = (t = table) == null ? 0 : t.length;
1373             Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
1374             for (Node<K,V> p; (p = it.advance()) != null; ) {
1375                 V val = p.val;
1376                 Object v = m.get(p.key);
1377                 if (!Objects.equals(val, v))
1378                     return false;
1379             }
1380             for (Map.Entry<?,?> e : m.entrySet()) {
1381                 Object mk, mv, v;
1382                 if ((mk = e.getKey()) == null ||
1383                     (mv = e.getValue()) == null ||
1384                     (v = get(mk)) == null ||
1385                     !Objects.equals(mv, v))
1386                     return false;
1387             }
1388         }
1389         return true;
1390     }
1391 
1392     /**
1393      * Stripped-down version of helper class used in previous version,
1394      * declared for the sake of serialization compatibility.
1395      */
1396     static class Segment<K,V> extends ReentrantLock implements Serializable {
1397         private static final long serialVersionUID = 2249069246763182397L;
1398         final float loadFactor;
1399         Segment(float lf) { this.loadFactor = lf; }
1400     }
1401 
1402     /**
1403      * Saves this map to a stream (that is, serializes it).
1404      *
1405      * @param s the stream
1406      * @throws java.io.IOException if an I/O error occurs
1407      * @serialData
1408      * the serialized fields, followed by the key (Object) and value
1409      * (Object) for each key-value mapping, followed by a null pair.
1410      * The key-value mappings are emitted in no particular order.
1411      */
1412     private void writeObject(java.io.ObjectOutputStream s)
1413         throws java.io.IOException {
1414         // For serialization compatibility
1415         // Emulate segment calculation from previous version of this class
1416         int sshift = 0;
1417         int ssize = 1;
1418         while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
1419             ++sshift;
1420             ssize <<= 1;
1421         }
1422         int segmentShift = 32 - sshift;
1423         int segmentMask = ssize - 1;
1424         @SuppressWarnings("unchecked")
1425         Segment<K,V>[] segments = (Segment<K,V>[])
1426             new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
1427         for (int i = 0; i < segments.length; ++i)
1428             segments[i] = new Segment<K,V>(LOAD_FACTOR);
1429         java.io.ObjectOutputStream.PutField streamFields = s.putFields();
1430         streamFields.put("segments", segments);
1431         streamFields.put("segmentShift", segmentShift);
1432         streamFields.put("segmentMask", segmentMask);
1433         s.writeFields();
1434 
1435         Node<K,V>[] t;
1436         if ((t = table) != null) {
1437             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1438             for (Node<K,V> p; (p = it.advance()) != null; ) {
1439                 s.writeObject(p.key);
1440                 s.writeObject(p.val);
1441             }
1442         }
1443         s.writeObject(null);
1444         s.writeObject(null);
1445     }
1446 
1447     /**
1448      * Reconstitutes this map from a stream (that is, deserializes it).
1449      * @param s the stream
1450      * @throws ClassNotFoundException if the class of a serialized object
1451      *         could not be found
1452      * @throws java.io.IOException if an I/O error occurs
1453      */
1454     private void readObject(java.io.ObjectInputStream s)
1455         throws java.io.IOException, ClassNotFoundException {
1456         /*
1457          * To improve performance in typical cases, we create nodes
1458          * while reading, then place in table once size is known.
1459          * However, we must also validate uniqueness and deal with
1460          * overpopulated bins while doing so, which requires
1461          * specialized versions of putVal mechanics.
1462          */
1463         sizeCtl = -1; // force exclusion for table construction
1464         s.defaultReadObject();
1465         long size = 0L;
1466         Node<K,V> p = null;
1467         for (;;) {
1468             @SuppressWarnings("unchecked")
1469             K k = (K) s.readObject();
1470             @SuppressWarnings("unchecked")
1471             V v = (V) s.readObject();
1472             if (k != null && v != null) {
1473                 p = new Node<K,V>(spread(k.hashCode()), k, v, p);
1474                 ++size;
1475             }
1476             else
1477                 break;
1478         }
1479         if (size == 0L)
1480             sizeCtl = 0;
1481         else {
1482             long ts = (long)(1.0 + size / LOAD_FACTOR);
1483             int n = (ts >= (long)MAXIMUM_CAPACITY) ?
1484                 MAXIMUM_CAPACITY : tableSizeFor((int)ts);
1485             @SuppressWarnings("unchecked")
1486             Node<K,V>[] tab = (Node<K,V>[])new Node<?,?>[n];
1487             int mask = n - 1;
1488             long added = 0L;
1489             while (p != null) {
1490                 boolean insertAtFront;
1491                 Node<K,V> next = p.next, first;
1492                 int h = p.hash, j = h & mask;
1493                 if ((first = tabAt(tab, j)) == null)
1494                     insertAtFront = true;
1495                 else {
1496                     K k = p.key;
1497                     if (first.hash < 0) {
1498                         TreeBin<K,V> t = (TreeBin<K,V>)first;
1499                         if (t.putTreeVal(h, k, p.val) == null)
1500                             ++added;
1501                         insertAtFront = false;
1502                     }
1503                     else {
1504                         int binCount = 0;
1505                         insertAtFront = true;
1506                         Node<K,V> q; K qk;
1507                         for (q = first; q != null; q = q.next) {
1508                             if (q.hash == h &&
1509                                 ((qk = q.key) != null && Objects.equals(k, qk))) {
1510                                 insertAtFront = false;
1511                                 break;
1512                             }
1513                             ++binCount;
1514                         }
1515                         if (insertAtFront && binCount >= TREEIFY_THRESHOLD) {
1516                             insertAtFront = false;
1517                             ++added;
1518                             p.next = first;
1519                             TreeNode<K,V> hd = null, tl = null;
1520                             for (q = p; q != null; q = q.next) {
1521                                 TreeNode<K,V> t = new TreeNode<K,V>
1522                                     (q.hash, q.key, q.val, null, null);
1523                                 if ((t.prev = tl) == null)
1524                                     hd = t;
1525                                 else
1526                                     tl.next = t;
1527                                 tl = t;
1528                             }
1529                             setTabAt(tab, j, new TreeBin<K,V>(hd));
1530                         }
1531                     }
1532                 }
1533                 if (insertAtFront) {
1534                     ++added;
1535                     p.next = first;
1536                     setTabAt(tab, j, p);
1537                 }
1538                 p = next;
1539             }
1540             table = tab;
1541             sizeCtl = n - (n >>> 2);
1542             baseCount = added;
1543         }
1544     }
1545 
1546     // ConcurrentMap methods
1547 
1548     /**
1549      * {@inheritDoc ConcurrentMap}
1550      *
1551      * @return the previous value associated with the specified key,
1552      *         or {@code null} if there was no mapping for the key
1553      * @throws NullPointerException if the specified key or value is null
1554      */
1555     public V putIfAbsent(K key, V value) {
1556         return putVal(key, value, true);
1557     }
1558 
1559     /**
1560      * {@inheritDoc ConcurrentMap}
1561      *
1562      * @throws NullPointerException if the specified key is null
1563      * @return {@inheritDoc ConcurrentMap}
1564      */
1565     public boolean remove(Object key, Object value) {
1566         if (key == null)
1567             throw new NullPointerException();
1568         return value != null && replaceNode(key, null, value) != null;
1569     }
1570 
1571     /**
1572      * {@inheritDoc ConcurrentMap}
1573      *
1574      * @throws NullPointerException if any of the arguments are null
1575      * @return {@inheritDoc ConcurrentMap}
1576      */
1577     public boolean replace(K key, V oldValue, V newValue) {
1578         if (key == null || oldValue == null || newValue == null)
1579             throw new NullPointerException();
1580         return replaceNode(key, newValue, oldValue) != null;
1581     }
1582 
1583     /**
1584      * {@inheritDoc ConcurrentMap}
1585      *
1586      * @return the previous value associated with the specified key,
1587      *         or {@code null} if there was no mapping for the key
1588      * @throws NullPointerException if the specified key or value is null
1589      */
1590     public V replace(K key, V value) {
1591         if (key == null || value == null)
1592             throw new NullPointerException();
1593         return replaceNode(key, value, null);
1594     }
1595 
1596     // Overrides of JDK8+ Map extension method defaults
1597 
1598     /**
1599      * Returns the value to which the specified key is mapped, or the
1600      * given default value if this map contains no mapping for the
1601      * key.
1602      *
1603      * @param key the key whose associated value is to be returned
1604      * @param defaultValue the value to return if this map contains
1605      * no mapping for the given key
1606      * @return the mapping for the key, if present; else the default value
1607      * @throws NullPointerException if the specified key is null
1608      */
1609     public V getOrDefault(Object key, V defaultValue) {
1610         V v;
1611         return (v = get(key)) == null ? defaultValue : v;
1612     }
1613 
1614     public void forEach(BiConsumer<? super K, ? super V> action) {
1615         if (action == null) throw new NullPointerException();
1616         Node<K,V>[] t;
1617         if ((t = table) != null) {
1618             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1619             for (Node<K,V> p; (p = it.advance()) != null; ) {
1620                 action.accept(p.key, p.val);
1621             }
1622         }
1623     }
1624 
1625     public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
1626         if (function == null) throw new NullPointerException();
1627         Node<K,V>[] t;
1628         if ((t = table) != null) {
1629             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1630             for (Node<K,V> p; (p = it.advance()) != null; ) {
1631                 V oldValue = p.val;
1632                 for (K key = p.key;;) {
1633                     V newValue = function.apply(key, oldValue);
1634                     if (newValue == null)
1635                         throw new NullPointerException();
1636                     if (replaceNode(key, newValue, oldValue) != null ||
1637                         (oldValue = get(key)) == null)
1638                         break;
1639                 }
1640             }
1641         }
1642     }
1643 
1644     /**
1645      * Helper method for EntrySetView.removeIf.
1646      */
1647     boolean removeEntryIf(Predicate<? super Entry<K,V>> function) {
1648         if (function == null) throw new NullPointerException();
1649         Node<K,V>[] t;
1650         boolean removed = false;
1651         if ((t = table) != null) {
1652             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1653             for (Node<K,V> p; (p = it.advance()) != null; ) {
1654                 K k = p.key;
1655                 V v = p.val;
1656                 Map.Entry<K,V> e = new AbstractMap.SimpleImmutableEntry<>(k, v);
1657                 if (function.test(e) && replaceNode(k, null, v) != null)
1658                     removed = true;
1659             }
1660         }
1661         return removed;
1662     }
1663 
1664     /**
1665      * Helper method for ValuesView.removeIf.
1666      */
1667     boolean removeValueIf(Predicate<? super V> function) {
1668         if (function == null) throw new NullPointerException();
1669         Node<K,V>[] t;
1670         boolean removed = false;
1671         if ((t = table) != null) {
1672             Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
1673             for (Node<K,V> p; (p = it.advance()) != null; ) {
1674                 K k = p.key;
1675                 V v = p.val;
1676                 if (function.test(v) && replaceNode(k, null, v) != null)
1677                     removed = true;
1678             }
1679         }
1680         return removed;
1681     }
1682 
1683     /**
1684      * If the specified key is not already associated with a value,
1685      * attempts to compute its value using the given mapping function
1686      * and enters it into this map unless {@code null}.  The entire
1687      * method invocation is performed atomically.  The supplied
1688      * function is invoked exactly once per invocation of this method
1689      * if the key is absent, else not at all.  Some attempted update
1690      * operations on this map by other threads may be blocked while
1691      * computation is in progress, so the computation should be short
1692      * and simple.
1693      *
1694      * <p>The mapping function must not modify this map during computation.
1695      *
1696      * @param key key with which the specified value is to be associated
1697      * @param mappingFunction the function to compute a value
1698      * @return the current (existing or computed) value associated with
1699      *         the specified key, or null if the computed value is null
1700      * @throws NullPointerException if the specified key or mappingFunction
1701      *         is null
1702      * @throws IllegalStateException if the computation detectably
1703      *         attempts a recursive update to this map that would
1704      *         otherwise never complete
1705      * @throws RuntimeException or Error if the mappingFunction does so,
1706      *         in which case the mapping is left unestablished
1707      */
1708     public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
1709         if (key == null || mappingFunction == null)
1710             throw new NullPointerException();
1711         int h = spread(key.hashCode());
1712         V val = null;
1713         int binCount = 0;
1714         for (Node<K,V>[] tab = table;;) {
1715             Node<K,V> f; int n, i, fh; K fk; V fv;
1716             if (tab == null || (n = tab.length) == 0)
1717                 tab = initTable();
1718             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1719                 Node<K,V> r = new ReservationNode<K,V>();
1720                 synchronized (r) {
1721                     if (casTabAt(tab, i, null, r)) {
1722                         binCount = 1;
1723                         Node<K,V> node = null;
1724                         try {
1725                             if ((val = mappingFunction.apply(key)) != null)
1726                                 node = new Node<K,V>(h, key, val);
1727                         } finally {
1728                             setTabAt(tab, i, node);
1729                         }
1730                     }
1731                 }
1732                 if (binCount != 0)
1733                     break;
1734             }
1735             else if ((fh = f.hash) == MOVED)
1736                 tab = helpTransfer(tab, f);
1737             else if (fh == h    // check first node without acquiring lock
1738                      && ((fk = f.key) != null && Objects.equals(key, fk))
1739                      && (fv = f.val) != null)
1740                 return fv;
1741             else {
1742                 boolean added = false;
1743                 synchronized (f) {
1744                     if (tabAt(tab, i) == f) {
1745                         if (fh >= 0) {
1746                             binCount = 1;
1747                             for (Node<K,V> e = f;; ++binCount) {
1748                                 K ek;
1749                                 if (e.hash == h &&
1750                                     ((ek = e.key) == key ||
1751                                      (ek != null && key.equals(ek)))) {
1752                                     val = e.val;
1753                                     break;
1754                                 }
1755                                 Node<K,V> pred = e;
1756                                 if ((e = e.next) == null) {
1757                                     if ((val = mappingFunction.apply(key)) != null) {
1758                                         if (pred.next != null)
1759                                             throw new IllegalStateException("Recursive update");
1760                                         added = true;
1761                                         pred.next = new Node<K,V>(h, key, val);
1762                                     }
1763                                     break;
1764                                 }
1765                             }
1766                         }
1767                         else if (f instanceof TreeBin) {
1768                             binCount = 2;
1769                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1770                             TreeNode<K,V> r, p;
1771                             if ((r = t.root) != null &&
1772                                 (p = r.findTreeNode(h, key, null)) != null)
1773                                 val = p.val;
1774                             else if ((val = mappingFunction.apply(key)) != null) {
1775                                 added = true;
1776                                 t.putTreeVal(h, key, val);
1777                             }
1778                         }
1779                         else if (f instanceof ReservationNode)
1780                             throw new IllegalStateException("Recursive update");
1781                     }
1782                 }
1783                 if (binCount != 0) {
1784                     if (binCount >= TREEIFY_THRESHOLD)
1785                         treeifyBin(tab, i);
1786                     if (!added)
1787                         return val;
1788                     break;
1789                 }
1790             }
1791         }
1792         if (val != null)
1793             addCount(1L, binCount);
1794         return val;
1795     }
1796 
1797     /**
1798      * If the value for the specified key is present, attempts to
1799      * compute a new mapping given the key and its current mapped
1800      * value.  The entire method invocation is performed atomically.
1801      * The supplied function is invoked exactly once per invocation of
1802      * this method if the key is present, else not at all.  Some
1803      * attempted update operations on this map by other threads may be
1804      * blocked while computation is in progress, so the computation
1805      * should be short and simple.
1806      *
1807      * <p>The remapping function must not modify this map during computation.
1808      *
1809      * @param key key with which a value may be associated
1810      * @param remappingFunction the function to compute a value
1811      * @return the new value associated with the specified key, or null if none
1812      * @throws NullPointerException if the specified key or remappingFunction
1813      *         is null
1814      * @throws IllegalStateException if the computation detectably
1815      *         attempts a recursive update to this map that would
1816      *         otherwise never complete
1817      * @throws RuntimeException or Error if the remappingFunction does so,
1818      *         in which case the mapping is unchanged
1819      */
1820     public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1821         if (key == null || remappingFunction == null)
1822             throw new NullPointerException();
1823         int h = spread(key.hashCode());
1824         V val = null;
1825         int delta = 0;
1826         int binCount = 0;
1827         for (Node<K,V>[] tab = table;;) {
1828             Node<K,V> f; int n, i, fh;
1829             if (tab == null || (n = tab.length) == 0)
1830                 tab = initTable();
1831             else if ((f = tabAt(tab, i = (n - 1) & h)) == null)
1832                 break;
1833             else if ((fh = f.hash) == MOVED)
1834                 tab = helpTransfer(tab, f);
1835             else {
1836                 synchronized (f) {
1837                     if (tabAt(tab, i) == f) {
1838                         if (fh >= 0) {
1839                             binCount = 1;
1840                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1841                                 K ek;
1842                                 if (e.hash == h &&
1843                                     ((ek = e.key) != null && Objects.equals(key, ek))) {
1844                                     val = remappingFunction.apply(key, e.val);
1845                                     if (val != null)
1846                                         e.val = val;
1847                                     else {
1848                                         delta = -1;
1849                                         Node<K,V> en = e.next;
1850                                         if (pred != null)
1851                                             pred.next = en;
1852                                         else
1853                                             setTabAt(tab, i, en);
1854                                     }
1855                                     break;
1856                                 }
1857                                 pred = e;
1858                                 if ((e = e.next) == null)
1859                                     break;
1860                             }
1861                         }
1862                         else if (f instanceof TreeBin) {
1863                             binCount = 2;
1864                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1865                             TreeNode<K,V> r, p;
1866                             if ((r = t.root) != null &&
1867                                 (p = r.findTreeNode(h, key, null)) != null) {
1868                                 val = remappingFunction.apply(key, p.val);
1869                                 if (val != null)
1870                                     p.val = val;
1871                                 else {
1872                                     delta = -1;
1873                                     if (t.removeTreeNode(p))
1874                                         setTabAt(tab, i, untreeify(t.first));
1875                                 }
1876                             }
1877                         }
1878                         else if (f instanceof ReservationNode)
1879                             throw new IllegalStateException("Recursive update");
1880                     }
1881                 }
1882                 if (binCount != 0)
1883                     break;
1884             }
1885         }
1886         if (delta != 0)
1887             addCount((long)delta, binCount);
1888         return val;
1889     }
1890 
1891     /**
1892      * Attempts to compute a mapping for the specified key and its
1893      * current mapped value (or {@code null} if there is no current
1894      * mapping). The entire method invocation is performed atomically.
1895      * The supplied function is invoked exactly once per invocation of
1896      * this method.  Some attempted update operations on this map by
1897      * other threads may be blocked while computation is in progress,
1898      * so the computation should be short and simple.
1899      *
1900      * <p>The remapping function must not modify this map during computation.
1901      *
1902      * @param key key with which the specified value is to be associated
1903      * @param remappingFunction the function to compute a value
1904      * @return the new value associated with the specified key, or null if none
1905      * @throws NullPointerException if the specified key or remappingFunction
1906      *         is null
1907      * @throws IllegalStateException if the computation detectably
1908      *         attempts a recursive update to this map that would
1909      *         otherwise never complete
1910      * @throws RuntimeException or Error if the remappingFunction does so,
1911      *         in which case the mapping is unchanged
1912      */
1913     public V compute(K key,
1914                      BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
1915         if (key == null || remappingFunction == null)
1916             throw new NullPointerException();
1917         int h = spread(key.hashCode());
1918         V val = null;
1919         int delta = 0;
1920         int binCount = 0;
1921         for (Node<K,V>[] tab = table;;) {
1922             Node<K,V> f; int n, i, fh;
1923             if (tab == null || (n = tab.length) == 0)
1924                 tab = initTable();
1925             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
1926                 Node<K,V> r = new ReservationNode<K,V>();
1927                 synchronized (r) {
1928                     if (casTabAt(tab, i, null, r)) {
1929                         binCount = 1;
1930                         Node<K,V> node = null;
1931                         try {
1932                             if ((val = remappingFunction.apply(key, null)) != null) {
1933                                 delta = 1;
1934                                 node = new Node<K,V>(h, key, val);
1935                             }
1936                         } finally {
1937                             setTabAt(tab, i, node);
1938                         }
1939                     }
1940                 }
1941                 if (binCount != 0)
1942                     break;
1943             }
1944             else if ((fh = f.hash) == MOVED)
1945                 tab = helpTransfer(tab, f);
1946             else {
1947                 synchronized (f) {
1948                     if (tabAt(tab, i) == f) {
1949                         if (fh >= 0) {
1950                             binCount = 1;
1951                             for (Node<K,V> e = f, pred = null;; ++binCount) {
1952                                 K ek;
1953                                 if (e.hash == h &&
1954                                     ((ek = e.key) != null && Objects.equals(key, ek))) {
1955                                     val = remappingFunction.apply(key, e.val);
1956                                     if (val != null)
1957                                         e.val = val;
1958                                     else {
1959                                         delta = -1;
1960                                         Node<K,V> en = e.next;
1961                                         if (pred != null)
1962                                             pred.next = en;
1963                                         else
1964                                             setTabAt(tab, i, en);
1965                                     }
1966                                     break;
1967                                 }
1968                                 pred = e;
1969                                 if ((e = e.next) == null) {
1970                                     val = remappingFunction.apply(key, null);
1971                                     if (val != null) {
1972                                         if (pred.next != null)
1973                                             throw new IllegalStateException("Recursive update");
1974                                         delta = 1;
1975                                         pred.next = new Node<K,V>(h, key, val);
1976                                     }
1977                                     break;
1978                                 }
1979                             }
1980                         }
1981                         else if (f instanceof TreeBin) {
1982                             binCount = 1;
1983                             TreeBin<K,V> t = (TreeBin<K,V>)f;
1984                             TreeNode<K,V> r, p;
1985                             if ((r = t.root) != null)
1986                                 p = r.findTreeNode(h, key, null);
1987                             else
1988                                 p = null;
1989                             V pv = (p == null) ? null : p.val;
1990                             val = remappingFunction.apply(key, pv);
1991                             if (val != null) {
1992                                 if (p != null)
1993                                     p.val = val;
1994                                 else {
1995                                     delta = 1;
1996                                     t.putTreeVal(h, key, val);
1997                                 }
1998                             }
1999                             else if (p != null) {
2000                                 delta = -1;
2001                                 if (t.removeTreeNode(p))
2002                                     setTabAt(tab, i, untreeify(t.first));
2003                             }
2004                         }
2005                         else if (f instanceof ReservationNode)
2006                             throw new IllegalStateException("Recursive update");
2007                     }
2008                 }
2009                 if (binCount != 0) {
2010                     if (binCount >= TREEIFY_THRESHOLD)
2011                         treeifyBin(tab, i);
2012                     break;
2013                 }
2014             }
2015         }
2016         if (delta != 0)
2017             addCount((long)delta, binCount);
2018         return val;
2019     }
2020 
2021     /**
2022      * If the specified key is not already associated with a
2023      * (non-null) value, associates it with the given value.
2024      * Otherwise, replaces the value with the results of the given
2025      * remapping function, or removes if {@code null}. The entire
2026      * method invocation is performed atomically.  Some attempted
2027      * update operations on this map by other threads may be blocked
2028      * while computation is in progress, so the computation should be
2029      * short and simple, and must not attempt to update any other
2030      * mappings of this Map.
2031      *
2032      * @param key key with which the specified value is to be associated
2033      * @param value the value to use if absent
2034      * @param remappingFunction the function to recompute a value if present
2035      * @return the new value associated with the specified key, or null if none
2036      * @throws NullPointerException if the specified key or the
2037      *         remappingFunction is null
2038      * @throws RuntimeException or Error if the remappingFunction does so,
2039      *         in which case the mapping is unchanged
2040      */
2041     public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
2042         if (key == null || value == null || remappingFunction == null)
2043             throw new NullPointerException();
2044         int h = spread(key.hashCode());
2045         V val = null;
2046         int delta = 0;
2047         int binCount = 0;
2048         for (Node<K,V>[] tab = table;;) {
2049             Node<K,V> f; int n, i, fh;
2050             if (tab == null || (n = tab.length) == 0)
2051                 tab = initTable();
2052             else if ((f = tabAt(tab, i = (n - 1) & h)) == null) {
2053                 if (casTabAt(tab, i, null, new Node<K,V>(h, key, value))) {
2054                     delta = 1;
2055                     val = value;
2056                     break;
2057                 }
2058             }
2059             else if ((fh = f.hash) == MOVED)
2060                 tab = helpTransfer(tab, f);
2061             else {
2062                 synchronized (f) {
2063                     if (tabAt(tab, i) == f) {
2064                         if (fh >= 0) {
2065                             binCount = 1;
2066                             for (Node<K,V> e = f, pred = null;; ++binCount) {
2067                                 K ek;
2068                                 if (e.hash == h &&
2069                                     ((ek = e.key) != null && Objects.equals(key, ek))) {
2070                                     val = remappingFunction.apply(e.val, value);
2071                                     if (val != null)
2072                                         e.val = val;
2073                                     else {
2074                                         delta = -1;
2075                                         Node<K,V> en = e.next;
2076                                         if (pred != null)
2077                                             pred.next = en;
2078                                         else
2079                                             setTabAt(tab, i, en);
2080                                     }
2081                                     break;
2082                                 }
2083                                 pred = e;
2084                                 if ((e = e.next) == null) {
2085                                     delta = 1;
2086                                     val = value;
2087                                     pred.next = new Node<K,V>(h, key, val);
2088                                     break;
2089                                 }
2090                             }
2091                         }
2092                         else if (f instanceof TreeBin) {
2093                             binCount = 2;
2094                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2095                             TreeNode<K,V> r = t.root;
2096                             TreeNode<K,V> p = (r == null) ? null :
2097                                 r.findTreeNode(h, key, null);
2098                             val = (p == null) ? value :
2099                                 remappingFunction.apply(p.val, value);
2100                             if (val != null) {
2101                                 if (p != null)
2102                                     p.val = val;
2103                                 else {
2104                                     delta = 1;
2105                                     t.putTreeVal(h, key, val);
2106                                 }
2107                             }
2108                             else if (p != null) {
2109                                 delta = -1;
2110                                 if (t.removeTreeNode(p))
2111                                     setTabAt(tab, i, untreeify(t.first));
2112                             }
2113                         }
2114                         else if (f instanceof ReservationNode)
2115                             throw new IllegalStateException("Recursive update");
2116                     }
2117                 }
2118                 if (binCount != 0) {
2119                     if (binCount >= TREEIFY_THRESHOLD)
2120                         treeifyBin(tab, i);
2121                     break;
2122                 }
2123             }
2124         }
2125         if (delta != 0)
2126             addCount((long)delta, binCount);
2127         return val;
2128     }
2129 
2130     // Hashtable legacy methods
2131 
2132     /**
2133      * Tests if some key maps into the specified value in this table.
2134      *
2135      * <p>Note that this method is identical in functionality to
2136      * {@link #containsValue(Object)}, and exists solely to ensure
2137      * full compatibility with class {@link java.util.Hashtable},
2138      * which supported this method prior to introduction of the
2139      * Java Collections Framework.
2140      *
2141      * @param  value a value to search for
2142      * @return {@code true} if and only if some key maps to the
2143      *         {@code value} argument in this table as
2144      *         determined by the {@code equals} method;
2145      *         {@code false} otherwise
2146      * @throws NullPointerException if the specified value is null
2147      */
2148     public boolean contains(Object value) {
2149         return containsValue(value);
2150     }
2151 
2152     /**
2153      * Returns an enumeration of the keys in this table.
2154      *
2155      * @return an enumeration of the keys in this table
2156      * @see #keySet()
2157      */
2158     public Enumeration<K> keys() {
2159         Node<K,V>[] t;
2160         int f = (t = table) == null ? 0 : t.length;
2161         return new KeyIterator<K,V>(t, f, 0, f, this);
2162     }
2163 
2164     /**
2165      * Returns an enumeration of the values in this table.
2166      *
2167      * @return an enumeration of the values in this table
2168      * @see #values()
2169      */
2170     public Enumeration<V> elements() {
2171         Node<K,V>[] t;
2172         int f = (t = table) == null ? 0 : t.length;
2173         return new ValueIterator<K,V>(t, f, 0, f, this);
2174     }
2175 
2176     // ConcurrentHashMap-only methods
2177 
2178     /**
2179      * Returns the number of mappings. This method should be used
2180      * instead of {@link #size} because a ConcurrentHashMap may
2181      * contain more mappings than can be represented as an int. The
2182      * value returned is an estimate; the actual count may differ if
2183      * there are concurrent insertions or removals.
2184      *
2185      * @return the number of mappings
2186      * @since 1.8
2187      */
2188     public long mappingCount() {
2189         long n = sumCount();
2190         return (n < 0L) ? 0L : n; // ignore transient negative values
2191     }
2192 
2193     /**
2194      * Creates a new {@link Set} backed by a ConcurrentHashMap
2195      * from the given type to {@code Boolean.TRUE}.
2196      *
2197      * @param <K> the element type of the returned set
2198      * @return the new set
2199      * @since 1.8
2200      */
2201     public static <K> KeySetView<K,Boolean> newKeySet() {
2202         return new KeySetView<K,Boolean>
2203             (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
2204     }
2205 
2206     /**
2207      * Creates a new {@link Set} backed by a ConcurrentHashMap
2208      * from the given type to {@code Boolean.TRUE}.
2209      *
2210      * @param initialCapacity The implementation performs internal
2211      * sizing to accommodate this many elements.
2212      * @param <K> the element type of the returned set
2213      * @return the new set
2214      * @throws IllegalArgumentException if the initial capacity of
2215      * elements is negative
2216      * @since 1.8
2217      */
2218     public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
2219         return new KeySetView<K,Boolean>
2220             (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
2221     }
2222 
2223     /**
2224      * Returns a {@link Set} view of the keys in this map, using the
2225      * given common mapped value for any additions (i.e., {@link
2226      * Collection#add} and {@link Collection#addAll(Collection)}).
2227      * This is of course only appropriate if it is acceptable to use
2228      * the same value for all additions from this view.
2229      *
2230      * @param mappedValue the mapped value to use for any additions
2231      * @return the set view
2232      * @throws NullPointerException if the mappedValue is null
2233      */
2234     public KeySetView<K,V> keySet(V mappedValue) {
2235         if (mappedValue == null)
2236             throw new NullPointerException();
2237         return new KeySetView<K,V>(this, mappedValue);
2238     }
2239 
2240     /* ---------------- Special Nodes -------------- */
2241 
2242     /**
2243      * A node inserted at head of bins during transfer operations.
2244      */
2245     static final class ForwardingNode<K,V> extends Node<K,V> {
2246         final Node<K,V>[] nextTable;
2247         ForwardingNode(Node<K,V>[] tab) {
2248             super(MOVED, null, null);
2249             this.nextTable = tab;
2250         }
2251 
2252         Node<K,V> find(int h, Object k) {
2253             // loop to avoid arbitrarily deep recursion on forwarding nodes
2254             outer: for (Node<K,V>[] tab = nextTable;;) {
2255                 Node<K,V> e; int n;
2256                 if (k == null || tab == null || (n = tab.length) == 0 ||
2257                     (e = tabAt(tab, (n - 1) & h)) == null)
2258                     return null;
2259                 for (;;) {
2260                     int eh; K ek;
2261                     if ((eh = e.hash) == h &&
2262                         ((ek = e.key) != null && Objects.equals(k, ek)))
2263                         return e;
2264                     if (eh < 0) {
2265                         if (e instanceof ForwardingNode) {
2266                             tab = ((ForwardingNode<K,V>)e).nextTable;
2267                             continue outer;
2268                         }
2269                         else
2270                             return e.find(h, k);
2271                     }
2272                     if ((e = e.next) == null)
2273                         return null;
2274                 }
2275             }
2276         }
2277     }
2278 
2279     /**
2280      * A place-holder node used in computeIfAbsent and compute.
2281      */
2282     static final class ReservationNode<K,V> extends Node<K,V> {
2283         ReservationNode() {
2284             super(RESERVED, null, null);
2285         }
2286 
2287         Node<K,V> find(int h, Object k) {
2288             return null;
2289         }
2290     }
2291 
2292     /* ---------------- Table Initialization and Resizing -------------- */
2293 
2294     /**
2295      * Returns the stamp bits for resizing a table of size n.
2296      * Must be negative when shifted left by RESIZE_STAMP_SHIFT.
2297      */
2298     static final int resizeStamp(int n) {
2299         return Integer.numberOfLeadingZeros(n) | (1 << (RESIZE_STAMP_BITS - 1));
2300     }
2301 
2302     /**
2303      * Initializes table, using the size recorded in sizeCtl.
2304      */
2305     private final Node<K,V>[] initTable() {
2306         Node<K,V>[] tab; int sc;
2307         while ((tab = table) == null || tab.length == 0) {
2308             if ((sc = sizeCtl) < 0)
2309                 Thread.yield(); // lost initialization race; just spin
2310             else if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2311                 try {
2312                     if ((tab = table) == null || tab.length == 0) {
2313                         int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
2314                         @SuppressWarnings("unchecked")
2315                         Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2316                         table = tab = nt;
2317                         sc = n - (n >>> 2);
2318                     }
2319                 } finally {
2320                     sizeCtl = sc;
2321                 }
2322                 break;
2323             }
2324         }
2325         return tab;
2326     }
2327 
2328     /**
2329      * Adds to count, and if table is too small and not already
2330      * resizing, initiates transfer. If already resizing, helps
2331      * perform transfer if work is available.  Rechecks occupancy
2332      * after a transfer to see if another resize is already needed
2333      * because resizings are lagging additions.
2334      *
2335      * @param x the count to add
2336      * @param check if <0, don't check resize, if <= 1 only check if uncontended
2337      */
2338     private final void addCount(long x, int check) {
2339         CounterCell[] cs; long b, s;
2340         if ((cs = counterCells) != null ||
2341             !U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
2342             CounterCell c; long v; int m;
2343             boolean uncontended = true;
2344             if (cs == null || (m = cs.length - 1) < 0 ||
2345                 (c = cs[ThreadLocalRandom.getProbe() & m]) == null ||
2346                 !(uncontended =
2347                   U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))) {
2348                 fullAddCount(x, uncontended);
2349                 return;
2350             }
2351             if (check <= 1)
2352                 return;
2353             s = sumCount();
2354         }
2355         if (check >= 0) {
2356             Node<K,V>[] tab, nt; int n, sc;
2357             while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
2358                    (n = tab.length) < MAXIMUM_CAPACITY) {
2359                 int rs = resizeStamp(n) << RESIZE_STAMP_SHIFT;
2360                 if (sc < 0) {
2361                     if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
2362                         (nt = nextTable) == null || transferIndex <= 0)
2363                         break;
2364                     if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1))
2365                         transfer(tab, nt);
2366                 }
2367                 else if (U.compareAndSetInt(this, SIZECTL, sc, rs + 2))
2368                     transfer(tab, null);
2369                 s = sumCount();
2370             }
2371         }
2372     }
2373 
2374     /**
2375      * Helps transfer if a resize is in progress.
2376      */
2377     final Node<K,V>[] helpTransfer(Node<K,V>[] tab, Node<K,V> f) {
2378         Node<K,V>[] nextTab; int sc;
2379         if (tab != null && (f instanceof ForwardingNode) &&
2380             (nextTab = ((ForwardingNode<K,V>)f).nextTable) != null) {
2381             int rs = resizeStamp(tab.length) << RESIZE_STAMP_SHIFT;
2382             while (nextTab == nextTable && table == tab &&
2383                    (sc = sizeCtl) < 0) {
2384                 if (sc == rs + MAX_RESIZERS || sc == rs + 1 ||
2385                     transferIndex <= 0)
2386                     break;
2387                 if (U.compareAndSetInt(this, SIZECTL, sc, sc + 1)) {
2388                     transfer(tab, nextTab);
2389                     break;
2390                 }
2391             }
2392             return nextTab;
2393         }
2394         return table;
2395     }
2396 
2397     /**
2398      * Tries to presize table to accommodate the given number of elements.
2399      *
2400      * @param size number of elements (doesn't need to be perfectly accurate)
2401      */
2402     private final void tryPresize(int size) {
2403         int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
2404             tableSizeFor(size + (size >>> 1) + 1);
2405         int sc;
2406         while ((sc = sizeCtl) >= 0) {
2407             Node<K,V>[] tab = table; int n;
2408             if (tab == null || (n = tab.length) == 0) {
2409                 n = (sc > c) ? sc : c;
2410                 if (U.compareAndSetInt(this, SIZECTL, sc, -1)) {
2411                     try {
2412                         if (table == tab) {
2413                             @SuppressWarnings("unchecked")
2414                             Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n];
2415                             table = nt;
2416                             sc = n - (n >>> 2);
2417                         }
2418                     } finally {
2419                         sizeCtl = sc;
2420                     }
2421                 }
2422             }
2423             else if (c <= sc || n >= MAXIMUM_CAPACITY)
2424                 break;
2425             else if (tab == table) {
2426                 int rs = resizeStamp(n);
2427                 if (U.compareAndSetInt(this, SIZECTL, sc,
2428                                         (rs << RESIZE_STAMP_SHIFT) + 2))
2429                     transfer(tab, null);
2430             }
2431         }
2432     }
2433 
2434     /**
2435      * Moves and/or copies the nodes in each bin to new table. See
2436      * above for explanation.
2437      */
2438     private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
2439         int n = tab.length, stride;
2440         if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
2441             stride = MIN_TRANSFER_STRIDE; // subdivide range
2442         if (nextTab == null) {            // initiating
2443             try {
2444                 @SuppressWarnings("unchecked")
2445                 Node<K,V>[] nt = (Node<K,V>[])new Node<?,?>[n << 1];
2446                 nextTab = nt;
2447             } catch (Throwable ex) {      // try to cope with OOME
2448                 sizeCtl = Integer.MAX_VALUE;
2449                 return;
2450             }
2451             nextTable = nextTab;
2452             transferIndex = n;
2453         }
2454         int nextn = nextTab.length;
2455         ForwardingNode<K,V> fwd = new ForwardingNode<K,V>(nextTab);
2456         boolean advance = true;
2457         boolean finishing = false; // to ensure sweep before committing nextTab
2458         for (int i = 0, bound = 0;;) {
2459             Node<K,V> f; int fh;
2460             while (advance) {
2461                 int nextIndex, nextBound;
2462                 if (--i >= bound || finishing)
2463                     advance = false;
2464                 else if ((nextIndex = transferIndex) <= 0) {
2465                     i = -1;
2466                     advance = false;
2467                 }
2468                 else if (U.compareAndSetInt
2469                          (this, TRANSFERINDEX, nextIndex,
2470                           nextBound = (nextIndex > stride ?
2471                                        nextIndex - stride : 0))) {
2472                     bound = nextBound;
2473                     i = nextIndex - 1;
2474                     advance = false;
2475                 }
2476             }
2477             if (i < 0 || i >= n || i + n >= nextn) {
2478                 int sc;
2479                 if (finishing) {
2480                     nextTable = null;
2481                     table = nextTab;
2482                     sizeCtl = (n << 1) - (n >>> 1);
2483                     return;
2484                 }
2485                 if (U.compareAndSetInt(this, SIZECTL, sc = sizeCtl, sc - 1)) {
2486                     if ((sc - 2) != resizeStamp(n) << RESIZE_STAMP_SHIFT)
2487                         return;
2488                     finishing = advance = true;
2489                     i = n; // recheck before commit
2490                 }
2491             }
2492             else if ((f = tabAt(tab, i)) == null)
2493                 advance = casTabAt(tab, i, null, fwd);
2494             else if ((fh = f.hash) == MOVED)
2495                 advance = true; // already processed
2496             else {
2497                 synchronized (f) {
2498                     if (tabAt(tab, i) == f) {
2499                         Node<K,V> ln, hn;
2500                         if (fh >= 0) {
2501                             int runBit = fh & n;
2502                             Node<K,V> lastRun = f;
2503                             for (Node<K,V> p = f.next; p != null; p = p.next) {
2504                                 int b = p.hash & n;
2505                                 if (b != runBit) {
2506                                     runBit = b;
2507                                     lastRun = p;
2508                                 }
2509                             }
2510                             if (runBit == 0) {
2511                                 ln = lastRun;
2512                                 hn = null;
2513                             }
2514                             else {
2515                                 hn = lastRun;
2516                                 ln = null;
2517                             }
2518                             for (Node<K,V> p = f; p != lastRun; p = p.next) {
2519                                 int ph = p.hash; K pk = p.key; V pv = p.val;
2520                                 if ((ph & n) == 0)
2521                                     ln = new Node<K,V>(ph, pk, pv, ln);
2522                                 else
2523                                     hn = new Node<K,V>(ph, pk, pv, hn);
2524                             }
2525                             setTabAt(nextTab, i, ln);
2526                             setTabAt(nextTab, i + n, hn);
2527                             setTabAt(tab, i, fwd);
2528                             advance = true;
2529                         }
2530                         else if (f instanceof TreeBin) {
2531                             TreeBin<K,V> t = (TreeBin<K,V>)f;
2532                             TreeNode<K,V> lo = null, loTail = null;
2533                             TreeNode<K,V> hi = null, hiTail = null;
2534                             int lc = 0, hc = 0;
2535                             for (Node<K,V> e = t.first; e != null; e = e.next) {
2536                                 int h = e.hash;
2537                                 TreeNode<K,V> p = new TreeNode<K,V>
2538                                     (h, e.key, e.val, null, null);
2539                                 if ((h & n) == 0) {
2540                                     if ((p.prev = loTail) == null)
2541                                         lo = p;
2542                                     else
2543                                         loTail.next = p;
2544                                     loTail = p;
2545                                     ++lc;
2546                                 }
2547                                 else {
2548                                     if ((p.prev = hiTail) == null)
2549                                         hi = p;
2550                                     else
2551                                         hiTail.next = p;
2552                                     hiTail = p;
2553                                     ++hc;
2554                                 }
2555                             }
2556                             ln = (lc <= UNTREEIFY_THRESHOLD) ? untreeify(lo) :
2557                                 (hc != 0) ? new TreeBin<K,V>(lo) : t;
2558                             hn = (hc <= UNTREEIFY_THRESHOLD) ? untreeify(hi) :
2559                                 (lc != 0) ? new TreeBin<K,V>(hi) : t;
2560                             setTabAt(nextTab, i, ln);
2561                             setTabAt(nextTab, i + n, hn);
2562                             setTabAt(tab, i, fwd);
2563                             advance = true;
2564                         }
2565                         else if (f instanceof ReservationNode)
2566                             throw new IllegalStateException("Recursive update");
2567                     }
2568                 }
2569             }
2570         }
2571     }
2572 
2573     /* ---------------- Counter support -------------- */
2574 
2575     /**
2576      * A padded cell for distributing counts.  Adapted from LongAdder
2577      * and Striped64.  See their internal docs for explanation.
2578      */
2579     @jdk.internal.vm.annotation.Contended static final class CounterCell {
2580         volatile long value;
2581         CounterCell(long x) { value = x; }
2582     }
2583 
2584     final long sumCount() {
2585         CounterCell[] cs = counterCells;
2586         long sum = baseCount;
2587         if (cs != null) {
2588             for (CounterCell c : cs)
2589                 if (c != null)
2590                     sum += c.value;
2591         }
2592         return sum;
2593     }
2594 
    // See LongAdder version for explanation.
    // Slow path of addCount: spreads updates across CounterCells,
    // creating, expanding, or initializing the cell table as needed.
    // cellsBusy acts as a spinlock (0 = free, 1 = held) guarding all
    // structural changes to counterCells.
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            CounterCell[] cs; CounterCell c; int n; long v;
            if ((cs = counterCells) != null && (n = cs.length) > 0) {
                if ((c = cs[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        CounterCell r = new CounterCell(x); // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                CounterCell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))
                    break;                      // Added to the probed cell
                else if (counterCells != cs || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;             // First failure: rehash once more
                else if (cellsBusy == 0 &&
                         U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == cs) // Expand table unless stale
                            counterCells = Arrays.copyOf(cs, n << 1);
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h); // rehash to a new cell
            }
            else if (cellsBusy == 0 && counterCells == cs &&
                     U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == cs) {
                        CounterCell[] rs = new CounterCell[2];
                        rs[h & 1] = new CounterCell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSetLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
2672 
2673     /* ---------------- Conversion from/to TreeBins -------------- */
2674 
2675     /**
2676      * Replaces all linked nodes in bin at given index unless table is
2677      * too small, in which case resizes instead.
2678      */
2679     private final void treeifyBin(Node<K,V>[] tab, int index) {
2680         Node<K,V> b; int n;
2681         if (tab != null) {
2682             if ((n = tab.length) < MIN_TREEIFY_CAPACITY)
2683                 tryPresize(n << 1);
2684             else if ((b = tabAt(tab, index)) != null && b.hash >= 0) {
2685                 synchronized (b) {
2686                     if (tabAt(tab, index) == b) {
2687                         TreeNode<K,V> hd = null, tl = null;
2688                         for (Node<K,V> e = b; e != null; e = e.next) {
2689                             TreeNode<K,V> p =
2690                                 new TreeNode<K,V>(e.hash, e.key, e.val,
2691                                                   null, null);
2692                             if ((p.prev = tl) == null)
2693                                 hd = p;
2694                             else
2695                                 tl.next = p;
2696                             tl = p;
2697                         }
2698                         setTabAt(tab, index, new TreeBin<K,V>(hd));
2699                     }
2700                 }
2701             }
2702         }
2703     }
2704 
2705     /**
2706      * Returns a list of non-TreeNodes replacing those in given list.
2707      */
2708     static <K,V> Node<K,V> untreeify(Node<K,V> b) {
2709         Node<K,V> hd = null, tl = null;
2710         for (Node<K,V> q = b; q != null; q = q.next) {
2711             Node<K,V> p = new Node<K,V>(q.hash, q.key, q.val);
2712             if (tl == null)
2713                 hd = p;
2714             else
2715                 tl.next = p;
2716             tl = p;
2717         }
2718         return hd;
2719     }
2720 
2721     /* ---------------- TreeNodes -------------- */
2722 
2723     /**
2724      * Nodes for use in TreeBins.
2725      */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, K key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }

        Node<K,V> find(int h, Object k) {
            // Delegate the generic Node.find to the tree-aware search.
            return findTreeNode(h, k, null);
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         *
         * Ordering is primarily by hash; on hash ties it falls back to
         * Comparable ordering when the key's class supports it, and
         * otherwise recurses into the right subtree before continuing
         * iteratively down the left.
         */
        final TreeNode<K,V> findTreeNode(int h, Object k, Class<?> kc) {
            if (k != null) {
                TreeNode<K,V> p = this;
                do {
                    int ph, dir; K pk; TreeNode<K,V> q;
                    TreeNode<K,V> pl = p.left, pr = p.right;
                    if ((ph = p.hash) > h)
                        p = pl;
                    else if (ph < h)
                        p = pr;
                    else if ((pk = p.key) != null && Objects.equals(k, pk))
                        return p; // hash and key both match
                    else if (pl == null)
                        p = pr;
                    else if (pr == null)
                        p = pl;
                    else if ((kc != null ||
                              (kc = comparableClassFor(k)) != null) &&
                             (dir = compareComparables(kc, k, pk)) != 0)
                        p = (dir < 0) ? pl : pr;
                    else if ((q = pr.findTreeNode(h, k, kc)) != null)
                        return q; // non-orderable tie: search right subtree recursively
                    else
                        p = pl;   // ...then continue down the left iteratively
                } while (p != null);
            }
            return null;
        }
    }
2776 
2777     /* ---------------- TreeBins -------------- */
2778 
2779     /**
2780      * TreeNodes used at the heads of bins. TreeBins do not hold user
2781      * keys or values, but instead point to list of TreeNodes and
2782      * their root. They also maintain a parasitic read-write lock
2783      * forcing writers (who hold bin lock) to wait for readers (who do
2784      * not) to complete before tree restructuring operations.
2785      */
    static final class TreeBin<K,V> extends Node<K,V> {
        TreeNode<K,V> root;            // root of red-black tree; restructured only under lock
        volatile TreeNode<K,V> first;  // head of linked TreeNode list, usable without lock
        volatile Thread waiter;        // thread waiting (possibly parked) for the write lock
        volatile int lockState;
        // values for lockState
        static final int WRITER = 1; // set while holding write lock
        static final int WAITER = 2; // set when waiting for write lock
        static final int READER = 4; // increment value for setting read lock

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable. We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings. Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Creates bin with initial set of nodes headed by b.
         */
        TreeBin(TreeNode<K,V> b) {
            super(TREEBIN, null, null);
            this.first = b;
            TreeNode<K,V> r = null;
            for (TreeNode<K,V> x = b, next; x != null; x = next) {
                next = (TreeNode<K,V>)x.next;
                x.left = x.right = null;
                if (r == null) {
                    x.parent = null;
                    x.red = false; // root is always black
                    r = x;
                }
                else {
                    K k = x.key;
                    int h = x.hash;
                    Class<?> kc = null;
                    // Descend from root: order by hash, then Comparable
                    // (when applicable), then tieBreakOrder.
                    for (TreeNode<K,V> p = r;;) {
                        int dir, ph;
                        K pk = p.key;
                        if ((ph = p.hash) > h)
                            dir = -1;
                        else if (ph < h)
                            dir = 1;
                        else if ((kc == null &&
                                  (kc = comparableClassFor(k)) == null) ||
                                 (dir = compareComparables(kc, k, pk)) == 0)
                            dir = tieBreakOrder(k, pk);
                        TreeNode<K,V> xp = p;
                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
                            x.parent = xp;
                            if (dir <= 0)
                                xp.left = x;
                            else
                                xp.right = x;
                            r = balanceInsertion(r, x);
                            break;
                        }
                    }
                }
            }
            this.root = r;
            assert checkInvariants(root);
        }

        /**
         * Acquires write lock for tree restructuring.
         */
        private final void lockRoot() {
            if (!U.compareAndSetInt(this, LOCKSTATE, 0, WRITER))
                contendedLock(); // offload to separate method
        }

        /**
         * Releases write lock for tree restructuring.
         */
        private final void unlockRoot() {
            lockState = 0;
        }

        /**
         * Possibly blocks awaiting root lock.
         */
        private final void contendedLock() {
            Thread current = Thread.currentThread(), w;
            for (int s;;) {
                if (((s = lockState) & ~WAITER) == 0) { // no readers and no writer
                    if (U.compareAndSetInt(this, LOCKSTATE, s, WRITER)) {
                        if (waiter == current) // clear our own waiter registration
                            U.compareAndSetReference(this, WAITERTHREAD, current, null);
                        return;
                    }
                }
                else if ((s & WAITER) == 0)
                    U.compareAndSetInt(this, LOCKSTATE, s, s | WAITER); // advertise intent to wait
                else if ((w = waiter) == null)
                    U.compareAndSetReference(this, WAITERTHREAD, null, current); // register as the waiter
                else if (w == current)
                    LockSupport.park(this); // the last exiting reader unparks us (see find)
            }
        }

        /**
         * Returns matching node or null if none. Tries to search
         * using tree comparisons from root, but continues linear
         * search when lock not available.
         */
        final Node<K,V> find(int h, Object k) {
            if (k != null) {
                for (Node<K,V> e = first; e != null; ) {
                    int s; K ek;
                    if (((s = lockState) & (WAITER|WRITER)) != 0) { // locked/contended: linear scan
                        if (e.hash == h &&
                            ((ek = e.key) != null && Objects.equals(k, ek)))
                            return e;
                        e = e.next;
                    }
                    else if (U.compareAndSetInt(this, LOCKSTATE, s,
                                                 s + READER)) { // acquired read lock
                        TreeNode<K,V> r, p;
                        try {
                            p = ((r = root) == null ? null :
                                 r.findTreeNode(h, k, null));
                        } finally {
                            Thread w;
                            // Release read lock; if we were the last reader
                            // and a writer is waiting, wake it up.
                            if (U.getAndAddInt(this, LOCKSTATE, -READER) ==
                                (READER|WAITER) && (w = waiter) != null)
                                LockSupport.unpark(w);
                        }
                        return p;
                    }
                }
            }
            return null;
        }

        /**
         * Finds or adds a node.
         * @return null if added
         */
        final TreeNode<K,V> putTreeVal(int h, K k, V v) {
            Class<?> kc = null;
            boolean searched = false; // full two-subtree search done at most once
            for (TreeNode<K,V> p = root;;) {
                int dir, ph; K pk;
                if (p == null) {
                    first = root = new TreeNode<K,V>(h, k, v, null, null);
                    break;
                }
                else if ((ph = p.hash) > h)
                    dir = -1;
                else if (ph < h)
                    dir = 1;
                else if ((pk = p.key) != null && Objects.equals(k, pk))
                    return p; // existing mapping found
                else if ((kc == null &&
                          (kc = comparableClassFor(k)) == null) ||
                         (dir = compareComparables(kc, k, pk)) == 0) {
                    if (!searched) {
                        // Non-orderable hash tie: the key could be in either
                        // subtree, so check both before inserting.
                        TreeNode<K,V> q, ch;
                        searched = true;
                        if (((ch = p.left) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null) ||
                            ((ch = p.right) != null &&
                             (q = ch.findTreeNode(h, k, kc)) != null))
                            return q;
                    }
                    dir = tieBreakOrder(k, pk);
                }

                TreeNode<K,V> xp = p;
                if ((p = (dir <= 0) ? p.left : p.right) == null) {
                    // Link new node at list head and as child of xp.
                    TreeNode<K,V> x, f = first;
                    first = x = new TreeNode<K,V>(h, k, v, f, xp);
                    if (f != null)
                        f.prev = x;
                    if (dir <= 0)
                        xp.left = x;
                    else
                        xp.right = x;
                    if (!xp.red)
                        x.red = true; // black parent: no rebalancing needed
                    else {
                        lockRoot(); // rebalancing may restructure; exclude readers
                        try {
                            root = balanceInsertion(root, x);
                        } finally {
                            unlockRoot();
                        }
                    }
                    break;
                }
            }
            assert checkInvariants(root);
            return null;
        }

        /**
         * Removes the given node, that must be present before this
         * call.  This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         *
         * @return true if now too small, so should be untreeified
         */
        final boolean removeTreeNode(TreeNode<K,V> p) {
            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
            TreeNode<K,V> pred = p.prev;  // unlink traversal pointers
            TreeNode<K,V> r, rl;
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            if (first == null) {
                root = null;
                return true;
            }
            if ((r = root) == null || r.right == null || // too small
                (rl = r.left) == null || rl.left == null)
                return true;
            lockRoot();
            try {
                TreeNode<K,V> replacement;
                TreeNode<K,V> pl = p.left;
                TreeNode<K,V> pr = p.right;
                if (pl != null && pr != null) {
                    // Two children: swap p's tree linkage (not contents)
                    // with its in-order successor s.
                    TreeNode<K,V> s = pr, sl;
                    while ((sl = s.left) != null) // find successor
                        s = sl;
                    boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                    TreeNode<K,V> sr = s.right;
                    TreeNode<K,V> pp = p.parent;
                    if (s == pr) { // p was s's direct parent
                        p.parent = s;
                        s.right = p;
                    }
                    else {
                        TreeNode<K,V> sp = s.parent;
                        if ((p.parent = sp) != null) {
                            if (s == sp.left)
                                sp.left = p;
                            else
                                sp.right = p;
                        }
                        if ((s.right = pr) != null)
                            pr.parent = s;
                    }
                    p.left = null;
                    if ((p.right = sr) != null)
                        sr.parent = p;
                    if ((s.left = pl) != null)
                        pl.parent = s;
                    if ((s.parent = pp) == null)
                        r = s;
                    else if (p == pp.left)
                        pp.left = s;
                    else
                        pp.right = s;
                    if (sr != null)
                        replacement = sr;
                    else
                        replacement = p;
                }
                else if (pl != null)
                    replacement = pl;
                else if (pr != null)
                    replacement = pr;
                else
                    replacement = p; // no children: p is its own placeholder
                if (replacement != p) {
                    TreeNode<K,V> pp = replacement.parent = p.parent;
                    if (pp == null)
                        r = replacement;
                    else if (p == pp.left)
                        pp.left = replacement;
                    else
                        pp.right = replacement;
                    p.left = p.right = p.parent = null;
                }

                root = (p.red) ? r : balanceDeletion(r, replacement);

                if (p == replacement) {  // detach pointers
                    TreeNode<K,V> pp;
                    if ((pp = p.parent) != null) {
                        if (p == pp.left)
                            pp.left = null;
                        else if (p == pp.right)
                            pp.right = null;
                        p.parent = null;
                    }
                }
            } finally {
                unlockRoot();
            }
            assert checkInvariants(root);
            return false;
        }

        /* ------------------------------------------------------------ */
        // Red-black tree methods, all adapted from CLR

        /**
         * Performs a left rotation around p; returns the (possibly new) root.
         */
        static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root,
                                              TreeNode<K,V> p) {
            TreeNode<K,V> r, pp, rl;
            if (p != null && (r = p.right) != null) {
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    (root = r).red = false; // r became root: recolor black
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
            return root;
        }

        /**
         * Performs a right rotation around p; returns the (possibly new) root.
         */
        static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root,
                                               TreeNode<K,V> p) {
            TreeNode<K,V> l, pp, lr;
            if (p != null && (l = p.left) != null) {
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    (root = l).red = false; // l became root: recolor black
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
            return root;
        }

        /**
         * Restores red-black invariants after inserting x; returns new root.
         */
        static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root,
                                                    TreeNode<K,V> x) {
            x.red = true; // new nodes start red
            for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (!xp.red || (xpp = xp.parent) == null)
                    return root; // black parent: no violation remains
                if (xp == (xppl = xpp.left)) {
                    if ((xppr = xpp.right) != null && xppr.red) {
                        // Red uncle: recolor and continue from grandparent.
                        xppr.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.right) {
                            root = rotateLeft(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateRight(root, xpp);
                            }
                        }
                    }
                }
                else { // mirror image of the branch above
                    if (xppl != null && xppl.red) {
                        xppl.red = false;
                        xp.red = false;
                        xpp.red = true;
                        x = xpp;
                    }
                    else {
                        if (x == xp.left) {
                            root = rotateRight(root, x = xp);
                            xpp = (xp = x.parent) == null ? null : xp.parent;
                        }
                        if (xp != null) {
                            xp.red = false;
                            if (xpp != null) {
                                xpp.red = true;
                                root = rotateLeft(root, xpp);
                            }
                        }
                    }
                }
            }
        }

        /**
         * Restores red-black invariants after unlinking a node, with x
         * the replacement node; returns new root.
         */
        static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root,
                                                   TreeNode<K,V> x) {
            for (TreeNode<K,V> xp, xpl, xpr;;) {
                if (x == null || x == root)
                    return root;
                else if ((xp = x.parent) == null) {
                    x.red = false;
                    return x;
                }
                else if (x.red) {
                    x.red = false; // absorb the extra black
                    return root;
                }
                else if ((xpl = xp.left) == x) {
                    if ((xpr = xp.right) != null && xpr.red) {
                        // Red sibling: rotate to get a black sibling.
                        xpr.red = false;
                        xp.red = true;
                        root = rotateLeft(root, xp);
                        xpr = (xp = x.parent) == null ? null : xp.right;
                    }
                    if (xpr == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                        if ((sr == null || !sr.red) &&
                            (sl == null || !sl.red)) {
                            // Sibling with black children: recolor, move up.
                            xpr.red = true;
                            x = xp;
                        }
                        else {
                            if (sr == null || !sr.red) {
                                if (sl != null)
                                    sl.red = false;
                                xpr.red = true;
                                root = rotateRight(root, xpr);
                                xpr = (xp = x.parent) == null ?
                                    null : xp.right;
                            }
                            if (xpr != null) {
                                xpr.red = (xp == null) ? false : xp.red;
                                if ((sr = xpr.right) != null)
                                    sr.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateLeft(root, xp);
                            }
                            x = root; // done
                        }
                    }
                }
                else { // symmetric
                    if (xpl != null && xpl.red) {
                        xpl.red = false;
                        xp.red = true;
                        root = rotateRight(root, xp);
                        xpl = (xp = x.parent) == null ? null : xp.left;
                    }
                    if (xpl == null)
                        x = xp;
                    else {
                        TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                        if ((sl == null || !sl.red) &&
                            (sr == null || !sr.red)) {
                            xpl.red = true;
                            x = xp;
                        }
                        else {
                            if (sl == null || !sl.red) {
                                if (sr != null)
                                    sr.red = false;
                                xpl.red = true;
                                root = rotateLeft(root, xpl);
                                xpl = (xp = x.parent) == null ?
                                    null : xp.left;
                            }
                            if (xpl != null) {
                                xpl.red = (xp == null) ? false : xp.red;
                                if ((sl = xpl.left) != null)
                                    sl.red = false;
                            }
                            if (xp != null) {
                                xp.red = false;
                                root = rotateRight(root, xp);
                            }
                            x = root; // done
                        }
                    }
                }
            }
        }

        /**
         * Checks invariants recursively for the tree of Nodes rooted at t.
         */
        static <K,V> boolean checkInvariants(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false; // no red node with two red children
            if (tl != null && !checkInvariants(tl))
                return false;
            if (tr != null && !checkInvariants(tr))
                return false;
            return true;
        }

        // Unsafe mechanics: field offsets for CAS on lockState and waiter.
        private static final long LOCKSTATE
            = U.objectFieldOffset(TreeBin.class, "lockState");
        private static final long WAITERTHREAD
            = U.objectFieldOffset(TreeBin.class, "waiter");
    }
3314 
3315     /* ----------------Table Traversal -------------- */
3316 
3317     /**
3318      * Records the table, its length, and current traversal index for a
3319      * traverser that must process a region of a forwarded table before
3320      * proceeding with current table.
3321      */
    static final class TableStack<K,V> {
        int length;            // length of saved table
        int index;             // saved traversal index into that table
        Node<K,V>[] tab;       // saved table (nulled out when popped, for GC)
        TableStack<K,V> next;  // link in stack, or in the spare (free) list
    }
3328 
3329     /**
3330      * Encapsulates traversal for methods such as containsValue; also
3331      * serves as a base class for other iterators and spliterators.
3332      *
3333      * Method advance visits once each still-valid node that was
3334      * reachable upon iterator construction. It might miss some that
3335      * were added to a bin after the bin was visited, which is OK wrt
3336      * consistency guarantees. Maintaining this property in the face
3337      * of possible ongoing resizes requires a fair amount of
3338      * bookkeeping state that is difficult to optimize away amidst
3339      * volatile accesses.  Even so, traversal maintains reasonable
3340      * throughput.
3341      *
3342      * Normally, iteration proceeds bin-by-bin traversing lists.
3343      * However, if the table has been resized, then all future steps
3344      * must traverse both the bin at the current index as well as at
3345      * (index + baseSize); and so on for further resizings. To
3346      * paranoically cope with potential sharing by users of iterators
3347      * across threads, iteration terminates if a bounds checks fails
3348      * for a table read.
3349      */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // to save/restore on ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next; // continue along current bin's list
            for (;;) {
                Node<K,V>[] t; int i, n;  // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null; // exhausted, or defensive bounds check failed
                if ((e = tabAt(t, i)) != null && e.hash < 0) { // hash < 0: special node
                    if (e instanceof ForwardingNode) {
                        // Bin moved during resize: descend into the new
                        // table, remembering where we were.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first; // traverse tree bin via its list
                    else
                        e = null; // other special node: nothing to traverse here
                }
                if (stack != null)
                    recoverState(n);
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex; // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            // Pop frames whose split-bin region is fully visited,
            // restoring the outer table each time.
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
3437 
3438     /**
3439      * Base of key, value, and entry Iterators. Adds fields to
3440      * Traverser to support iterator.remove.
3441      */
3442     static class BaseIterator<K,V> extends Traverser<K,V> {
3443         final ConcurrentHashMap<K,V> map;
3444         Node<K,V> lastReturned;
3445         BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
3446                     ConcurrentHashMap<K,V> map) {
3447             super(tab, size, index, limit);
3448             this.map = map;
3449             advance();
3450         }
3451 
3452         public final boolean hasNext() { return next != null; }
3453         public final boolean hasMoreElements() { return next != null; }
3454 
3455         public final void remove() {
3456             Node<K,V> p;
3457             if ((p = lastReturned) == null)
3458                 throw new IllegalStateException();
3459             lastReturned = null;
3460             map.replaceNode(p.key, null, null);
3461         }
3462     }
3463 
3464     static final class KeyIterator<K,V> extends BaseIterator<K,V>
3465         implements Iterator<K>, Enumeration<K> {
3466         KeyIterator(Node<K,V>[] tab, int size, int index, int limit,
3467                     ConcurrentHashMap<K,V> map) {
3468             super(tab, size, index, limit, map);
3469         }
3470 
3471         public final K next() {
3472             Node<K,V> p;
3473             if ((p = next) == null)
3474                 throw new NoSuchElementException();
3475             K k = p.key;
3476             lastReturned = p;
3477             advance();
3478             return k;
3479         }
3480 
3481         public final K nextElement() { return next(); }
3482     }
3483 
3484     static final class ValueIterator<K,V> extends BaseIterator<K,V>
3485         implements Iterator<V>, Enumeration<V> {
3486         ValueIterator(Node<K,V>[] tab, int size, int index, int limit,
3487                       ConcurrentHashMap<K,V> map) {
3488             super(tab, size, index, limit, map);
3489         }
3490 
3491         public final V next() {
3492             Node<K,V> p;
3493             if ((p = next) == null)
3494                 throw new NoSuchElementException();
3495             V v = p.val;
3496             lastReturned = p;
3497             advance();
3498             return v;
3499         }
3500 
3501         public final V nextElement() { return next(); }
3502     }
3503 
3504     static final class EntryIterator<K,V> extends BaseIterator<K,V>
3505         implements Iterator<Map.Entry<K,V>> {
3506         EntryIterator(Node<K,V>[] tab, int size, int index, int limit,
3507                       ConcurrentHashMap<K,V> map) {
3508             super(tab, size, index, limit, map);
3509         }
3510 
3511         public final Map.Entry<K,V> next() {
3512             Node<K,V> p;
3513             if ((p = next) == null)
3514                 throw new NoSuchElementException();
3515             K k = p.key;
3516             V v = p.val;
3517             lastReturned = p;
3518             advance();
3519             return new MapEntry<K,V>(k, v, map);
3520         }
3521     }
3522 
3523     /**
3524      * Exported Entry for EntryIterator.
3525      */
3526     static final class MapEntry<K,V> implements Map.Entry<K,V> {
3527         final K key; // non-null
3528         V val;       // non-null
3529         final ConcurrentHashMap<K,V> map;
3530         MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
3531             this.key = key;
3532             this.val = val;
3533             this.map = map;
3534         }
3535         public K getKey()        { return key; }
3536         public V getValue()      { return val; }
3537         public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
3538         public String toString() {
3539             return Helpers.mapEntryToString(key, val);
3540         }
3541 
3542         public boolean equals(Object o) {
3543             Object k, v; Map.Entry<?,?> e;
3544             return ((o instanceof Map.Entry) &&
3545                     (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
3546                     (v = e.getValue()) != null &&
3547                     Objects.equals(k, key) &&
3548                     Objects.equals(v, val));
3549         }
3550 
3551         /**
3552          * Sets our entry's value and writes through to the map. The
3553          * value to return is somewhat arbitrary here. Since we do not
3554          * necessarily track asynchronous changes, the most recent
3555          * "previous" value could be different from what we return (or
3556          * could even have been removed, in which case the put will
3557          * re-establish). We do not and cannot guarantee more.
3558          */
3559         public V setValue(V value) {
3560             if (value == null) throw new NullPointerException();
3561             V v = val;
3562             val = value;
3563             map.put(key, value);
3564             return v;
3565         }
3566     }
3567 
3568     static final class KeySpliterator<K,V> extends Traverser<K,V>
3569         implements Spliterator<K> {
3570         long est;               // size estimate
3571         KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3572                        long est) {
3573             super(tab, size, index, limit);
3574             this.est = est;
3575         }
3576 
3577         public KeySpliterator<K,V> trySplit() {
3578             int i, f, h;
3579             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3580                 new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
3581                                         f, est >>>= 1);
3582         }
3583 
3584         public void forEachRemaining(Consumer<? super K> action) {
3585             if (action == null) throw new NullPointerException();
3586             for (Node<K,V> p; (p = advance()) != null;)
3587                 action.accept(p.key);
3588         }
3589 
3590         public boolean tryAdvance(Consumer<? super K> action) {
3591             if (action == null) throw new NullPointerException();
3592             Node<K,V> p;
3593             if ((p = advance()) == null)
3594                 return false;
3595             action.accept(p.key);
3596             return true;
3597         }
3598 
3599         public long estimateSize() { return est; }
3600 
3601         public int characteristics() {
3602             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3603                 Spliterator.NONNULL;
3604         }
3605     }
3606 
3607     static final class ValueSpliterator<K,V> extends Traverser<K,V>
3608         implements Spliterator<V> {
3609         long est;               // size estimate
3610         ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
3611                          long est) {
3612             super(tab, size, index, limit);
3613             this.est = est;
3614         }
3615 
3616         public ValueSpliterator<K,V> trySplit() {
3617             int i, f, h;
3618             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3619                 new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
3620                                           f, est >>>= 1);
3621         }
3622 
3623         public void forEachRemaining(Consumer<? super V> action) {
3624             if (action == null) throw new NullPointerException();
3625             for (Node<K,V> p; (p = advance()) != null;)
3626                 action.accept(p.val);
3627         }
3628 
3629         public boolean tryAdvance(Consumer<? super V> action) {
3630             if (action == null) throw new NullPointerException();
3631             Node<K,V> p;
3632             if ((p = advance()) == null)
3633                 return false;
3634             action.accept(p.val);
3635             return true;
3636         }
3637 
3638         public long estimateSize() { return est; }
3639 
3640         public int characteristics() {
3641             return Spliterator.CONCURRENT | Spliterator.NONNULL;
3642         }
3643     }
3644 
3645     static final class EntrySpliterator<K,V> extends Traverser<K,V>
3646         implements Spliterator<Map.Entry<K,V>> {
3647         final ConcurrentHashMap<K,V> map; // To export MapEntry
3648         long est;               // size estimate
3649         EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
3650                          long est, ConcurrentHashMap<K,V> map) {
3651             super(tab, size, index, limit);
3652             this.map = map;
3653             this.est = est;
3654         }
3655 
3656         public EntrySpliterator<K,V> trySplit() {
3657             int i, f, h;
3658             return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
3659                 new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
3660                                           f, est >>>= 1, map);
3661         }
3662 
3663         public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
3664             if (action == null) throw new NullPointerException();
3665             for (Node<K,V> p; (p = advance()) != null; )
3666                 action.accept(new MapEntry<K,V>(p.key, p.val, map));
3667         }
3668 
3669         public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
3670             if (action == null) throw new NullPointerException();
3671             Node<K,V> p;
3672             if ((p = advance()) == null)
3673                 return false;
3674             action.accept(new MapEntry<K,V>(p.key, p.val, map));
3675             return true;
3676         }
3677 
3678         public long estimateSize() { return est; }
3679 
3680         public int characteristics() {
3681             return Spliterator.DISTINCT | Spliterator.CONCURRENT |
3682                 Spliterator.NONNULL;
3683         }
3684     }
3685 
3686     // Parallel bulk operations
3687 
3688     /**
3689      * Computes initial batch value for bulk tasks. The returned value
3690      * is approximately exp2 of the number of times (minus one) to
3691      * split task by two before executing leaf action. This value is
3692      * faster to compute and more convenient to use as a guide to
3693      * splitting than is the depth, since it is used while dividing by
3694      * two anyway.
3695      */
3696     final int batchFor(long b) {
3697         long n;
3698         if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
3699             return 0;
3700         int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
3701         return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
3702     }
3703 
3704     /**
3705      * Performs the given {@linkplain ##Bulk bulk} action for each (key, value).
3706      *
3707      * @param parallelismThreshold the (estimated) number of elements
3708      * needed for this operation to be executed in parallel
3709      * @param action the action
3710      * @since 1.8
3711      */
3712     public void forEach(long parallelismThreshold,
3713                         BiConsumer<? super K,? super V> action) {
3714         if (action == null) throw new NullPointerException();
3715         new ForEachMappingTask<K,V>
3716             (null, batchFor(parallelismThreshold), 0, 0, table,
3717              action).invoke();
3718     }
3719 
3720     /**
3721      * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
3722      * of each (key, value).
3723      *
3724      * @param parallelismThreshold the (estimated) number of elements
3725      * needed for this operation to be executed in parallel
3726      * @param transformer a function returning the transformation
3727      * for an element, or null if there is no transformation (in
3728      * which case the action is not applied)
3729      * @param action the action
3730      * @param <U> the return type of the transformer
3731      * @since 1.8
3732      */
3733     public <U> void forEach(long parallelismThreshold,
3734                             BiFunction<? super K, ? super V, ? extends U> transformer,
3735                             Consumer<? super U> action) {
3736         if (transformer == null || action == null)
3737             throw new NullPointerException();
3738         new ForEachTransformedMappingTask<K,V,U>
3739             (null, batchFor(parallelismThreshold), 0, 0, table,
3740              transformer, action).invoke();
3741     }
3742 
3743     /**
3744      * Returns a non-null result from applying the given {@linkplain ##Bulk bulk} search
3745      * function on each (key, value), or null if none.  Upon
3746      * success, further element processing is suppressed and the
3747      * results of any other parallel invocations of the search
3748      * function are ignored.
3749      *
3750      * @param parallelismThreshold the (estimated) number of elements
3751      * needed for this operation to be executed in parallel
3752      * @param searchFunction a function returning a non-null
3753      * result on success, else null
3754      * @param <U> the return type of the search function
3755      * @return a non-null result from applying the given search
3756      * function on each (key, value), or null if none
3757      * @since 1.8
3758      */
3759     public <U> U search(long parallelismThreshold,
3760                         BiFunction<? super K, ? super V, ? extends U> searchFunction) {
3761         if (searchFunction == null) throw new NullPointerException();
3762         return new SearchMappingsTask<K,V,U>
3763             (null, batchFor(parallelismThreshold), 0, 0, table,
3764              searchFunction, new AtomicReference<U>()).invoke();
3765     }
3766 
3767     /**
3768      * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3769      * of all (key, value) pairs using the given reducer to
3770      * combine values, or null if none.
3771      *
3772      * @param parallelismThreshold the (estimated) number of elements
3773      * needed for this operation to be executed in parallel
3774      * @param transformer a function returning the transformation
3775      * for an element, or null if there is no transformation (in
3776      * which case it is not combined)
3777      * @param reducer a commutative associative combining function
3778      * @param <U> the return type of the transformer
3779      * @return the result of accumulating the given transformation
3780      * of all (key, value) pairs
3781      * @since 1.8
3782      */
3783     public <U> U reduce(long parallelismThreshold,
3784                         BiFunction<? super K, ? super V, ? extends U> transformer,
3785                         BiFunction<? super U, ? super U, ? extends U> reducer) {
3786         if (transformer == null || reducer == null)
3787             throw new NullPointerException();
3788         return new MapReduceMappingsTask<K,V,U>
3789             (null, batchFor(parallelismThreshold), 0, 0, table,
3790              null, transformer, reducer).invoke();
3791     }
3792 
3793     /**
3794      * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3795      * of all (key, value) pairs using the given reducer to
3796      * combine values, and the given basis as an identity value.
3797      *
3798      * @param parallelismThreshold the (estimated) number of elements
3799      * needed for this operation to be executed in parallel
3800      * @param transformer a function returning the transformation
3801      * for an element
3802      * @param basis the identity (initial default value) for the reduction
3803      * @param reducer a commutative associative combining function
3804      * @return the result of accumulating the given transformation
3805      * of all (key, value) pairs
3806      * @since 1.8
3807      */
3808     public double reduceToDouble(long parallelismThreshold,
3809                                  ToDoubleBiFunction<? super K, ? super V> transformer,
3810                                  double basis,
3811                                  DoubleBinaryOperator reducer) {
3812         if (transformer == null || reducer == null)
3813             throw new NullPointerException();
3814         return new MapReduceMappingsToDoubleTask<K,V>
3815             (null, batchFor(parallelismThreshold), 0, 0, table,
3816              null, transformer, basis, reducer).invoke();
3817     }
3818 
3819     /**
3820      * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3821      * of all (key, value) pairs using the given reducer to
3822      * combine values, and the given basis as an identity value.
3823      *
3824      * @param parallelismThreshold the (estimated) number of elements
3825      * needed for this operation to be executed in parallel
3826      * @param transformer a function returning the transformation
3827      * for an element
3828      * @param basis the identity (initial default value) for the reduction
3829      * @param reducer a commutative associative combining function
3830      * @return the result of accumulating the given transformation
3831      * of all (key, value) pairs
3832      * @since 1.8
3833      */
3834     public long reduceToLong(long parallelismThreshold,
3835                              ToLongBiFunction<? super K, ? super V> transformer,
3836                              long basis,
3837                              LongBinaryOperator reducer) {
3838         if (transformer == null || reducer == null)
3839             throw new NullPointerException();
3840         return new MapReduceMappingsToLongTask<K,V>
3841             (null, batchFor(parallelismThreshold), 0, 0, table,
3842              null, transformer, basis, reducer).invoke();
3843     }
3844 
3845     /**
3846      * Returns the result of accumulating the given {@linkplain ##Bulk bulk} transformation
3847      * of all (key, value) pairs using the given reducer to
3848      * combine values, and the given basis as an identity value.
3849      *
3850      * @param parallelismThreshold the (estimated) number of elements
3851      * needed for this operation to be executed in parallel
3852      * @param transformer a function returning the transformation
3853      * for an element
3854      * @param basis the identity (initial default value) for the reduction
3855      * @param reducer a commutative associative combining function
3856      * @return the result of accumulating the given transformation
3857      * of all (key, value) pairs
3858      * @since 1.8
3859      */
3860     public int reduceToInt(long parallelismThreshold,
3861                            ToIntBiFunction<? super K, ? super V> transformer,
3862                            int basis,
3863                            IntBinaryOperator reducer) {
3864         if (transformer == null || reducer == null)
3865             throw new NullPointerException();
3866         return new MapReduceMappingsToIntTask<K,V>
3867             (null, batchFor(parallelismThreshold), 0, 0, table,
3868              null, transformer, basis, reducer).invoke();
3869     }
3870 
3871     /**
3872      * Performs the given {@linkplain ##Bulk bulk} action for each key.
3873      *
3874      * @param parallelismThreshold the (estimated) number of elements
3875      * needed for this operation to be executed in parallel
3876      * @param action the action
3877      * @since 1.8
3878      */
3879     public void forEachKey(long parallelismThreshold,
3880                            Consumer<? super K> action) {
3881         if (action == null) throw new NullPointerException();
3882         new ForEachKeyTask<K,V>
3883             (null, batchFor(parallelismThreshold), 0, 0, table,
3884              action).invoke();
3885     }
3886 
3887     /**
3888      * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
3889      * of each key.
3890      *
3891      * @param parallelismThreshold the (estimated) number of elements
3892      * needed for this operation to be executed in parallel
3893      * @param transformer a function returning the transformation
3894      * for an element, or null if there is no transformation (in
3895      * which case the action is not applied)
3896      * @param action the action
3897      * @param <U> the return type of the transformer
3898      * @since 1.8
3899      */
3900     public <U> void forEachKey(long parallelismThreshold,
3901                                Function<? super K, ? extends U> transformer,
3902                                Consumer<? super U> action) {
3903         if (transformer == null || action == null)
3904             throw new NullPointerException();
3905         new ForEachTransformedKeyTask<K,V,U>
3906             (null, batchFor(parallelismThreshold), 0, 0, table,
3907              transformer, action).invoke();
3908     }
3909 
3910     /**
3911      * Returns a non-null result from applying the given {@linkplain ##Bulk bulk} search
3912      * function on each key, or null if none. Upon success,
3913      * further element processing is suppressed and the results of
3914      * any other parallel invocations of the search function are
3915      * ignored.
3916      *
3917      * @param parallelismThreshold the (estimated) number of elements
3918      * needed for this operation to be executed in parallel
3919      * @param searchFunction a function returning a non-null
3920      * result on success, else null
3921      * @param <U> the return type of the search function
3922      * @return a non-null result from applying the given search
3923      * function on each key, or null if none
3924      * @since 1.8
3925      */
3926     public <U> U searchKeys(long parallelismThreshold,
3927                             Function<? super K, ? extends U> searchFunction) {
3928         if (searchFunction == null) throw new NullPointerException();
3929         return new SearchKeysTask<K,V,U>
3930             (null, batchFor(parallelismThreshold), 0, 0, table,
3931              searchFunction, new AtomicReference<U>()).invoke();
3932     }
3933 
3934     /**
3935      * Returns the result of {@linkplain ##Bulk bulk} accumulating all keys using the given
3936      * reducer to combine values, or null if none.
3937      *
3938      * @param parallelismThreshold the (estimated) number of elements
3939      * needed for this operation to be executed in parallel
3940      * @param reducer a commutative associative combining function
3941      * @return the result of accumulating all keys using the given
3942      * reducer to combine values, or null if none
3943      * @since 1.8
3944      */
3945     public K reduceKeys(long parallelismThreshold,
3946                         BiFunction<? super K, ? super K, ? extends K> reducer) {
3947         if (reducer == null) throw new NullPointerException();
3948         return new ReduceKeysTask<K,V>
3949             (null, batchFor(parallelismThreshold), 0, 0, table,
3950              null, reducer).invoke();
3951     }
3952 
3953     /**
3954      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
3955      * of all keys using the given reducer to combine values, or
3956      * null if none.
3957      *
3958      * @param parallelismThreshold the (estimated) number of elements
3959      * needed for this operation to be executed in parallel
3960      * @param transformer a function returning the transformation
3961      * for an element, or null if there is no transformation (in
3962      * which case it is not combined)
3963      * @param reducer a commutative associative combining function
3964      * @param <U> the return type of the transformer
3965      * @return the result of accumulating the given transformation
3966      * of all keys
3967      * @since 1.8
3968      */
3969     public <U> U reduceKeys(long parallelismThreshold,
3970                             Function<? super K, ? extends U> transformer,
3971          BiFunction<? super U, ? super U, ? extends U> reducer) {
3972         if (transformer == null || reducer == null)
3973             throw new NullPointerException();
3974         return new MapReduceKeysTask<K,V,U>
3975             (null, batchFor(parallelismThreshold), 0, 0, table,
3976              null, transformer, reducer).invoke();
3977     }
3978 
3979     /**
3980      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
3981      * of all keys using the given reducer to combine values, and
3982      * the given basis as an identity value.
3983      *
3984      * @param parallelismThreshold the (estimated) number of elements
3985      * needed for this operation to be executed in parallel
3986      * @param transformer a function returning the transformation
3987      * for an element
3988      * @param basis the identity (initial default value) for the reduction
3989      * @param reducer a commutative associative combining function
3990      * @return the result of accumulating the given transformation
3991      * of all keys
3992      * @since 1.8
3993      */
3994     public double reduceKeysToDouble(long parallelismThreshold,
3995                                      ToDoubleFunction<? super K> transformer,
3996                                      double basis,
3997                                      DoubleBinaryOperator reducer) {
3998         if (transformer == null || reducer == null)
3999             throw new NullPointerException();
4000         return new MapReduceKeysToDoubleTask<K,V>
4001             (null, batchFor(parallelismThreshold), 0, 0, table,
4002              null, transformer, basis, reducer).invoke();
4003     }
4004 
4005     /**
4006      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4007      * of all keys using the given reducer to combine values, and
4008      * the given basis as an identity value.
4009      *
4010      * @param parallelismThreshold the (estimated) number of elements
4011      * needed for this operation to be executed in parallel
4012      * @param transformer a function returning the transformation
4013      * for an element
4014      * @param basis the identity (initial default value) for the reduction
4015      * @param reducer a commutative associative combining function
4016      * @return the result of accumulating the given transformation
4017      * of all keys
4018      * @since 1.8
4019      */
4020     public long reduceKeysToLong(long parallelismThreshold,
4021                                  ToLongFunction<? super K> transformer,
4022                                  long basis,
4023                                  LongBinaryOperator reducer) {
4024         if (transformer == null || reducer == null)
4025             throw new NullPointerException();
4026         return new MapReduceKeysToLongTask<K,V>
4027             (null, batchFor(parallelismThreshold), 0, 0, table,
4028              null, transformer, basis, reducer).invoke();
4029     }
4030 
4031     /**
4032      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4033      * of all keys using the given reducer to combine values, and
4034      * the given basis as an identity value.
4035      *
4036      * @param parallelismThreshold the (estimated) number of elements
4037      * needed for this operation to be executed in parallel
4038      * @param transformer a function returning the transformation
4039      * for an element
4040      * @param basis the identity (initial default value) for the reduction
4041      * @param reducer a commutative associative combining function
4042      * @return the result of accumulating the given transformation
4043      * of all keys
4044      * @since 1.8
4045      */
4046     public int reduceKeysToInt(long parallelismThreshold,
4047                                ToIntFunction<? super K> transformer,
4048                                int basis,
4049                                IntBinaryOperator reducer) {
4050         if (transformer == null || reducer == null)
4051             throw new NullPointerException();
4052         return new MapReduceKeysToIntTask<K,V>
4053             (null, batchFor(parallelismThreshold), 0, 0, table,
4054              null, transformer, basis, reducer).invoke();
4055     }
4056 
4057     /**
4058      * Performs the given {@linkplain ##Bulk bulk} action for each value.
4059      *
4060      * @param parallelismThreshold the (estimated) number of elements
4061      * needed for this operation to be executed in parallel
4062      * @param action the action
4063      * @since 1.8
4064      */
4065     public void forEachValue(long parallelismThreshold,
4066                              Consumer<? super V> action) {
4067         if (action == null)
4068             throw new NullPointerException();
4069         new ForEachValueTask<K,V>
4070             (null, batchFor(parallelismThreshold), 0, 0, table,
4071              action).invoke();
4072     }
4073 
4074     /**
4075      * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
4076      * of each value.
4077      *
4078      * @param parallelismThreshold the (estimated) number of elements
4079      * needed for this operation to be executed in parallel
4080      * @param transformer a function returning the transformation
4081      * for an element, or null if there is no transformation (in
4082      * which case the action is not applied)
4083      * @param action the action
4084      * @param <U> the return type of the transformer
4085      * @since 1.8
4086      */
4087     public <U> void forEachValue(long parallelismThreshold,
4088                                  Function<? super V, ? extends U> transformer,
4089                                  Consumer<? super U> action) {
4090         if (transformer == null || action == null)
4091             throw new NullPointerException();
4092         new ForEachTransformedValueTask<K,V,U>
4093             (null, batchFor(parallelismThreshold), 0, 0, table,
4094              transformer, action).invoke();
4095     }
4096 
4097     /**
4098      * Returns a non-null result from {@linkplain ##Bulk bulk} applying the given search
4099      * function on each value, or null if none.  Upon success,
4100      * further element processing is suppressed and the results of
4101      * any other parallel invocations of the search function are
4102      * ignored.
4103      *
4104      * @param parallelismThreshold the (estimated) number of elements
4105      * needed for this operation to be executed in parallel
4106      * @param searchFunction a function returning a non-null
4107      * result on success, else null
4108      * @param <U> the return type of the search function
4109      * @return a non-null result from applying the given search
4110      * function on each value, or null if none
4111      * @since 1.8
4112      */
4113     public <U> U searchValues(long parallelismThreshold,
4114                               Function<? super V, ? extends U> searchFunction) {
4115         if (searchFunction == null) throw new NullPointerException();
4116         return new SearchValuesTask<K,V,U>
4117             (null, batchFor(parallelismThreshold), 0, 0, table,
4118              searchFunction, new AtomicReference<U>()).invoke();
4119     }
4120 
4121     /**
4122      * Returns the result of {@linkplain ##Bulk bulk} accumulating all values using the
4123      * given reducer to combine values, or null if none.
4124      *
4125      * @param parallelismThreshold the (estimated) number of elements
4126      * needed for this operation to be executed in parallel
4127      * @param reducer a commutative associative combining function
4128      * @return the result of accumulating all values
4129      * @since 1.8
4130      */
4131     public V reduceValues(long parallelismThreshold,
4132                           BiFunction<? super V, ? super V, ? extends V> reducer) {
4133         if (reducer == null) throw new NullPointerException();
4134         return new ReduceValuesTask<K,V>
4135             (null, batchFor(parallelismThreshold), 0, 0, table,
4136              null, reducer).invoke();
4137     }
4138 
4139     /**
4140      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4141      * of all values using the given reducer to combine values, or
4142      * null if none.
4143      *
4144      * @param parallelismThreshold the (estimated) number of elements
4145      * needed for this operation to be executed in parallel
4146      * @param transformer a function returning the transformation
4147      * for an element, or null if there is no transformation (in
4148      * which case it is not combined)
4149      * @param reducer a commutative associative combining function
4150      * @param <U> the return type of the transformer
4151      * @return the result of accumulating the given transformation
4152      * of all values
4153      * @since 1.8
4154      */
4155     public <U> U reduceValues(long parallelismThreshold,
4156                               Function<? super V, ? extends U> transformer,
4157                               BiFunction<? super U, ? super U, ? extends U> reducer) {
4158         if (transformer == null || reducer == null)
4159             throw new NullPointerException();
4160         return new MapReduceValuesTask<K,V,U>
4161             (null, batchFor(parallelismThreshold), 0, 0, table,
4162              null, transformer, reducer).invoke();
4163     }
4164 
4165     /**
4166      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4167      * of all values using the given reducer to combine values,
4168      * and the given basis as an identity value.
4169      *
4170      * @param parallelismThreshold the (estimated) number of elements
4171      * needed for this operation to be executed in parallel
4172      * @param transformer a function returning the transformation
4173      * for an element
4174      * @param basis the identity (initial default value) for the reduction
4175      * @param reducer a commutative associative combining function
4176      * @return the result of accumulating the given transformation
4177      * of all values
4178      * @since 1.8
4179      */
4180     public double reduceValuesToDouble(long parallelismThreshold,
4181                                        ToDoubleFunction<? super V> transformer,
4182                                        double basis,
4183                                        DoubleBinaryOperator reducer) {
4184         if (transformer == null || reducer == null)
4185             throw new NullPointerException();
4186         return new MapReduceValuesToDoubleTask<K,V>
4187             (null, batchFor(parallelismThreshold), 0, 0, table,
4188              null, transformer, basis, reducer).invoke();
4189     }
4190 
4191     /**
4192      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4193      * of all values using the given reducer to combine values,
4194      * and the given basis as an identity value.
4195      *
4196      * @param parallelismThreshold the (estimated) number of elements
4197      * needed for this operation to be executed in parallel
4198      * @param transformer a function returning the transformation
4199      * for an element
4200      * @param basis the identity (initial default value) for the reduction
4201      * @param reducer a commutative associative combining function
4202      * @return the result of accumulating the given transformation
4203      * of all values
4204      * @since 1.8
4205      */
4206     public long reduceValuesToLong(long parallelismThreshold,
4207                                    ToLongFunction<? super V> transformer,
4208                                    long basis,
4209                                    LongBinaryOperator reducer) {
4210         if (transformer == null || reducer == null)
4211             throw new NullPointerException();
4212         return new MapReduceValuesToLongTask<K,V>
4213             (null, batchFor(parallelismThreshold), 0, 0, table,
4214              null, transformer, basis, reducer).invoke();
4215     }
4216 
4217     /**
4218      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4219      * of all values using the given reducer to combine values,
4220      * and the given basis as an identity value.
4221      *
4222      * @param parallelismThreshold the (estimated) number of elements
4223      * needed for this operation to be executed in parallel
4224      * @param transformer a function returning the transformation
4225      * for an element
4226      * @param basis the identity (initial default value) for the reduction
4227      * @param reducer a commutative associative combining function
4228      * @return the result of accumulating the given transformation
4229      * of all values
4230      * @since 1.8
4231      */
4232     public int reduceValuesToInt(long parallelismThreshold,
4233                                  ToIntFunction<? super V> transformer,
4234                                  int basis,
4235                                  IntBinaryOperator reducer) {
4236         if (transformer == null || reducer == null)
4237             throw new NullPointerException();
4238         return new MapReduceValuesToIntTask<K,V>
4239             (null, batchFor(parallelismThreshold), 0, 0, table,
4240              null, transformer, basis, reducer).invoke();
4241     }
4242 
4243     /**
4244      * Performs the given {@linkplain ##Bulk bulk} action for each entry.
4245      *
4246      * @param parallelismThreshold the (estimated) number of elements
4247      * needed for this operation to be executed in parallel
4248      * @param action the action
4249      * @since 1.8
4250      */
4251     public void forEachEntry(long parallelismThreshold,
4252                              Consumer<? super Map.Entry<K,V>> action) {
4253         if (action == null) throw new NullPointerException();
4254         new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
4255                                   action).invoke();
4256     }
4257 
4258     /**
4259      * Performs the given {@linkplain ##Bulk bulk} action for each non-null transformation
4260      * of each entry.
4261      *
4262      * @param parallelismThreshold the (estimated) number of elements
4263      * needed for this operation to be executed in parallel
4264      * @param transformer a function returning the transformation
4265      * for an element, or null if there is no transformation (in
4266      * which case the action is not applied)
4267      * @param action the action
4268      * @param <U> the return type of the transformer
4269      * @since 1.8
4270      */
4271     public <U> void forEachEntry(long parallelismThreshold,
4272                                  Function<Map.Entry<K,V>, ? extends U> transformer,
4273                                  Consumer<? super U> action) {
4274         if (transformer == null || action == null)
4275             throw new NullPointerException();
4276         new ForEachTransformedEntryTask<K,V,U>
4277             (null, batchFor(parallelismThreshold), 0, 0, table,
4278              transformer, action).invoke();
4279     }
4280 
4281     /**
4282      * Returns a non-null result from {@linkplain ##Bulk bulk} applying the given search
4283      * function on each entry, or null if none.  Upon success,
4284      * further element processing is suppressed and the results of
4285      * any other parallel invocations of the search function are
4286      * ignored.
4287      *
4288      * @param parallelismThreshold the (estimated) number of elements
4289      * needed for this operation to be executed in parallel
4290      * @param searchFunction a function returning a non-null
4291      * result on success, else null
4292      * @param <U> the return type of the search function
4293      * @return a non-null result from applying the given search
4294      * function on each entry, or null if none
4295      * @since 1.8
4296      */
4297     public <U> U searchEntries(long parallelismThreshold,
4298                                Function<Map.Entry<K,V>, ? extends U> searchFunction) {
4299         if (searchFunction == null) throw new NullPointerException();
4300         return new SearchEntriesTask<K,V,U>
4301             (null, batchFor(parallelismThreshold), 0, 0, table,
4302              searchFunction, new AtomicReference<U>()).invoke();
4303     }
4304 
4305     /**
4306      * Returns the result of {@linkplain ##Bulk bulk} accumulating all entries using the
4307      * given reducer to combine values, or null if none.
4308      *
4309      * @param parallelismThreshold the (estimated) number of elements
4310      * needed for this operation to be executed in parallel
4311      * @param reducer a commutative associative combining function
4312      * @return the result of accumulating all entries
4313      * @since 1.8
4314      */
4315     public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
4316                                         BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
4317         if (reducer == null) throw new NullPointerException();
4318         return new ReduceEntriesTask<K,V>
4319             (null, batchFor(parallelismThreshold), 0, 0, table,
4320              null, reducer).invoke();
4321     }
4322 
4323     /**
4324      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4325      * of all entries using the given reducer to combine values,
4326      * or null if none.
4327      *
4328      * @param parallelismThreshold the (estimated) number of elements
4329      * needed for this operation to be executed in parallel
4330      * @param transformer a function returning the transformation
4331      * for an element, or null if there is no transformation (in
4332      * which case it is not combined)
4333      * @param reducer a commutative associative combining function
4334      * @param <U> the return type of the transformer
4335      * @return the result of accumulating the given transformation
4336      * of all entries
4337      * @since 1.8
4338      */
4339     public <U> U reduceEntries(long parallelismThreshold,
4340                                Function<Map.Entry<K,V>, ? extends U> transformer,
4341                                BiFunction<? super U, ? super U, ? extends U> reducer) {
4342         if (transformer == null || reducer == null)
4343             throw new NullPointerException();
4344         return new MapReduceEntriesTask<K,V,U>
4345             (null, batchFor(parallelismThreshold), 0, 0, table,
4346              null, transformer, reducer).invoke();
4347     }
4348 
4349     /**
4350      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4351      * of all entries using the given reducer to combine values,
4352      * and the given basis as an identity value.
4353      *
4354      * @param parallelismThreshold the (estimated) number of elements
4355      * needed for this operation to be executed in parallel
4356      * @param transformer a function returning the transformation
4357      * for an element
4358      * @param basis the identity (initial default value) for the reduction
4359      * @param reducer a commutative associative combining function
4360      * @return the result of accumulating the given transformation
4361      * of all entries
4362      * @since 1.8
4363      */
4364     public double reduceEntriesToDouble(long parallelismThreshold,
4365                                         ToDoubleFunction<Map.Entry<K,V>> transformer,
4366                                         double basis,
4367                                         DoubleBinaryOperator reducer) {
4368         if (transformer == null || reducer == null)
4369             throw new NullPointerException();
4370         return new MapReduceEntriesToDoubleTask<K,V>
4371             (null, batchFor(parallelismThreshold), 0, 0, table,
4372              null, transformer, basis, reducer).invoke();
4373     }
4374 
4375     /**
4376      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4377      * of all entries using the given reducer to combine values,
4378      * and the given basis as an identity value.
4379      *
4380      * @param parallelismThreshold the (estimated) number of elements
4381      * needed for this operation to be executed in parallel
4382      * @param transformer a function returning the transformation
4383      * for an element
4384      * @param basis the identity (initial default value) for the reduction
4385      * @param reducer a commutative associative combining function
4386      * @return the result of accumulating the given transformation
4387      * of all entries
4388      * @since 1.8
4389      */
4390     public long reduceEntriesToLong(long parallelismThreshold,
4391                                     ToLongFunction<Map.Entry<K,V>> transformer,
4392                                     long basis,
4393                                     LongBinaryOperator reducer) {
4394         if (transformer == null || reducer == null)
4395             throw new NullPointerException();
4396         return new MapReduceEntriesToLongTask<K,V>
4397             (null, batchFor(parallelismThreshold), 0, 0, table,
4398              null, transformer, basis, reducer).invoke();
4399     }
4400 
4401     /**
4402      * Returns the result of {@linkplain ##Bulk bulk} accumulating the given transformation
4403      * of all entries using the given reducer to combine values,
4404      * and the given basis as an identity value.
4405      *
4406      * @param parallelismThreshold the (estimated) number of elements
4407      * needed for this operation to be executed in parallel
4408      * @param transformer a function returning the transformation
4409      * for an element
4410      * @param basis the identity (initial default value) for the reduction
4411      * @param reducer a commutative associative combining function
4412      * @return the result of accumulating the given transformation
4413      * of all entries
4414      * @since 1.8
4415      */
4416     public int reduceEntriesToInt(long parallelismThreshold,
4417                                   ToIntFunction<Map.Entry<K,V>> transformer,
4418                                   int basis,
4419                                   IntBinaryOperator reducer) {
4420         if (transformer == null || reducer == null)
4421             throw new NullPointerException();
4422         return new MapReduceEntriesToIntTask<K,V>
4423             (null, batchFor(parallelismThreshold), 0, 0, table,
4424              null, transformer, basis, reducer).invoke();
4425     }
4426 
4427 
4428     /* ----------------Views -------------- */
4429 
    /**
     * Base class for views.
     */
    abstract static sealed class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable permits EntrySetView, KeySetView, ValuesView {
        private static final long serialVersionUID = 7249069246763182397L;
        // The backing map; all view operations delegate to it.
        final ConcurrentHashMap<K,V> map;
        CollectionView(ConcurrentHashMap<K,V> map)  { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns an iterator over the elements in this collection.
         *
         * <p>The returned iterator is
         * <a href="package-summary.html#Weakly"><i>weakly consistent</i></a>.
         *
         * @return an iterator over the elements in this collection
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        // Shared message for the array-size overflow errors thrown below.
        private static final String OOME_MSG = "Required array size too large";

        public final Object[] toArray() {
            // Size optimistically from mappingCount(); the map may grow or
            // shrink concurrently, so the loop below resizes as needed.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Grow by roughly 50% plus one, capping at MAX_ARRAY_SIZE.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            // Trim if the traversal yielded fewer elements than allocated.
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        @SuppressWarnings("unchecked")
        public final <T> T[] toArray(T[] a) {
            // Same optimistic-sizing scheme as toArray(), but reuses the
            // caller's array when it is large enough.
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(OOME_MSG);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    // Grow by roughly 50% plus one, capping at MAX_ARRAY_SIZE.
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(OOME_MSG);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                // Per the Collection.toArray(T[]) contract, null-terminate
                // when the caller's array has room to spare.
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    // Guard against self-containing collections.
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != null) {
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Use (c instanceof Set) as a hint that lookup in c is as
            // efficient as this view
            Node<K,V>[] t;
            if ((t = map.table) == null) {
                // Empty map: nothing to remove.
                return false;
            } else if (c instanceof Set<?> && c.size() > t.length) {
                for (Iterator<?> it = iterator(); it.hasNext(); ) {
                    if (c.contains(it.next())) {
                        it.remove();
                        modified = true;
                    }
                }
            } else {
                // Otherwise iterate c and remove element-by-element.
                for (Object e : c)
                    modified |= remove(e);
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Remove via the iterator every element not present in c.
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
4594 
    /**
     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value.  This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     *
     * @param <K> the type of keys
     * @param <V> the type of values in the backing map
     *
     * @since 1.8
     */
    public static final class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        /** @serial */
        @SuppressWarnings("serial") // Conditionally serializable
        // Default value used by add/addAll; null means additions disabled.
        private final V value;
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param  o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            // Read the table once so the iterator traverses a consistent
            // starting snapshot of the bin array.
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            // putVal with onlyIfAbsent=true; null return means key was new.
            return map.putVal(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.putVal(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        public int hashCode() {
            // Per the Set contract: sum of the element hash codes.
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        public boolean equals(Object o) {
            // Per the Set contract: equal iff the other object is a Set
            // with exactly the same elements.
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            // sumCount() is only an estimate; clamp negatives to zero.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                // Traverse the bin array directly, weakly consistent.
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.key);
            }
        }
    }
4728 
    /**
     * A view of a ConcurrentHashMap as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        public final boolean remove(Object o) {
            // Removes (at most) the first mapping found with this value.
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            // Read the table once so the iterator traverses a consistent
            // starting snapshot of the bin array.
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        // Additions are disabled on a values view: there is no key to map.
        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        @Override public boolean removeAll(Collection<?> c) {
            if (c == null) throw new NullPointerException();
            boolean modified = false;
            // Always iterate this view; value lookup in c may be linear.
            for (Iterator<V> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public boolean removeIf(Predicate<? super V> filter) {
            return map.removeValueIf(filter);
        }

        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            // sumCount() is only an estimate; clamp negatives to zero.
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                // Traverse the bin array directly, weakly consistent.
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }
4802 
4803     /**
4804      * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
4805      * entries.  This class cannot be directly instantiated. See
4806      * {@link #entrySet()}.
4807      */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

        /**
         * Returns true if {@code o} is a Map.Entry whose (non-null) key is
         * mapped in the backing map to a value equal to the entry's
         * (non-null) value.
         */
        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        /**
         * Removes the backing map's mapping for the entry's key, but only
         * if the key is currently mapped to the entry's value.
         */
        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Maps the entry's key to its value in the backing map (replacing
         * any existing value); returns {@code true} if the map reported no
         * previous value for the key.
         */
        public boolean add(Entry<K,V> e) {
            return map.putVal(e.getKey(), e.getValue(), false) == null;
        }

        // Adds each entry in turn; true if any add created a new mapping.
        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        // Bulk-removes matching entries via the backing map.
        public boolean removeIf(Predicate<? super Entry<K,V>> filter) {
            return map.removeEntryIf(filter);
        }

        // Per the Set contract: the sum of the entries' hash codes.
        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        // Per the Set contract: equal to any Set containing the same
        // elements, checked by mutual containsAll (unless same object).
        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        // Spliterator sized with a snapshot of the current element count.
        public Spliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        // Applies the action to each entry found by a table traversal,
        // wrapping each node as a MapEntry bound to the backing map.
        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(new MapEntry<K,V>(p.key, p.val, map));
            }
        }

    }
4895 
4896     // -------------------------------------------------------
4897 
4898     /**
4899      * Base class for bulk tasks. Repeats some fields and code from
4900      * class Traverser, because we need to subclass CountedCompleter.
4901      */
    @SuppressWarnings("serial")
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;         // the next entry to use
        TableStack<K,V> stack, spare; // saved/reused state across ForwardingNodes
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size
        int batch;              // split control

        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                // Root task: traverse the whole table.
                this.baseSize = this.baseLimit = t.length;
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version.
         * Advances to the next valid node and returns it, or returns null
         * when the traversal is complete.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n;
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, i)) != null && e.hash < 0) {
                    // Negative hash marks a special node.
                    if (e instanceof ForwardingNode) {
                        // Bin moved during a resize: descend into the next
                        // table, saving the current position for recovery.
                        tab = ((ForwardingNode<K,V>)e).nextTable;
                        e = null;
                        pushState(t, i, n);
                        continue;
                    }
                    else if (e instanceof TreeBin)
                        e = ((TreeBin<K,V>)e).first;  // walk tree nodes as a list
                    else
                        e = null;
                }
                if (stack != null)
                    recoverState(n);      // possibly pop back to an outer table
                else if ((index = i + baseSize) >= n)
                    index = ++baseIndex;  // visit upper slots if present
            }
        }

        /**
         * Saves traversal state upon encountering a forwarding node.
         */
        private void pushState(Node<K,V>[] t, int i, int n) {
            TableStack<K,V> s = spare;  // reuse a spare frame if possible
            if (s != null)
                spare = s.next;
            else
                s = new TableStack<K,V>();
            s.tab = t;
            s.length = n;
            s.index = i;
            s.next = stack;
            stack = s;
        }

        /**
         * Possibly pops traversal state when a nested table's portion
         * has been exhausted.
         *
         * @param n length of current table
         */
        private void recoverState(int n) {
            TableStack<K,V> s; int len;
            while ((s = stack) != null && (index += (len = s.length)) >= n) {
                n = len;
                index = s.index;
                tab = s.tab;
                s.tab = null;
                TableStack<K,V> next = s.next;
                s.next = spare; // save for reuse
                stack = next;
                spare = s;
            }
            if (s == null && (index += baseSize) >= n)
                index = ++baseIndex;
        }
    }
4989 
4990     /*
4991      * Task classes. Coded in a regular but ugly format/style to
4992      * simplify checks that each variant differs in the right way from
4993      * others. The null screenings exist because compilers cannot tell
4994      * that we've already null-checked task arguments, so we force
4995      * simplest hoisted bypass to help avoid convoluted traps.
4996      */
    /**
     * Parallel task applying a Consumer to each key.
     */
    @SuppressWarnings("serial")
    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {
                // Repeatedly split off the upper half [h, f) of the
                // remaining range into a forked subtask, halving batch each
                // time; this task keeps the lower half (baseLimit = h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                // Process this task's remaining portion of the table.
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.key);
                propagateCompletion();
            }
        }
    }
5023 
    /**
     * Parallel task applying a Consumer to each value; splits and forks
     * as in {@link ForEachKeyTask}.
     */
    @SuppressWarnings("serial")
    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) {
                // Fork subtasks for upper halves; this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }
5050 
    /**
     * Parallel task applying a Consumer to each entry node; splits and
     * forks as in {@link ForEachKeyTask}.
     */
    @SuppressWarnings("serial")
    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                // Fork subtasks for upper halves; this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }
5077 
    /**
     * Parallel task applying a BiConsumer to each (key, value) pair;
     * splits and forks as in {@link ForEachKeyTask}.
     */
    @SuppressWarnings("serial")
    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {
                // Fork subtasks for upper halves; this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p.key, p.val);
                propagateCompletion();
            }
        }
    }
5104 
    /**
     * Parallel task applying a Consumer to each non-null transformation
     * of each key; splits and forks as in {@link ForEachKeyTask}.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves; this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Elements the transformer maps to null are skipped.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5137 
    /**
     * Parallel task applying a Consumer to each non-null transformation
     * of each value; splits and forks as in {@link ForEachKeyTask}.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves; this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Elements the transformer maps to null are skipped.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5170 
    /**
     * Parallel task applying a Consumer to each non-null transformation
     * of each entry; splits and forks as in {@link ForEachKeyTask}.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves; this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Elements the transformer maps to null are skipped.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5203 
    /**
     * Parallel task applying a Consumer to each non-null transformation
     * of each (key, value) pair; splits and forks as in
     * {@link ForEachKeyTask}.
     */
    @SuppressWarnings("serial")
    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                // Fork subtasks for upper halves; this task keeps [i, h).
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                // Elements the transformer maps to null are skipped.
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
5237 
    /**
     * Parallel search over keys: the computation completes with the first
     * non-null result produced by the search function, or null if none.
     */
    @SuppressWarnings("serial")
    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)  // another task already succeeded
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Exhausted this portion without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key)) != null) {
                        // First successful CAS wins; completing the root
                        // lets all other tasks observe the result and stop.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5281 
    /**
     * Parallel search over values: completes with the first non-null
     * result of the search function, or null if none. Protocol as in
     * {@link SearchKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)  // another task already succeeded
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Exhausted this portion without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        // First successful CAS wins.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5325 
    /**
     * Parallel search over entries: completes with the first non-null
     * result of the search function, or null if none. Protocol as in
     * {@link SearchKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)  // another task already succeeded
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Exhausted this portion without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        // First successful CAS wins. Note: uses return where
                        // sibling search tasks use break — equivalent here,
                        // since the method ends right after the loop.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }
5369 
    /**
     * Parallel search over (key, value) pairs: completes with the first
     * non-null result of the search function, or null if none. Protocol
     * as in {@link SearchKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;  // shared across all subtasks
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)  // another task already succeeded
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        // Exhausted this portion without a match.
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.key, p.val)) != null) {
                        // First successful CAS wins.
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
5413 
    /**
     * Parallel reduction over keys using the given reducer; the result
     * is null when the traversal is empty.
     */
    @SuppressWarnings("serial")
    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;  // forked-subtask list links
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves, threading them onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own portion (operands null-screened).
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                // As tasks finish, fold each completed right subtask's
                // result into its owner's result, unlinking as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5461 
    /**
     * Parallel reduction over values using the given reducer; the result
     * is null when the traversal is empty. Structure as in
     * {@link ReduceKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;  // forked-subtask list links
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves, threading them onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own portion.
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                // As tasks finish, fold each completed right subtask's
                // result into its owner's result, unlinking as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5509 
    /**
     * Parallel reduction over entries using the given reducer; the result
     * is null when the traversal is empty. Structure as in
     * {@link ReduceKeysTask}.
     */
    @SuppressWarnings("serial")
    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;  // forked-subtask list links
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                // Fork upper halves, threading them onto the rights list.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                // Reduce this task's own portion.
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                // As tasks finish, fold each completed right subtask's
                // result into its owner's result, unlinking as we go.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5555 
    /**
     * Task returning the result of accumulating the transformed keys
     * of the map using the given reducer, skipping null
     * transformations; null if there are none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                               // accumulated reduction for this split
        MapReduceKeysTask<K,V,U> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Reduce this task's own split; keys transforming to
                // null are skipped rather than passed to the reducer.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold non-null results of completed right subtasks into
                // each completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5609 
    /**
     * Task returning the result of accumulating the transformed
     * values of the map using the given reducer, skipping null
     * transformations; null if there are none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                               // accumulated reduction for this split
        MapReduceValuesTask<K,V,U> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Reduce this task's own split; values transforming to
                // null are skipped rather than passed to the reducer.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold non-null results of completed right subtasks into
                // each completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5663 
    /**
     * Task returning the result of accumulating the transformed
     * entries of the map using the given reducer, skipping null
     * transformations; null if there are none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                               // accumulated reduction for this split
        MapReduceEntriesTask<K,V,U> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Reduce this task's own split; table nodes serve
                // directly as Map.Entry arguments, and null transformer
                // results are skipped.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold non-null results of completed right subtasks into
                // each completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5717 
    /**
     * Task returning the result of accumulating the transformed
     * (key, value) pairs of the map using the given reducer, skipping
     * null transformations; null if there are none.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;                               // accumulated reduction for this split
        MapReduceMappingsTask<K,V,U> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                // Reduce this task's own split; pairs transforming to
                // null are skipped rather than passed to the reducer.
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                // Fold non-null results of completed right subtasks into
                // each completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5771 
    /**
     * Task returning the double result of accumulating each
     * transformed key, starting from the given basis and combining
     * with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super K> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                 // identity/starting value for the reduction
        double result;                      // accumulated reduction for this split
        MapReduceKeysToDoubleTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceKeysToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super K> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super K> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToDoubleTask<K,V>
                        t = (MapReduceKeysToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5821 
    /**
     * Task returning the double result of accumulating each
     * transformed value, starting from the given basis and combining
     * with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                 // identity/starting value for the reduction
        double result;                      // accumulated reduction for this split
        MapReduceValuesToDoubleTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceValuesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToDoubleTask<K,V>
                        t = (MapReduceValuesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5871 
    /**
     * Task returning the double result of accumulating each
     * transformed entry, starting from the given basis and combining
     * with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleFunction<Map.Entry<K,V>> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                 // identity/starting value for the reduction
        double result;                      // accumulated reduction for this split
        MapReduceEntriesToDoubleTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceEntriesToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToDoubleTask<K,V> nextRight,
             ToDoubleFunction<Map.Entry<K,V>> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleFunction<Map.Entry<K,V>> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split; table nodes serve
                // directly as Map.Entry arguments.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToDoubleTask<K,V>
                        t = (MapReduceEntriesToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5921 
    /**
     * Task returning the double result of accumulating each
     * transformed (key, value) pair, starting from the given basis
     * and combining with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToDoubleTask<K,V>
        extends BulkTask<K,V,Double> {
        final ToDoubleBiFunction<? super K, ? super V> transformer;
        final DoubleBinaryOperator reducer;
        final double basis;                 // identity/starting value for the reduction
        double result;                      // accumulated reduction for this split
        MapReduceMappingsToDoubleTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceMappingsToDoubleTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToDoubleTask<K,V> nextRight,
             ToDoubleBiFunction<? super K, ? super V> transformer,
             double basis,
             DoubleBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Double getRawResult() { return result; }
        public final void compute() {
            final ToDoubleBiFunction<? super K, ? super V> transformer;
            final DoubleBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                double r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToDoubleTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.key, p.val));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToDoubleTask<K,V>
                        t = (MapReduceMappingsToDoubleTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsDouble(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
5971 
    /**
     * Task returning the long result of accumulating each
     * transformed key, starting from the given basis and combining
     * with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer;
        final LongBinaryOperator reducer;
        final long basis;                   // identity/starting value for the reduction
        long result;                        // accumulated reduction for this split
        MapReduceKeysToLongTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6021 
    /**
     * Task returning the long result of accumulating each
     * transformed value, starting from the given basis and combining
     * with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;                   // identity/starting value for the reduction
        long result;                        // accumulated reduction for this split
        MapReduceValuesToLongTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6071 
    /**
     * Task returning the long result of accumulating each
     * transformed entry, starting from the given basis and combining
     * with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;
        final long basis;                   // identity/starting value for the reduction
        long result;                        // accumulated reduction for this split
        MapReduceEntriesToLongTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split; table nodes serve
                // directly as Map.Entry arguments.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6121 
    /**
     * Task returning the long result of accumulating each
     * transformed (key, value) pair, starting from the given basis
     * and combining with the given reducer.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;                   // identity/starting value for the reduction
        long result;                        // accumulated reduction for this split
        MapReduceMappingsToLongTask<K,V> rights, nextRight;  // stack of forked right-hand subtasks
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                // Split: fork a subtask for the upper half [h, f) of the
                // remaining index range, keeping [i, h) for this task
                // (baseLimit = h) and halving batch each iteration; r
                // still equals basis here, so subtasks share the basis.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Reduce this task's own split.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.key, p.val));
                result = r;
                // Fold results of completed right subtasks into each
                // completer, unlinking subtasks as they are consumed.
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6171 
    /**
     * Fork/join task that applies a key -> int transformer to every key and
     * combines the results with an IntBinaryOperator, starting from
     * {@code basis}.  One of the MapReduce*Task family of CountedCompleters
     * backing the parallel bulk operations.
     */
    @SuppressWarnings("serial")
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity (starting) value for the reduction
        int result;         // reduction over this task's range, plus merged subtasks
        // rights: list of forked right-hand subtasks, linked via nextRight
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly fork off the right half of the remaining index
                // range as a subtask until the batch budget is exhausted or
                // the range can no longer be split.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the portion this task kept for itself.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key));
                result = r;
                CountedCompleter<?> c;
                // On completion, fold each finished right subtask's result
                // into its parent while walking up the completer chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6221 
    /**
     * Fork/join task that applies a value -> int transformer to every value
     * and combines the results with an IntBinaryOperator, starting from
     * {@code basis}.  One of the MapReduce*Task family of CountedCompleters
     * backing the parallel bulk operations.
     */
    @SuppressWarnings("serial")
    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity (starting) value for the reduction
        int result;         // reduction over this task's range, plus merged subtasks
        // rights: list of forked right-hand subtasks, linked via nextRight
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly fork off the right half of the remaining index
                // range as a subtask until the batch budget is exhausted or
                // the range can no longer be split.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the portion this task kept for itself.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                CountedCompleter<?> c;
                // On completion, fold each finished right subtask's result
                // into its parent while walking up the completer chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6271 
    /**
     * Fork/join task that applies an entry -> int transformer to every
     * entry (the Node itself serves as the Map.Entry) and combines the
     * results with an IntBinaryOperator, starting from {@code basis}.
     * One of the MapReduce*Task family of CountedCompleters backing the
     * parallel bulk operations.
     */
    @SuppressWarnings("serial")
    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity (starting) value for the reduction
        int result;         // reduction over this task's range, plus merged subtasks
        // rights: list of forked right-hand subtasks, linked via nextRight
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly fork off the right half of the remaining index
                // range as a subtask until the batch budget is exhausted or
                // the range can no longer be split.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the portion this task kept for itself;
                // each Node p is passed directly as the Map.Entry.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                CountedCompleter<?> c;
                // On completion, fold each finished right subtask's result
                // into its parent while walking up the completer chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6321 
    /**
     * Fork/join task that applies a (key, value) -> int transformer to
     * every mapping and combines the results with an IntBinaryOperator,
     * starting from {@code basis}.  One of the MapReduce*Task family of
     * CountedCompleters backing the parallel bulk operations.
     */
    @SuppressWarnings("serial")
    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;    // identity (starting) value for the reduction
        int result;         // reduction over this task's range, plus merged subtasks
        // rights: list of forked right-hand subtasks, linked via nextRight
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                // Repeatedly fork off the right half of the remaining index
                // range as a subtask until the batch budget is exhausted or
                // the range can no longer be split.
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                // Sequentially reduce the portion this task kept for itself.
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.key, p.val));
                result = r;
                CountedCompleter<?> c;
                // On completion, fold each finished right subtask's result
                // into its parent while walking up the completer chain.
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    @SuppressWarnings("unchecked")
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
6371 
    // Unsafe mechanics
    // Raw field offsets, resolved once at class load, for use with
    // Unsafe-based atomic/volatile accesses elsewhere in this class.
    private static final Unsafe U = Unsafe.getUnsafe();
    private static final long SIZECTL
        = U.objectFieldOffset(ConcurrentHashMap.class, "sizeCtl");
    private static final long TRANSFERINDEX
        = U.objectFieldOffset(ConcurrentHashMap.class, "transferIndex");
    private static final long BASECOUNT
        = U.objectFieldOffset(ConcurrentHashMap.class, "baseCount");
    private static final long CELLSBUSY
        = U.objectFieldOffset(ConcurrentHashMap.class, "cellsBusy");
    private static final long CELLVALUE
        = U.objectFieldOffset(CounterCell.class, "value");
    // Base offset and per-element shift for addressing Node[] slots:
    // element i lives at ABASE + ((long)i << ASHIFT).
    private static final long ABASE = U.arrayBaseOffset(Node[].class);
    private static final int ASHIFT;

    static {
        int scale = U.arrayIndexScale(Node[].class);
        // ASHIFT = log2(scale), which is only valid when the element
        // size reported by the VM is a power of two.
        if ((scale & (scale - 1)) != 0)
            throw new ExceptionInInitializerError("array index scale not a power of two");
        ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);

        // Reduce the risk of rare disastrous classloading in first call to
        // LockSupport.park: https://bugs.openjdk.org/browse/JDK-8074773
        Class<?> ensureLoaded = LockSupport.class;

        // Eager class load observed to help JIT during startup
        ensureLoaded = ReservationNode.class;
    }
6400 }