1 /* 2 * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 24 */ 25 26 package java.util; 27 28 import java.io.IOException; 29 import java.io.InvalidObjectException; 30 import java.io.ObjectInputStream; 31 import java.io.Serializable; 32 import java.lang.reflect.ParameterizedType; 33 import java.lang.reflect.Type; 34 import java.util.function.BiConsumer; 35 import java.util.function.BiFunction; 36 import java.util.function.Consumer; 37 import java.util.function.Function; 38 import jdk.internal.access.SharedSecrets; 39 40 /** 41 * Hash table based implementation of the {@code Map} interface. This 42 * implementation provides all of the optional map operations, and permits 43 * {@code null} values and the {@code null} key. (The {@code HashMap} 44 * class is roughly equivalent to {@code Hashtable}, except that it is 45 * unsynchronized and permits nulls.) This class makes no guarantees as to 46 * the order of the map; in particular, it does not guarantee that the order 47 * will remain constant over time. 48 * 49 * <p>This implementation provides constant-time performance for the basic 50 * operations ({@code get} and {@code put}), assuming the hash function 51 * disperses the elements properly among the buckets. Iteration over 52 * collection views requires time proportional to the "capacity" of the 53 * {@code HashMap} instance (the number of buckets) plus its size (the number 54 * of key-value mappings). Thus, it's very important not to set the initial 55 * capacity too high (or the load factor too low) if iteration performance is 56 * important. 57 * 58 * <p>An instance of {@code HashMap} has two parameters that affect its 59 * performance: <i>initial capacity</i> and <i>load factor</i>. The 60 * <i>capacity</i> is the number of buckets in the hash table, and the initial 61 * capacity is simply the capacity at the time the hash table is created. The 62 * <i>load factor</i> is a measure of how full the hash table is allowed to 63 * get before its capacity is automatically increased. When the number of 64 * entries in the hash table exceeds the product of the load factor and the 65 * current capacity, the hash table is <i>rehashed</i> (that is, internal data 66 * structures are rebuilt) so that the hash table has approximately twice the 67 * number of buckets. 
68 * 69 * <p>As a general rule, the default load factor (.75) offers a good 70 * tradeoff between time and space costs. Higher values decrease the 71 * space overhead but increase the lookup cost (reflected in most of 72 * the operations of the {@code HashMap} class, including 73 * {@code get} and {@code put}). The expected number of entries in 74 * the map and its load factor should be taken into account when 75 * setting its initial capacity, so as to minimize the number of 76 * rehash operations. If the initial capacity is greater than the 77 * maximum number of entries divided by the load factor, no rehash 78 * operations will ever occur. 79 * 80 * <p>If many mappings are to be stored in a {@code HashMap} 81 * instance, creating it with a sufficiently large capacity will allow 82 * the mappings to be stored more efficiently than letting it perform 83 * automatic rehashing as needed to grow the table. Note that using 84 * many keys with the same {@code hashCode()} is a sure way to slow 85 * down performance of any hash table. To ameliorate impact, when keys 86 * are {@link Comparable}, this class may use comparison order among 87 * keys to help break ties. 88 * 89 * <p><strong>Note that this implementation is not synchronized.</strong> 90 * If multiple threads access a hash map concurrently, and at least one of 91 * the threads modifies the map structurally, it <i>must</i> be 92 * synchronized externally. (A structural modification is any operation 93 * that adds or deletes one or more mappings; merely changing the value 94 * associated with a key that an instance already contains is not a 95 * structural modification.) This is typically accomplished by 96 * synchronizing on some object that naturally encapsulates the map. 97 * 98 * If no such object exists, the map should be "wrapped" using the 99 * {@link Collections#synchronizedMap Collections.synchronizedMap} 100 * method. This is best done at creation time, to prevent accidental 101 * unsynchronized access to the map:<pre> 102 * Map m = Collections.synchronizedMap(new HashMap(...));</pre> 103 * 104 * <p>The iterators returned by all of this class's "collection view methods" 105 * are <i>fail-fast</i>: if the map is structurally modified at any time after 106 * the iterator is created, in any way except through the iterator's own 107 * {@code remove} method, the iterator will throw a 108 * {@link ConcurrentModificationException}. Thus, in the face of concurrent 109 * modification, the iterator fails quickly and cleanly, rather than risking 110 * arbitrary, non-deterministic behavior at an undetermined time in the 111 * future. 112 * 113 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed 114 * as it is, generally speaking, impossible to make any hard guarantees in the 115 * presence of unsynchronized concurrent modification. Fail-fast iterators 116 * throw {@code ConcurrentModificationException} on a best-effort basis. 117 * Therefore, it would be wrong to write a program that depended on this 118 * exception for its correctness: <i>the fail-fast behavior of iterators 119 * should be used only to detect bugs.</i> 120 * 121 * <p>This class is a member of the 122 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework"> 123 * Java Collections Framework</a>. 
124 * 125 * @param <K> the type of keys maintained by this map 126 * @param <V> the type of mapped values 127 * 128 * @author Doug Lea 129 * @author Josh Bloch 130 * @author Arthur van Hoff 131 * @author Neal Gafter 132 * @see Object#hashCode() 133 * @see Collection 134 * @see Map 135 * @see TreeMap 136 * @see Hashtable 137 * @since 1.2 138 */ 139 public class HashMap<K,V> extends AbstractMap<K,V> 140 implements Map<K,V>, Cloneable, Serializable { 141 142 @java.io.Serial 143 private static final long serialVersionUID = 362498820763181265L; 144 145 /* 146 * Implementation notes. 147 * 148 * This map usually acts as a binned (bucketed) hash table, but 149 * when bins get too large, they are transformed into bins of 150 * TreeNodes, each structured similarly to those in 151 * java.util.TreeMap. Most methods try to use normal bins, but 152 * relay to TreeNode methods when applicable (simply by checking 153 * instanceof a node). Bins of TreeNodes may be traversed and 154 * used like any others, but additionally support faster lookup 155 * when overpopulated. However, since the vast majority of bins in 156 * normal use are not overpopulated, checking for existence of 157 * tree bins may be delayed in the course of table methods. 158 * 159 * Tree bins (i.e., bins whose elements are all TreeNodes) are 160 * ordered primarily by hashCode, but in the case of ties, if two 161 * elements are of the same "class C implements Comparable<C>", 162 * type then their compareTo method is used for ordering. (We 163 * conservatively check generic types via reflection to validate 164 * this -- see method comparableClassFor). The added complexity 165 * of tree bins is worthwhile in providing worst-case O(log n) 166 * operations when keys either have distinct hashes or are 167 * orderable, Thus, performance degrades gracefully under 168 * accidental or malicious usages in which hashCode() methods 169 * return values that are poorly distributed, as well as those in 170 * which many keys share a hashCode, so long as they are also 171 * Comparable. (If neither of these apply, we may waste about a 172 * factor of two in time and space compared to taking no 173 * precautions. But the only known cases stem from poor user 174 * programming practices that are already so slow that this makes 175 * little difference.) 176 * 177 * Because TreeNodes are about twice the size of regular nodes, we 178 * use them only when bins contain enough nodes to warrant use 179 * (see TREEIFY_THRESHOLD). And when they become too small (due to 180 * removal or resizing) they are converted back to plain bins. In 181 * usages with well-distributed user hashCodes, tree bins are 182 * rarely used. Ideally, under random hashCodes, the frequency of 183 * nodes in bins follows a Poisson distribution 184 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a 185 * parameter of about 0.5 on average for the default resizing 186 * threshold of 0.75, although with a large variance because of 187 * resizing granularity. Ignoring variance, the expected 188 * occurrences of list size k are (exp(-0.5) * pow(0.5, k) / 189 * factorial(k)). The first values are: 190 * 191 * 0: 0.60653066 192 * 1: 0.30326533 193 * 2: 0.07581633 194 * 3: 0.01263606 195 * 4: 0.00157952 196 * 5: 0.00015795 197 * 6: 0.00001316 198 * 7: 0.00000094 199 * 8: 0.00000006 200 * more: less than 1 in ten million 201 * 202 * The root of a tree bin is normally its first node. 
However, 203 * sometimes (currently only upon Iterator.remove), the root might 204 * be elsewhere, but can be recovered following parent links 205 * (method TreeNode.root()). 206 * 207 * All applicable internal methods accept a hash code as an 208 * argument (as normally supplied from a public method), allowing 209 * them to call each other without recomputing user hashCodes. 210 * Most internal methods also accept a "tab" argument, that is 211 * normally the current table, but may be a new or old one when 212 * resizing or converting. 213 * 214 * When bin lists are treeified, split, or untreeified, we keep 215 * them in the same relative access/traversal order (i.e., field 216 * Node.next) to better preserve locality, and to slightly 217 * simplify handling of splits and traversals that invoke 218 * iterator.remove. When using comparators on insertion, to keep a 219 * total ordering (or as close as is required here) across 220 * rebalancings, we compare classes and identityHashCodes as 221 * tie-breakers. 222 * 223 * The use and transitions among plain vs tree modes is 224 * complicated by the existence of subclass LinkedHashMap. See 225 * below for hook methods defined to be invoked upon insertion, 226 * removal and access that allow LinkedHashMap internals to 227 * otherwise remain independent of these mechanics. (This also 228 * requires that a map instance be passed to some utility methods 229 * that may create new nodes.) 230 * 231 * The concurrent-programming-like SSA-based coding style helps 232 * avoid aliasing errors amid all of the twisty pointer operations. 233 */ 234 235 /** 236 * The default initial capacity - MUST be a power of two. 237 */ 238 static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16 239 240 /** 241 * The maximum capacity, used if a higher value is implicitly specified 242 * by either of the constructors with arguments. 243 * MUST be a power of two <= 1<<30. 244 */ 245 static final int MAXIMUM_CAPACITY = 1 << 30; 246 247 /** 248 * The load factor used when none specified in constructor. 249 */ 250 static final float DEFAULT_LOAD_FACTOR = 0.75f; 251 252 /** 253 * The bin count threshold for using a tree rather than list for a 254 * bin. Bins are converted to trees when adding an element to a 255 * bin with at least this many nodes. The value must be greater 256 * than 2 and should be at least 8 to mesh with assumptions in 257 * tree removal about conversion back to plain bins upon 258 * shrinkage. 259 */ 260 static final int TREEIFY_THRESHOLD = 8; 261 262 /** 263 * The bin count threshold for untreeifying a (split) bin during a 264 * resize operation. Should be less than TREEIFY_THRESHOLD, and at 265 * most 6 to mesh with shrinkage detection under removal. 266 */ 267 static final int UNTREEIFY_THRESHOLD = 6; 268 269 /** 270 * The smallest table capacity for which bins may be treeified. 271 * (Otherwise the table is resized if too many nodes in a bin.) 272 * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts 273 * between resizing and treeification thresholds. 274 */ 275 static final int MIN_TREEIFY_CAPACITY = 64; 276 277 /** 278 * Basic hash bin node, used for most entries. (See below for 279 * TreeNode subclass, and in LinkedHashMap for its Entry subclass.) 
280 */ 281 static class Node<K,V> implements Map.Entry<K,V> { 282 final int hash; 283 final K key; 284 V value; 285 Node<K,V> next; 286 287 Node(int hash, K key, V value, Node<K,V> next) { 288 this.hash = hash; 289 this.key = key; 290 this.value = value; 291 this.next = next; 292 } 293 294 public final K getKey() { return key; } 295 public final V getValue() { return value; } 296 public final String toString() { return key + "=" + value; } 297 298 public final int hashCode() { 299 return Objects.hashCode(key) ^ Objects.hashCode(value); 300 } 301 302 public final V setValue(V newValue) { 303 V oldValue = value; 304 value = newValue; 305 return oldValue; 306 } 307 308 public final boolean equals(Object o) { 309 if (o == this) 310 return true; 311 312 return o instanceof Map.Entry<?, ?> e 313 && Objects.equals(key, e.getKey()) 314 && Objects.equals(value, e.getValue()); 315 } 316 } 317 318 /* ---------------- Static utilities -------------- */ 319 320 /** 321 * Computes key.hashCode() and spreads (XORs) higher bits of hash 322 * to lower. Because the table uses power-of-two masking, sets of 323 * hashes that vary only in bits above the current mask will 324 * always collide. (Among known examples are sets of Float keys 325 * holding consecutive whole numbers in small tables.) So we 326 * apply a transform that spreads the impact of higher bits 327 * downward. There is a tradeoff between speed, utility, and 328 * quality of bit-spreading. Because many common sets of hashes 329 * are already reasonably distributed (so don't benefit from 330 * spreading), and because we use trees to handle large sets of 331 * collisions in bins, we just XOR some shifted bits in the 332 * cheapest possible way to reduce systematic lossage, as well as 333 * to incorporate impact of the highest bits that would otherwise 334 * never be used in index calculations because of table bounds. 335 */ 336 static final int hash(Object key) { 337 int h; 338 return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16); 339 } 340 341 /** 342 * Returns x's Class if it is of the form "class C implements 343 * Comparable<C>", else null. 344 */ 345 static Class<?> comparableClassFor(Object x) { 346 if (x instanceof Comparable) { 347 Class<?> c; Type[] ts, as; ParameterizedType p; 348 if ((c = x.getClass()) == String.class) // bypass checks 349 return c; 350 if ((ts = c.getGenericInterfaces()) != null) { 351 for (Type t : ts) { 352 if ((t instanceof ParameterizedType) && 353 ((p = (ParameterizedType) t).getRawType() == 354 Comparable.class) && 355 (as = p.getActualTypeArguments()) != null && 356 as.length == 1 && as[0] == c) // type arg is c 357 return c; 358 } 359 } 360 } 361 return null; 362 } 363 364 /** 365 * Returns k.compareTo(x) if x matches kc (k's screened comparable 366 * class), else 0. 367 */ 368 @SuppressWarnings("unchecked") // for cast to Comparable 369 static int compareComparables(Class<?> kc, Object k, Object x) { 370 return (x == null || x.getClass() != kc ? 0 : 371 ((Comparable)k).compareTo(x)); 372 } 373 374 /** 375 * Returns a power of two size for the given target capacity. 376 */ 377 static final int tableSizeFor(int cap) { 378 int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1); 379 return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1; 380 } 381 382 /* ---------------- Fields -------------- */ 383 384 /** 385 * The table, initialized on first use, and resized as 386 * necessary. When allocated, length is always a power of two. 
387 * (We also tolerate length zero in some operations to allow 388 * bootstrapping mechanics that are currently not needed.) 389 */ 390 transient Node<K,V>[] table; 391 392 /** 393 * Holds cached entrySet(). Note that AbstractMap fields are used 394 * for keySet() and values(). 395 */ 396 transient Set<Map.Entry<K,V>> entrySet; 397 398 /** 399 * The number of key-value mappings contained in this map. 400 */ 401 transient int size; 402 403 /** 404 * The number of times this HashMap has been structurally modified 405 * Structural modifications are those that change the number of mappings in 406 * the HashMap or otherwise modify its internal structure (e.g., 407 * rehash). This field is used to make iterators on Collection-views of 408 * the HashMap fail-fast. (See ConcurrentModificationException). 409 */ 410 transient int modCount; 411 412 /** 413 * The next size value at which to resize (capacity * load factor). 414 * 415 * @serial 416 */ 417 // (The javadoc description is true upon serialization. 418 // Additionally, if the table array has not been allocated, this 419 // field holds the initial array capacity, or zero signifying 420 // DEFAULT_INITIAL_CAPACITY.) 421 int threshold; 422 423 /** 424 * The load factor for the hash table. 425 * 426 * @serial 427 */ 428 final float loadFactor; 429 430 /* ---------------- Public operations -------------- */ 431 432 /** 433 * Constructs an empty {@code HashMap} with the specified initial 434 * capacity and load factor. 435 * 436 * @apiNote 437 * To create a {@code HashMap} with an initial capacity that accommodates 438 * an expected number of mappings, use {@link #newHashMap(int) newHashMap}. 439 * 440 * @param initialCapacity the initial capacity 441 * @param loadFactor the load factor 442 * @throws IllegalArgumentException if the initial capacity is negative 443 * or the load factor is nonpositive 444 */ 445 public HashMap(int initialCapacity, float loadFactor) { 446 if (initialCapacity < 0) 447 throw new IllegalArgumentException("Illegal initial capacity: " + 448 initialCapacity); 449 if (initialCapacity > MAXIMUM_CAPACITY) 450 initialCapacity = MAXIMUM_CAPACITY; 451 if (loadFactor <= 0 || Float.isNaN(loadFactor)) 452 throw new IllegalArgumentException("Illegal load factor: " + 453 loadFactor); 454 this.loadFactor = loadFactor; 455 this.threshold = tableSizeFor(initialCapacity); 456 } 457 458 /** 459 * Constructs an empty {@code HashMap} with the specified initial 460 * capacity and the default load factor (0.75). 461 * 462 * @apiNote 463 * To create a {@code HashMap} with an initial capacity that accommodates 464 * an expected number of mappings, use {@link #newHashMap(int) newHashMap}. 465 * 466 * @param initialCapacity the initial capacity. 467 * @throws IllegalArgumentException if the initial capacity is negative. 468 */ 469 public HashMap(int initialCapacity) { 470 this(initialCapacity, DEFAULT_LOAD_FACTOR); 471 } 472 473 /** 474 * Constructs an empty {@code HashMap} with the default initial capacity 475 * (16) and the default load factor (0.75). 476 */ 477 public HashMap() { 478 this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted 479 } 480 481 /** 482 * Constructs a new {@code HashMap} with the same mappings as the 483 * specified {@code Map}. The {@code HashMap} is created with 484 * default load factor (0.75) and an initial capacity sufficient to 485 * hold the mappings in the specified {@code Map}. 
486 * 487 * @param m the map whose mappings are to be placed in this map 488 * @throws NullPointerException if the specified map is null 489 */ 490 @SuppressWarnings("this-escape") 491 public HashMap(Map<? extends K, ? extends V> m) { 492 this.loadFactor = DEFAULT_LOAD_FACTOR; 493 putMapEntries(m, false); 494 } 495 496 /** 497 * Implements Map.putAll and Map constructor. 498 * 499 * @param m the map 500 * @param evict false when initially constructing this map, else 501 * true (relayed to method afterNodeInsertion). 502 */ 503 final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) { 504 int s = m.size(); 505 if (s > 0) { 506 if (table == null) { // pre-size 507 double dt = Math.ceil(s / (double)loadFactor); 508 int t = ((dt < (double)MAXIMUM_CAPACITY) ? 509 (int)dt : MAXIMUM_CAPACITY); 510 if (t > threshold) 511 threshold = tableSizeFor(t); 512 } else { 513 // Because of linked-list bucket constraints, we cannot 514 // expand all at once, but can reduce total resize 515 // effort by repeated doubling now vs later 516 while (s > threshold && table.length < MAXIMUM_CAPACITY) 517 resize(); 518 } 519 520 for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) { 521 K key = e.getKey(); 522 V value = e.getValue(); 523 putVal(hash(key), key, value, false, evict); 524 } 525 } 526 } 527 528 /** 529 * Returns the number of key-value mappings in this map. 530 * 531 * @return the number of key-value mappings in this map 532 */ 533 public int size() { 534 return size; 535 } 536 537 /** 538 * Returns {@code true} if this map contains no key-value mappings. 539 * 540 * @return {@code true} if this map contains no key-value mappings 541 */ 542 public boolean isEmpty() { 543 return size == 0; 544 } 545 546 /** 547 * Returns the value to which the specified key is mapped, 548 * or {@code null} if this map contains no mapping for the key. 549 * 550 * <p>More formally, if this map contains a mapping from a key 551 * {@code k} to a value {@code v} such that {@code (key==null ? k==null : 552 * key.equals(k))}, then this method returns {@code v}; otherwise 553 * it returns {@code null}. (There can be at most one such mapping.) 554 * 555 * <p>A return value of {@code null} does not <i>necessarily</i> 556 * indicate that the map contains no mapping for the key; it's also 557 * possible that the map explicitly maps the key to {@code null}. 558 * The {@link #containsKey containsKey} operation may be used to 559 * distinguish these two cases. 560 * 561 * @see #put(Object, Object) 562 */ 563 public V get(Object key) { 564 Node<K,V> e; 565 return (e = getNode(key)) == null ? null : e.value; 566 } 567 568 /** 569 * Implements Map.get and related methods. 570 * 571 * @param key the key 572 * @return the node, or null if none 573 */ 574 final Node<K,V> getNode(Object key) { 575 Node<K,V>[] tab; Node<K,V> first, e; int n, hash; K k; 576 if ((tab = table) != null && (n = tab.length) > 0 && 577 (first = tab[(n - 1) & (hash = hash(key))]) != null) { 578 if (first.hash == hash && // always check first node 579 Objects.equals(key, first.key)) 580 return first; 581 if ((e = first.next) != null) { 582 if (first instanceof TreeNode) 583 return ((TreeNode<K,V>)first).getTreeNode(hash, key); 584 do { 585 if (e.hash == hash && 586 Objects.equals(key, e.key)) 587 return e; 588 } while ((e = e.next) != null); 589 } 590 } 591 return null; 592 } 593 594 /** 595 * Returns {@code true} if this map contains a mapping for the 596 * specified key. 
597 * 598 * @param key The key whose presence in this map is to be tested 599 * @return {@code true} if this map contains a mapping for the specified 600 * key. 601 */ 602 public boolean containsKey(Object key) { 603 return getNode(key) != null; 604 } 605 606 /** 607 * Associates the specified value with the specified key in this map. 608 * If the map previously contained a mapping for the key, the old 609 * value is replaced. 610 * 611 * @param key key with which the specified value is to be associated 612 * @param value value to be associated with the specified key 613 * @return the previous value associated with {@code key}, or 614 * {@code null} if there was no mapping for {@code key}. 615 * (A {@code null} return can also indicate that the map 616 * previously associated {@code null} with {@code key}.) 617 */ 618 public V put(K key, V value) { 619 return putVal(hash(key), key, value, false, true); 620 } 621 622 /** 623 * Implements Map.put and related methods. 624 * 625 * @param hash hash for key 626 * @param key the key 627 * @param value the value to put 628 * @param onlyIfAbsent if true, don't change existing value 629 * @param evict if false, the table is in creation mode. 630 * @return previous value, or null if none 631 */ 632 final V putVal(int hash, K key, V value, boolean onlyIfAbsent, 633 boolean evict) { 634 Node<K,V>[] tab; Node<K,V> p; int n, i; 635 if ((tab = table) == null || (n = tab.length) == 0) 636 n = (tab = resize()).length; 637 if ((p = tab[i = (n - 1) & hash]) == null) 638 tab[i] = newNode(hash, key, value, null); 639 else { 640 Node<K,V> e; K k; 641 if (p.hash == hash && 642 Objects.equals(key, p.key)) 643 e = p; 644 else if (p instanceof TreeNode) 645 e = ((TreeNode<K,V>)p).putTreeVal(this, tab, hash, key, value); 646 else { 647 for (int binCount = 0; ; ++binCount) { 648 if ((e = p.next) == null) { 649 p.next = newNode(hash, key, value, null); 650 if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st 651 treeifyBin(tab, hash); 652 break; 653 } 654 if (e.hash == hash && 655 Objects.equals(key, e.key)) 656 break; 657 p = e; 658 } 659 } 660 if (e != null) { // existing mapping for key 661 V oldValue = e.value; 662 if (!onlyIfAbsent || oldValue == null) 663 e.value = value; 664 afterNodeAccess(e); 665 return oldValue; 666 } 667 } 668 ++modCount; 669 if (++size > threshold) 670 resize(); 671 afterNodeInsertion(evict); 672 return null; 673 } 674 675 /** 676 * Initializes or doubles table size. If null, allocates in 677 * accord with initial capacity target held in field threshold. 678 * Otherwise, because we are using power-of-two expansion, the 679 * elements from each bin must either stay at same index, or move 680 * with a power of two offset in the new table. 681 * 682 * @return the table 683 */ 684 final Node<K,V>[] resize() { 685 Node<K,V>[] oldTab = table; 686 int oldCap = (oldTab == null) ? 
0 : oldTab.length; 687 int oldThr = threshold; 688 int newCap, newThr = 0; 689 if (oldCap > 0) { 690 if (oldCap >= MAXIMUM_CAPACITY) { 691 threshold = Integer.MAX_VALUE; 692 return oldTab; 693 } 694 else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && 695 oldCap >= DEFAULT_INITIAL_CAPACITY) 696 newThr = oldThr << 1; // double threshold 697 } 698 else if (oldThr > 0) // initial capacity was placed in threshold 699 newCap = oldThr; 700 else { // zero initial threshold signifies using defaults 701 newCap = DEFAULT_INITIAL_CAPACITY; 702 newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); 703 } 704 if (newThr == 0) { 705 float ft = (float)newCap * loadFactor; 706 newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? 707 (int)ft : Integer.MAX_VALUE); 708 } 709 threshold = newThr; 710 @SuppressWarnings({"rawtypes","unchecked"}) 711 Node<K,V>[] newTab = (Node<K,V>[])new Node[newCap]; 712 table = newTab; 713 if (oldTab != null) { 714 for (int j = 0; j < oldCap; ++j) { 715 Node<K,V> e; 716 if ((e = oldTab[j]) != null) { 717 oldTab[j] = null; 718 if (e.next == null) 719 newTab[e.hash & (newCap - 1)] = e; 720 else if (e instanceof TreeNode) 721 ((TreeNode<K,V>)e).split(this, newTab, j, oldCap); 722 else { // preserve order 723 Node<K,V> loHead = null, loTail = null; 724 Node<K,V> hiHead = null, hiTail = null; 725 Node<K,V> next; 726 do { 727 next = e.next; 728 if ((e.hash & oldCap) == 0) { 729 if (loTail == null) 730 loHead = e; 731 else 732 loTail.next = e; 733 loTail = e; 734 } 735 else { 736 if (hiTail == null) 737 hiHead = e; 738 else 739 hiTail.next = e; 740 hiTail = e; 741 } 742 } while ((e = next) != null); 743 if (loTail != null) { 744 loTail.next = null; 745 newTab[j] = loHead; 746 } 747 if (hiTail != null) { 748 hiTail.next = null; 749 newTab[j + oldCap] = hiHead; 750 } 751 } 752 } 753 } 754 } 755 return newTab; 756 } 757 758 /** 759 * Replaces all linked nodes in bin at index for given hash unless 760 * table is too small, in which case resizes instead. 761 */ 762 final void treeifyBin(Node<K,V>[] tab, int hash) { 763 int n, index; Node<K,V> e; 764 if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY) 765 resize(); 766 else if ((e = tab[index = (n - 1) & hash]) != null) { 767 TreeNode<K,V> hd = null, tl = null; 768 do { 769 TreeNode<K,V> p = replacementTreeNode(e, null); 770 if (tl == null) 771 hd = p; 772 else { 773 p.prev = tl; 774 tl.next = p; 775 } 776 tl = p; 777 } while ((e = e.next) != null); 778 if ((tab[index] = hd) != null) 779 hd.treeify(tab); 780 } 781 } 782 783 /** 784 * Copies all of the mappings from the specified map to this map. 785 * These mappings will replace any mappings that this map had for 786 * any of the keys currently in the specified map. 787 * 788 * @param m mappings to be stored in this map 789 * @throws NullPointerException if the specified map is null 790 */ 791 public void putAll(Map<? extends K, ? extends V> m) { 792 putMapEntries(m, true); 793 } 794 795 /** 796 * Removes the mapping for the specified key from this map if present. 797 * 798 * @param key key whose mapping is to be removed from the map 799 * @return the previous value associated with {@code key}, or 800 * {@code null} if there was no mapping for {@code key}. 801 * (A {@code null} return can also indicate that the map 802 * previously associated {@code null} with {@code key}.) 803 */ 804 public V remove(Object key) { 805 Node<K,V> e; 806 return (e = removeNode(hash(key), key, null, false, true)) == null ? 
807 null : e.value; 808 } 809 810 /** 811 * Implements Map.remove and related methods. 812 * 813 * @param hash hash for key 814 * @param key the key 815 * @param value the value to match if matchValue, else ignored 816 * @param matchValue if true only remove if value is equal 817 * @param movable if false do not move other nodes while removing 818 * @return the node, or null if none 819 */ 820 final Node<K,V> removeNode(int hash, Object key, Object value, 821 boolean matchValue, boolean movable) { 822 Node<K,V>[] tab; Node<K,V> p; int n, index; 823 if ((tab = table) != null && (n = tab.length) > 0 && 824 (p = tab[index = (n - 1) & hash]) != null) { 825 Node<K,V> node = null, e; K k; V v; 826 if (p.hash == hash && 827 Objects.equals(key, p.key)) 828 node = p; 829 else if ((e = p.next) != null) { 830 if (p instanceof TreeNode) 831 node = ((TreeNode<K,V>)p).getTreeNode(hash, key); 832 else { 833 do { 834 if (e.hash == hash && 835 Objects.equals(key, e.key)) { 836 node = e; 837 break; 838 } 839 p = e; 840 } while ((e = e.next) != null); 841 } 842 } 843 if (node != null && (!matchValue || (v = node.value) == value || 844 (value != null && value.equals(v)))) { 845 if (node instanceof TreeNode) 846 ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable); 847 else if (node == p) 848 tab[index] = node.next; 849 else 850 p.next = node.next; 851 ++modCount; 852 --size; 853 afterNodeRemoval(node); 854 return node; 855 } 856 } 857 return null; 858 } 859 860 /** 861 * Removes all of the mappings from this map. 862 * The map will be empty after this call returns. 863 */ 864 public void clear() { 865 Node<K,V>[] tab; 866 modCount++; 867 if ((tab = table) != null && size > 0) { 868 size = 0; 869 for (int i = 0; i < tab.length; ++i) 870 tab[i] = null; 871 } 872 } 873 874 /** 875 * Returns {@code true} if this map maps one or more keys to the 876 * specified value. 877 * 878 * @param value value whose presence in this map is to be tested 879 * @return {@code true} if this map maps one or more keys to the 880 * specified value 881 */ 882 public boolean containsValue(Object value) { 883 Node<K,V>[] tab; V v; 884 if ((tab = table) != null && size > 0) { 885 for (Node<K,V> e : tab) { 886 for (; e != null; e = e.next) { 887 if ((v = e.value) == value || 888 (value != null && value.equals(v))) 889 return true; 890 } 891 } 892 } 893 return false; 894 } 895 896 /** 897 * Returns a {@link Set} view of the keys contained in this map. 898 * The set is backed by the map, so changes to the map are 899 * reflected in the set, and vice-versa. If the map is modified 900 * while an iteration over the set is in progress (except through 901 * the iterator's own {@code remove} operation), the results of 902 * the iteration are undefined. The set supports element removal, 903 * which removes the corresponding mapping from the map, via the 904 * {@code Iterator.remove}, {@code Set.remove}, 905 * {@code removeAll}, {@code retainAll}, and {@code clear} 906 * operations. It does not support the {@code add} or {@code addAll} 907 * operations. 908 * 909 * @return a set view of the keys contained in this map 910 */ 911 public Set<K> keySet() { 912 Set<K> ks = keySet; 913 if (ks == null) { 914 ks = new KeySet(); 915 keySet = ks; 916 } 917 return ks; 918 } 919 920 /** 921 * Prepares the array for {@link Collection#toArray(Object[])} implementation. 922 * If supplied array is smaller than this map size, a new array is allocated. 923 * If supplied array is bigger than this map size, a null is written at size index. 
924 * 925 * @param a an original array passed to {@code toArray()} method 926 * @param <T> type of array elements 927 * @return an array ready to be filled and returned from {@code toArray()} method. 928 */ 929 @SuppressWarnings("unchecked") 930 final <T> T[] prepareArray(T[] a) { 931 int size = this.size; 932 if (a.length < size) { 933 return (T[]) java.lang.reflect.Array 934 .newInstance(a.getClass().getComponentType(), size); 935 } 936 if (a.length > size) { 937 a[size] = null; 938 } 939 return a; 940 } 941 942 /** 943 * Fills an array with this map keys and returns it. This method assumes 944 * that input array is big enough to fit all the keys. Use 945 * {@link #prepareArray(Object[])} to ensure this. 946 * 947 * @param a an array to fill 948 * @param <T> type of array elements 949 * @return supplied array 950 */ 951 <T> T[] keysToArray(T[] a) { 952 Object[] r = a; 953 Node<K,V>[] tab; 954 int idx = 0; 955 if (size > 0 && (tab = table) != null) { 956 for (Node<K,V> e : tab) { 957 for (; e != null; e = e.next) { 958 r[idx++] = e.key; 959 } 960 } 961 } 962 return a; 963 } 964 965 /** 966 * Fills an array with this map values and returns it. This method assumes 967 * that input array is big enough to fit all the values. Use 968 * {@link #prepareArray(Object[])} to ensure this. 969 * 970 * @param a an array to fill 971 * @param <T> type of array elements 972 * @return supplied array 973 */ 974 <T> T[] valuesToArray(T[] a) { 975 Object[] r = a; 976 Node<K,V>[] tab; 977 int idx = 0; 978 if (size > 0 && (tab = table) != null) { 979 for (Node<K,V> e : tab) { 980 for (; e != null; e = e.next) { 981 r[idx++] = e.value; 982 } 983 } 984 } 985 return a; 986 } 987 988 final class KeySet extends AbstractSet<K> { 989 public final int size() { return size; } 990 public final void clear() { HashMap.this.clear(); } 991 public final Iterator<K> iterator() { return new KeyIterator(); } 992 public final boolean contains(Object o) { return containsKey(o); } 993 public final boolean remove(Object key) { 994 return removeNode(hash(key), key, null, false, true) != null; 995 } 996 public final Spliterator<K> spliterator() { 997 return new KeySpliterator<>(HashMap.this, 0, -1, 0, 0); 998 } 999 1000 public Object[] toArray() { 1001 return keysToArray(new Object[size]); 1002 } 1003 1004 public <T> T[] toArray(T[] a) { 1005 return keysToArray(prepareArray(a)); 1006 } 1007 1008 public final void forEach(Consumer<? super K> action) { 1009 Node<K,V>[] tab; 1010 if (action == null) 1011 throw new NullPointerException(); 1012 if (size > 0 && (tab = table) != null) { 1013 int mc = modCount; 1014 for (Node<K,V> e : tab) { 1015 for (; e != null; e = e.next) 1016 action.accept(e.key); 1017 } 1018 if (modCount != mc) 1019 throw new ConcurrentModificationException(); 1020 } 1021 } 1022 } 1023 1024 /** 1025 * Returns a {@link Collection} view of the values contained in this map. 1026 * The collection is backed by the map, so changes to the map are 1027 * reflected in the collection, and vice-versa. If the map is 1028 * modified while an iteration over the collection is in progress 1029 * (except through the iterator's own {@code remove} operation), 1030 * the results of the iteration are undefined. The collection 1031 * supports element removal, which removes the corresponding 1032 * mapping from the map, via the {@code Iterator.remove}, 1033 * {@code Collection.remove}, {@code removeAll}, 1034 * {@code retainAll} and {@code clear} operations. It does not 1035 * support the {@code add} or {@code addAll} operations. 
1036 * 1037 * @return a view of the values contained in this map 1038 */ 1039 public Collection<V> values() { 1040 Collection<V> vs = values; 1041 if (vs == null) { 1042 vs = new Values(); 1043 values = vs; 1044 } 1045 return vs; 1046 } 1047 1048 final class Values extends AbstractCollection<V> { 1049 public final int size() { return size; } 1050 public final void clear() { HashMap.this.clear(); } 1051 public final Iterator<V> iterator() { return new ValueIterator(); } 1052 public final boolean contains(Object o) { return containsValue(o); } 1053 public final Spliterator<V> spliterator() { 1054 return new ValueSpliterator<>(HashMap.this, 0, -1, 0, 0); 1055 } 1056 1057 public Object[] toArray() { 1058 return valuesToArray(new Object[size]); 1059 } 1060 1061 public <T> T[] toArray(T[] a) { 1062 return valuesToArray(prepareArray(a)); 1063 } 1064 1065 public final void forEach(Consumer<? super V> action) { 1066 Node<K,V>[] tab; 1067 if (action == null) 1068 throw new NullPointerException(); 1069 if (size > 0 && (tab = table) != null) { 1070 int mc = modCount; 1071 for (Node<K,V> e : tab) { 1072 for (; e != null; e = e.next) 1073 action.accept(e.value); 1074 } 1075 if (modCount != mc) 1076 throw new ConcurrentModificationException(); 1077 } 1078 } 1079 } 1080 1081 /** 1082 * Returns a {@link Set} view of the mappings contained in this map. 1083 * The set is backed by the map, so changes to the map are 1084 * reflected in the set, and vice-versa. If the map is modified 1085 * while an iteration over the set is in progress (except through 1086 * the iterator's own {@code remove} operation, or through the 1087 * {@code setValue} operation on a map entry returned by the 1088 * iterator) the results of the iteration are undefined. The set 1089 * supports element removal, which removes the corresponding 1090 * mapping from the map, via the {@code Iterator.remove}, 1091 * {@code Set.remove}, {@code removeAll}, {@code retainAll} and 1092 * {@code clear} operations. It does not support the 1093 * {@code add} or {@code addAll} operations. 1094 * 1095 * @return a set view of the mappings contained in this map 1096 */ 1097 public Set<Map.Entry<K,V>> entrySet() { 1098 Set<Map.Entry<K,V>> es; 1099 return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; 1100 } 1101 1102 final class EntrySet extends AbstractSet<Map.Entry<K,V>> { 1103 public final int size() { return size; } 1104 public final void clear() { HashMap.this.clear(); } 1105 public final Iterator<Map.Entry<K,V>> iterator() { 1106 return new EntryIterator(); 1107 } 1108 public final boolean contains(Object o) { 1109 if (!(o instanceof Map.Entry<?, ?> e)) 1110 return false; 1111 Object key = e.getKey(); 1112 Node<K,V> candidate = getNode(key); 1113 return candidate != null && candidate.equals(e); 1114 } 1115 public final boolean remove(Object o) { 1116 if (o instanceof Map.Entry<?, ?> e) { 1117 Object key = e.getKey(); 1118 Object value = e.getValue(); 1119 return removeNode(hash(key), key, value, true, true) != null; 1120 } 1121 return false; 1122 } 1123 public final Spliterator<Map.Entry<K,V>> spliterator() { 1124 return new EntrySpliterator<>(HashMap.this, 0, -1, 0, 0); 1125 } 1126 public final void forEach(Consumer<? 
super Map.Entry<K,V>> action) { 1127 Node<K,V>[] tab; 1128 if (action == null) 1129 throw new NullPointerException(); 1130 if (size > 0 && (tab = table) != null) { 1131 int mc = modCount; 1132 for (Node<K,V> e : tab) { 1133 for (; e != null; e = e.next) 1134 action.accept(e); 1135 } 1136 if (modCount != mc) 1137 throw new ConcurrentModificationException(); 1138 } 1139 } 1140 } 1141 1142 // Overrides of JDK8 Map extension methods 1143 1144 @Override 1145 public V getOrDefault(Object key, V defaultValue) { 1146 Node<K,V> e; 1147 return (e = getNode(key)) == null ? defaultValue : e.value; 1148 } 1149 1150 @Override 1151 public V putIfAbsent(K key, V value) { 1152 return putVal(hash(key), key, value, true, true); 1153 } 1154 1155 @Override 1156 public boolean remove(Object key, Object value) { 1157 return removeNode(hash(key), key, value, true, true) != null; 1158 } 1159 1160 @Override 1161 public boolean replace(K key, V oldValue, V newValue) { 1162 Node<K,V> e; V v; 1163 if ((e = getNode(key)) != null && 1164 ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { 1165 e.value = newValue; 1166 afterNodeAccess(e); 1167 return true; 1168 } 1169 return false; 1170 } 1171 1172 @Override 1173 public V replace(K key, V value) { 1174 Node<K,V> e; 1175 if ((e = getNode(key)) != null) { 1176 V oldValue = e.value; 1177 e.value = value; 1178 afterNodeAccess(e); 1179 return oldValue; 1180 } 1181 return null; 1182 } 1183 1184 /** 1185 * {@inheritDoc} 1186 * 1187 * <p>This method will, on a best-effort basis, throw a 1188 * {@link ConcurrentModificationException} if it is detected that the 1189 * mapping function modifies this map during computation. 1190 * 1191 * @throws ConcurrentModificationException if it is detected that the 1192 * mapping function modified this map 1193 */ 1194 @Override 1195 public V computeIfAbsent(K key, 1196 Function<? super K, ? 
extends V> mappingFunction) { 1197 if (mappingFunction == null) 1198 throw new NullPointerException(); 1199 int hash = hash(key); 1200 Node<K,V>[] tab; Node<K,V> first; int n, i; 1201 int binCount = 0; 1202 TreeNode<K,V> t = null; 1203 Node<K,V> old = null; 1204 if (size > threshold || (tab = table) == null || 1205 (n = tab.length) == 0) 1206 n = (tab = resize()).length; 1207 if ((first = tab[i = (n - 1) & hash]) != null) { 1208 if (first instanceof TreeNode) 1209 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); 1210 else { 1211 Node<K,V> e = first; K k; 1212 do { 1213 if (e.hash == hash && 1214 Objects.equals(key, e.key)) { 1215 old = e; 1216 break; 1217 } 1218 ++binCount; 1219 } while ((e = e.next) != null); 1220 } 1221 V oldValue; 1222 if (old != null && (oldValue = old.value) != null) { 1223 afterNodeAccess(old); 1224 return oldValue; 1225 } 1226 } 1227 int mc = modCount; 1228 V v = mappingFunction.apply(key); 1229 if (mc != modCount) { throw new ConcurrentModificationException(); } 1230 if (v == null) { 1231 return null; 1232 } else if (old != null) { 1233 old.value = v; 1234 afterNodeAccess(old); 1235 return v; 1236 } 1237 else if (t != null) 1238 t.putTreeVal(this, tab, hash, key, v); 1239 else { 1240 tab[i] = newNode(hash, key, v, first); 1241 if (binCount >= TREEIFY_THRESHOLD - 1) 1242 treeifyBin(tab, hash); 1243 } 1244 modCount = mc + 1; 1245 ++size; 1246 afterNodeInsertion(true); 1247 return v; 1248 } 1249 1250 /** 1251 * {@inheritDoc} 1252 * 1253 * <p>This method will, on a best-effort basis, throw a 1254 * {@link ConcurrentModificationException} if it is detected that the 1255 * remapping function modifies this map during computation. 1256 * 1257 * @throws ConcurrentModificationException if it is detected that the 1258 * remapping function modified this map 1259 */ 1260 @Override 1261 public V computeIfPresent(K key, 1262 BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1263 if (remappingFunction == null) 1264 throw new NullPointerException(); 1265 Node<K,V> e; V oldValue; 1266 if ((e = getNode(key)) != null && 1267 (oldValue = e.value) != null) { 1268 int mc = modCount; 1269 V v = remappingFunction.apply(key, oldValue); 1270 if (mc != modCount) { throw new ConcurrentModificationException(); } 1271 if (v != null) { 1272 e.value = v; 1273 afterNodeAccess(e); 1274 return v; 1275 } 1276 else { 1277 int hash = hash(key); 1278 removeNode(hash, key, null, false, true); 1279 } 1280 } 1281 return null; 1282 } 1283 1284 /** 1285 * {@inheritDoc} 1286 * 1287 * <p>This method will, on a best-effort basis, throw a 1288 * {@link ConcurrentModificationException} if it is detected that the 1289 * remapping function modifies this map during computation. 1290 * 1291 * @throws ConcurrentModificationException if it is detected that the 1292 * remapping function modified this map 1293 */ 1294 @Override 1295 public V compute(K key, 1296 BiFunction<? super K, ? super V, ? 
extends V> remappingFunction) { 1297 if (remappingFunction == null) 1298 throw new NullPointerException(); 1299 int hash = hash(key); 1300 Node<K,V>[] tab; Node<K,V> first; int n, i; 1301 int binCount = 0; 1302 TreeNode<K,V> t = null; 1303 Node<K,V> old = null; 1304 if (size > threshold || (tab = table) == null || 1305 (n = tab.length) == 0) 1306 n = (tab = resize()).length; 1307 if ((first = tab[i = (n - 1) & hash]) != null) { 1308 if (first instanceof TreeNode) 1309 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); 1310 else { 1311 Node<K,V> e = first; K k; 1312 do { 1313 if (e.hash == hash && 1314 Objects.equals(key, e.key)) { 1315 old = e; 1316 break; 1317 } 1318 ++binCount; 1319 } while ((e = e.next) != null); 1320 } 1321 } 1322 V oldValue = (old == null) ? null : old.value; 1323 int mc = modCount; 1324 V v = remappingFunction.apply(key, oldValue); 1325 if (mc != modCount) { throw new ConcurrentModificationException(); } 1326 if (old != null) { 1327 if (v != null) { 1328 old.value = v; 1329 afterNodeAccess(old); 1330 } 1331 else 1332 removeNode(hash, key, null, false, true); 1333 } 1334 else if (v != null) { 1335 if (t != null) 1336 t.putTreeVal(this, tab, hash, key, v); 1337 else { 1338 tab[i] = newNode(hash, key, v, first); 1339 if (binCount >= TREEIFY_THRESHOLD - 1) 1340 treeifyBin(tab, hash); 1341 } 1342 modCount = mc + 1; 1343 ++size; 1344 afterNodeInsertion(true); 1345 } 1346 return v; 1347 } 1348 1349 /** 1350 * {@inheritDoc} 1351 * 1352 * <p>This method will, on a best-effort basis, throw a 1353 * {@link ConcurrentModificationException} if it is detected that the 1354 * remapping function modifies this map during computation. 1355 * 1356 * @throws ConcurrentModificationException if it is detected that the 1357 * remapping function modified this map 1358 */ 1359 @Override 1360 public V merge(K key, V value, 1361 BiFunction<? super V, ? super V, ? extends V> remappingFunction) { 1362 if (value == null || remappingFunction == null) 1363 throw new NullPointerException(); 1364 int hash = hash(key); 1365 Node<K,V>[] tab; Node<K,V> first; int n, i; 1366 int binCount = 0; 1367 TreeNode<K,V> t = null; 1368 Node<K,V> old = null; 1369 if (size > threshold || (tab = table) == null || 1370 (n = tab.length) == 0) 1371 n = (tab = resize()).length; 1372 if ((first = tab[i = (n - 1) & hash]) != null) { 1373 if (first instanceof TreeNode) 1374 old = (t = (TreeNode<K,V>)first).getTreeNode(hash, key); 1375 else { 1376 Node<K,V> e = first; K k; 1377 do { 1378 if (e.hash == hash && 1379 Objects.equals(key, e.key)) { 1380 old = e; 1381 break; 1382 } 1383 ++binCount; 1384 } while ((e = e.next) != null); 1385 } 1386 } 1387 if (old != null) { 1388 V v; 1389 if (old.value != null) { 1390 int mc = modCount; 1391 v = remappingFunction.apply(old.value, value); 1392 if (mc != modCount) { 1393 throw new ConcurrentModificationException(); 1394 } 1395 } else { 1396 v = value; 1397 } 1398 if (v != null) { 1399 old.value = v; 1400 afterNodeAccess(old); 1401 } 1402 else 1403 removeNode(hash, key, null, false, true); 1404 return v; 1405 } else { 1406 if (t != null) 1407 t.putTreeVal(this, tab, hash, key, value); 1408 else { 1409 tab[i] = newNode(hash, key, value, first); 1410 if (binCount >= TREEIFY_THRESHOLD - 1) 1411 treeifyBin(tab, hash); 1412 } 1413 ++modCount; 1414 ++size; 1415 afterNodeInsertion(true); 1416 return value; 1417 } 1418 } 1419 1420 @Override 1421 public void forEach(BiConsumer<? super K, ? 
super V> action) { 1422 Node<K,V>[] tab; 1423 if (action == null) 1424 throw new NullPointerException(); 1425 if (size > 0 && (tab = table) != null) { 1426 int mc = modCount; 1427 for (Node<K,V> e : tab) { 1428 for (; e != null; e = e.next) 1429 action.accept(e.key, e.value); 1430 } 1431 if (modCount != mc) 1432 throw new ConcurrentModificationException(); 1433 } 1434 } 1435 1436 @Override 1437 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { 1438 Node<K,V>[] tab; 1439 if (function == null) 1440 throw new NullPointerException(); 1441 if (size > 0 && (tab = table) != null) { 1442 int mc = modCount; 1443 for (Node<K,V> e : tab) { 1444 for (; e != null; e = e.next) { 1445 e.value = function.apply(e.key, e.value); 1446 } 1447 } 1448 if (modCount != mc) 1449 throw new ConcurrentModificationException(); 1450 } 1451 } 1452 1453 /* ------------------------------------------------------------ */ 1454 // Cloning and serialization 1455 1456 /** 1457 * Returns a shallow copy of this {@code HashMap} instance: the keys and 1458 * values themselves are not cloned. 1459 * 1460 * @return a shallow copy of this map 1461 */ 1462 @SuppressWarnings("unchecked") 1463 @Override 1464 public Object clone() { 1465 HashMap<K,V> result; 1466 try { 1467 result = (HashMap<K,V>)super.clone(); 1468 } catch (CloneNotSupportedException e) { 1469 // this shouldn't happen, since we are Cloneable 1470 throw new InternalError(e); 1471 } 1472 result.reinitialize(); 1473 result.putMapEntries(this, false); 1474 return result; 1475 } 1476 1477 // These methods are also used when serializing HashSets 1478 final float loadFactor() { return loadFactor; } 1479 final int capacity() { 1480 return (table != null) ? table.length : 1481 (threshold > 0) ? threshold : 1482 DEFAULT_INITIAL_CAPACITY; 1483 } 1484 1485 /** 1486 * Saves this map to a stream (that is, serializes it). 1487 * 1488 * @param s the stream 1489 * @throws IOException if an I/O error occurs 1490 * @serialData The <i>capacity</i> of the HashMap (the length of the 1491 * bucket array) is emitted (int), followed by the 1492 * <i>size</i> (an int, the number of key-value 1493 * mappings), followed by the key (Object) and value (Object) 1494 * for each key-value mapping. The key-value mappings are 1495 * emitted in no particular order. 1496 */ 1497 @java.io.Serial 1498 private void writeObject(java.io.ObjectOutputStream s) 1499 throws IOException { 1500 int buckets = capacity(); 1501 // Write out the threshold, loadfactor, and any hidden stuff 1502 s.defaultWriteObject(); 1503 s.writeInt(buckets); 1504 s.writeInt(size); 1505 internalWriteEntries(s); 1506 } 1507 1508 /** 1509 * Reconstitutes this map from a stream (that is, deserializes it). 
1510 * @param s the stream 1511 * @throws ClassNotFoundException if the class of a serialized object 1512 * could not be found 1513 * @throws IOException if an I/O error occurs 1514 */ 1515 @java.io.Serial 1516 private void readObject(ObjectInputStream s) 1517 throws IOException, ClassNotFoundException { 1518 1519 ObjectInputStream.GetField fields = s.readFields(); 1520 1521 // Read loadFactor (ignore threshold) 1522 float lf = fields.get("loadFactor", 0.75f); 1523 if (lf <= 0 || Float.isNaN(lf)) 1524 throw new InvalidObjectException("Illegal load factor: " + lf); 1525 1526 lf = Math.clamp(lf, 0.25f, 4.0f); 1527 HashMap.UnsafeHolder.putLoadFactor(this, lf); 1528 1529 reinitialize(); 1530 1531 s.readInt(); // Read and ignore number of buckets 1532 int mappings = s.readInt(); // Read number of mappings (size) 1533 if (mappings < 0) { 1534 throw new InvalidObjectException("Illegal mappings count: " + mappings); 1535 } else if (mappings == 0) { 1536 // use defaults 1537 } else if (mappings > 0) { 1538 double dc = Math.ceil(mappings / (double)lf); 1539 int cap = ((dc < DEFAULT_INITIAL_CAPACITY) ? 1540 DEFAULT_INITIAL_CAPACITY : 1541 (dc >= MAXIMUM_CAPACITY) ? 1542 MAXIMUM_CAPACITY : 1543 tableSizeFor((int)dc)); 1544 float ft = (float)cap * lf; 1545 threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ? 1546 (int)ft : Integer.MAX_VALUE); 1547 1548 // Check Map.Entry[].class since it's the nearest public type to 1549 // what we're actually creating. 1550 SharedSecrets.getJavaObjectInputStreamAccess().checkArray(s, Map.Entry[].class, cap); 1551 @SuppressWarnings({"rawtypes","unchecked"}) 1552 Node<K,V>[] tab = (Node<K,V>[])new Node[cap]; 1553 table = tab; 1554 1555 // Read the keys and values, and put the mappings in the HashMap 1556 for (int i = 0; i < mappings; i++) { 1557 @SuppressWarnings("unchecked") 1558 K key = (K) s.readObject(); 1559 @SuppressWarnings("unchecked") 1560 V value = (V) s.readObject(); 1561 putVal(hash(key), key, value, false, false); 1562 } 1563 } 1564 } 1565 1566 // Support for resetting final field during deserializing 1567 private static final class UnsafeHolder { 1568 private UnsafeHolder() { throw new InternalError(); } 1569 private static final jdk.internal.misc.Unsafe unsafe 1570 = jdk.internal.misc.Unsafe.getUnsafe(); 1571 private static final long LF_OFFSET 1572 = unsafe.objectFieldOffset(HashMap.class, "loadFactor"); 1573 static void putLoadFactor(HashMap<?, ?> map, float lf) { 1574 unsafe.putFloat(map, LF_OFFSET, lf); 1575 } 1576 } 1577 1578 /* ------------------------------------------------------------ */ 1579 // iterators 1580 1581 abstract class HashIterator { 1582 Node<K,V> next; // next entry to return 1583 Node<K,V> current; // current entry 1584 int expectedModCount; // for fast-fail 1585 int index; // current slot 1586 1587 HashIterator() { 1588 expectedModCount = modCount; 1589 Node<K,V>[] t = table; 1590 current = next = null; 1591 index = 0; 1592 if (t != null && size > 0) { // advance to first entry 1593 do {} while (index < t.length && (next = t[index++]) == null); 1594 } 1595 } 1596 1597 public final boolean hasNext() { 1598 return next != null; 1599 } 1600 1601 final Node<K,V> nextNode() { 1602 Node<K,V>[] t; 1603 Node<K,V> e = next; 1604 if (modCount != expectedModCount) 1605 throw new ConcurrentModificationException(); 1606 if (e == null) 1607 throw new NoSuchElementException(); 1608 if ((next = (current = e).next) == null && (t = table) != null) { 1609 do {} while (index < t.length && (next = t[index++]) == null); 1610 } 1611 return e; 
1612 } 1613 1614 public final void remove() { 1615 Node<K,V> p = current; 1616 if (p == null) 1617 throw new IllegalStateException(); 1618 if (modCount != expectedModCount) 1619 throw new ConcurrentModificationException(); 1620 current = null; 1621 removeNode(p.hash, p.key, null, false, false); 1622 expectedModCount = modCount; 1623 } 1624 } 1625 1626 final class KeyIterator extends HashIterator 1627 implements Iterator<K> { 1628 public final K next() { return nextNode().key; } 1629 } 1630 1631 final class ValueIterator extends HashIterator 1632 implements Iterator<V> { 1633 public final V next() { return nextNode().value; } 1634 } 1635 1636 final class EntryIterator extends HashIterator 1637 implements Iterator<Map.Entry<K,V>> { 1638 public final Map.Entry<K,V> next() { return nextNode(); } 1639 } 1640 1641 /* ------------------------------------------------------------ */ 1642 // spliterators 1643 1644 static class HashMapSpliterator<K,V> { 1645 final HashMap<K,V> map; 1646 Node<K,V> current; // current node 1647 int index; // current index, modified on advance/split 1648 int fence; // one past last index 1649 int est; // size estimate 1650 int expectedModCount; // for comodification checks 1651 1652 HashMapSpliterator(HashMap<K,V> m, int origin, 1653 int fence, int est, 1654 int expectedModCount) { 1655 this.map = m; 1656 this.index = origin; 1657 this.fence = fence; 1658 this.est = est; 1659 this.expectedModCount = expectedModCount; 1660 } 1661 1662 final int getFence() { // initialize fence and size on first use 1663 int hi; 1664 if ((hi = fence) < 0) { 1665 HashMap<K,V> m = map; 1666 est = m.size; 1667 expectedModCount = m.modCount; 1668 Node<K,V>[] tab = m.table; 1669 hi = fence = (tab == null) ? 0 : tab.length; 1670 } 1671 return hi; 1672 } 1673 1674 public final long estimateSize() { 1675 getFence(); // force init 1676 return (long) est; 1677 } 1678 } 1679 1680 static final class KeySpliterator<K,V> 1681 extends HashMapSpliterator<K,V> 1682 implements Spliterator<K> { 1683 KeySpliterator(HashMap<K,V> m, int origin, int fence, int est, 1684 int expectedModCount) { 1685 super(m, origin, fence, est, expectedModCount); 1686 } 1687 1688 public KeySpliterator<K,V> trySplit() { 1689 int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; 1690 return (lo >= mid || current != null) ? null : 1691 new KeySpliterator<>(map, lo, index = mid, est >>>= 1, 1692 expectedModCount); 1693 } 1694 1695 public void forEachRemaining(Consumer<? super K> action) { 1696 int i, hi, mc; 1697 if (action == null) 1698 throw new NullPointerException(); 1699 HashMap<K,V> m = map; 1700 Node<K,V>[] tab = m.table; 1701 if ((hi = fence) < 0) { 1702 mc = expectedModCount = m.modCount; 1703 hi = fence = (tab == null) ? 0 : tab.length; 1704 } 1705 else 1706 mc = expectedModCount; 1707 if (tab != null && tab.length >= hi && 1708 (i = index) >= 0 && (i < (index = hi) || current != null)) { 1709 Node<K,V> p = current; 1710 current = null; 1711 do { 1712 if (p == null) 1713 p = tab[i++]; 1714 else { 1715 action.accept(p.key); 1716 p = p.next; 1717 } 1718 } while (p != null || i < hi); 1719 if (m.modCount != mc) 1720 throw new ConcurrentModificationException(); 1721 } 1722 } 1723 1724 public boolean tryAdvance(Consumer<? 
super K> action) { 1725 int hi; 1726 if (action == null) 1727 throw new NullPointerException(); 1728 Node<K,V>[] tab = map.table; 1729 if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { 1730 while (current != null || index < hi) { 1731 if (current == null) 1732 current = tab[index++]; 1733 else { 1734 K k = current.key; 1735 current = current.next; 1736 action.accept(k); 1737 if (map.modCount != expectedModCount) 1738 throw new ConcurrentModificationException(); 1739 return true; 1740 } 1741 } 1742 } 1743 return false; 1744 } 1745 1746 public int characteristics() { 1747 return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | 1748 Spliterator.DISTINCT; 1749 } 1750 } 1751 1752 static final class ValueSpliterator<K,V> 1753 extends HashMapSpliterator<K,V> 1754 implements Spliterator<V> { 1755 ValueSpliterator(HashMap<K,V> m, int origin, int fence, int est, 1756 int expectedModCount) { 1757 super(m, origin, fence, est, expectedModCount); 1758 } 1759 1760 public ValueSpliterator<K,V> trySplit() { 1761 int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; 1762 return (lo >= mid || current != null) ? null : 1763 new ValueSpliterator<>(map, lo, index = mid, est >>>= 1, 1764 expectedModCount); 1765 } 1766 1767 public void forEachRemaining(Consumer<? super V> action) { 1768 int i, hi, mc; 1769 if (action == null) 1770 throw new NullPointerException(); 1771 HashMap<K,V> m = map; 1772 Node<K,V>[] tab = m.table; 1773 if ((hi = fence) < 0) { 1774 mc = expectedModCount = m.modCount; 1775 hi = fence = (tab == null) ? 0 : tab.length; 1776 } 1777 else 1778 mc = expectedModCount; 1779 if (tab != null && tab.length >= hi && 1780 (i = index) >= 0 && (i < (index = hi) || current != null)) { 1781 Node<K,V> p = current; 1782 current = null; 1783 do { 1784 if (p == null) 1785 p = tab[i++]; 1786 else { 1787 action.accept(p.value); 1788 p = p.next; 1789 } 1790 } while (p != null || i < hi); 1791 if (m.modCount != mc) 1792 throw new ConcurrentModificationException(); 1793 } 1794 } 1795 1796 public boolean tryAdvance(Consumer<? super V> action) { 1797 int hi; 1798 if (action == null) 1799 throw new NullPointerException(); 1800 Node<K,V>[] tab = map.table; 1801 if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { 1802 while (current != null || index < hi) { 1803 if (current == null) 1804 current = tab[index++]; 1805 else { 1806 V v = current.value; 1807 current = current.next; 1808 action.accept(v); 1809 if (map.modCount != expectedModCount) 1810 throw new ConcurrentModificationException(); 1811 return true; 1812 } 1813 } 1814 } 1815 return false; 1816 } 1817 1818 public int characteristics() { 1819 return (fence < 0 || est == map.size ? Spliterator.SIZED : 0); 1820 } 1821 } 1822 1823 static final class EntrySpliterator<K,V> 1824 extends HashMapSpliterator<K,V> 1825 implements Spliterator<Map.Entry<K,V>> { 1826 EntrySpliterator(HashMap<K,V> m, int origin, int fence, int est, 1827 int expectedModCount) { 1828 super(m, origin, fence, est, expectedModCount); 1829 } 1830 1831 public EntrySpliterator<K,V> trySplit() { 1832 int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; 1833 return (lo >= mid || current != null) ? null : 1834 new EntrySpliterator<>(map, lo, index = mid, est >>>= 1, 1835 expectedModCount); 1836 } 1837 1838 public void forEachRemaining(Consumer<? 
super Map.Entry<K,V>> action) { 1839 int i, hi, mc; 1840 if (action == null) 1841 throw new NullPointerException(); 1842 HashMap<K,V> m = map; 1843 Node<K,V>[] tab = m.table; 1844 if ((hi = fence) < 0) { 1845 mc = expectedModCount = m.modCount; 1846 hi = fence = (tab == null) ? 0 : tab.length; 1847 } 1848 else 1849 mc = expectedModCount; 1850 if (tab != null && tab.length >= hi && 1851 (i = index) >= 0 && (i < (index = hi) || current != null)) { 1852 Node<K,V> p = current; 1853 current = null; 1854 do { 1855 if (p == null) 1856 p = tab[i++]; 1857 else { 1858 action.accept(p); 1859 p = p.next; 1860 } 1861 } while (p != null || i < hi); 1862 if (m.modCount != mc) 1863 throw new ConcurrentModificationException(); 1864 } 1865 } 1866 1867 public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) { 1868 int hi; 1869 if (action == null) 1870 throw new NullPointerException(); 1871 Node<K,V>[] tab = map.table; 1872 if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { 1873 while (current != null || index < hi) { 1874 if (current == null) 1875 current = tab[index++]; 1876 else { 1877 Node<K,V> e = current; 1878 current = current.next; 1879 action.accept(e); 1880 if (map.modCount != expectedModCount) 1881 throw new ConcurrentModificationException(); 1882 return true; 1883 } 1884 } 1885 } 1886 return false; 1887 } 1888 1889 public int characteristics() { 1890 return (fence < 0 || est == map.size ? Spliterator.SIZED : 0) | 1891 Spliterator.DISTINCT; 1892 } 1893 } 1894 1895 /* ------------------------------------------------------------ */ 1896 // LinkedHashMap support 1897 1898 1899 /* 1900 * The following package-protected methods are designed to be 1901 * overridden by LinkedHashMap, but not by any other subclass. 1902 * Nearly all other internal methods are also package-protected 1903 * but are declared final, so can be used by LinkedHashMap, view 1904 * classes, and HashSet. 1905 */ 1906 1907 // Create a regular (non-tree) node 1908 Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) { 1909 return new Node<>(hash, key, value, next); 1910 } 1911 1912 // For conversion from TreeNodes to plain nodes 1913 Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) { 1914 return new Node<>(p.hash, p.key, p.value, next); 1915 } 1916 1917 // Create a tree bin node 1918 TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) { 1919 return new TreeNode<>(hash, key, value, next); 1920 } 1921 1922 // For treeifyBin 1923 TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) { 1924 return new TreeNode<>(p.hash, p.key, p.value, next); 1925 } 1926 1927 /** 1928 * Reset to initial default state. Called by clone and readObject. 1929 */ 1930 void reinitialize() { 1931 table = null; 1932 entrySet = null; 1933 keySet = null; 1934 values = null; 1935 modCount = 0; 1936 threshold = 0; 1937 size = 0; 1938 } 1939 1940 // Callbacks to allow LinkedHashMap post-actions 1941 void afterNodeAccess(Node<K,V> p) { } 1942 void afterNodeInsertion(boolean evict) { } 1943 void afterNodeRemoval(Node<K,V> p) { } 1944 1945 // Called only from writeObject, to ensure compatible ordering. 
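    // (Entries are written as alternating key/value objects in table-traversal
    // order; readObject above reads the same number of pairs back and re-inserts
    // each one with putVal, so bucket placement is recomputed on deserialization
    // rather than restored from the stream.)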
1946 void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { 1947 Node<K,V>[] tab; 1948 if (size > 0 && (tab = table) != null) { 1949 for (Node<K,V> e : tab) { 1950 for (; e != null; e = e.next) { 1951 s.writeObject(e.key); 1952 s.writeObject(e.value); 1953 } 1954 } 1955 } 1956 } 1957 1958 /* ------------------------------------------------------------ */ 1959 // Tree bins 1960 1961 /** 1962 * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn 1963 * extends Node) so can be used as extension of either regular or 1964 * linked node. 1965 */ 1966 static final class TreeNode<K,V> extends LinkedHashMap.Entry<K,V> { 1967 TreeNode<K,V> parent; // red-black tree links 1968 TreeNode<K,V> left; 1969 TreeNode<K,V> right; 1970 TreeNode<K,V> prev; // needed to unlink next upon deletion 1971 boolean red; 1972 TreeNode(int hash, K key, V val, Node<K,V> next) { 1973 super(hash, key, val, next); 1974 } 1975 1976 /** 1977 * Returns root of tree containing this node. 1978 */ 1979 final TreeNode<K,V> root() { 1980 for (TreeNode<K,V> r = this, p;;) { 1981 if ((p = r.parent) == null) 1982 return r; 1983 r = p; 1984 } 1985 } 1986 1987 /** 1988 * Ensures that the given root is the first node of its bin. 1989 */ 1990 static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) { 1991 int n; 1992 if (root != null && tab != null && (n = tab.length) > 0) { 1993 int index = (n - 1) & root.hash; 1994 TreeNode<K,V> first = (TreeNode<K,V>)tab[index]; 1995 if (root != first) { 1996 Node<K,V> rn; 1997 tab[index] = root; 1998 TreeNode<K,V> rp = root.prev; 1999 if ((rn = root.next) != null) 2000 ((TreeNode<K,V>)rn).prev = rp; 2001 if (rp != null) 2002 rp.next = rn; 2003 if (first != null) 2004 first.prev = root; 2005 root.next = first; 2006 root.prev = null; 2007 } 2008 assert checkInvariants(root); 2009 } 2010 } 2011 2012 /** 2013 * Finds the node starting at root p with the given hash and key. 2014 * The kc argument caches comparableClassFor(key) upon first use 2015 * comparing keys. 2016 */ 2017 final TreeNode<K,V> find(int h, Object k, Class<?> kc) { 2018 TreeNode<K,V> p = this; 2019 do { 2020 int ph, dir; K pk; 2021 TreeNode<K,V> pl = p.left, pr = p.right, q; 2022 if ((ph = p.hash) > h) 2023 p = pl; 2024 else if (ph < h) 2025 p = pr; 2026 else if (Objects.equals(k, (pk = p.key))) 2027 return p; 2028 else if (pl == null) 2029 p = pr; 2030 else if (pr == null) 2031 p = pl; 2032 else if ((kc != null || 2033 (kc = comparableClassFor(k)) != null) && 2034 (dir = compareComparables(kc, k, pk)) != 0) 2035 p = (dir < 0) ? pl : pr; 2036 else if ((q = pr.find(h, k, kc)) != null) 2037 return q; 2038 else 2039 p = pl; 2040 } while (p != null); 2041 return null; 2042 } 2043 2044 /** 2045 * Calls find for root node. 2046 */ 2047 final TreeNode<K,V> getTreeNode(int h, Object k) { 2048 return ((parent != null) ? root() : this).find(h, k, null); 2049 } 2050 2051 /** 2052 * Tie-breaking utility for ordering insertions when equal 2053 * hashCodes and non-comparable. We don't require a total 2054 * order, just a consistent insertion rule to maintain 2055 * equivalence across rebalancings. Tie-breaking further than 2056 * necessary simplifies testing a bit. 2057 */ 2058 static int tieBreakOrder(Object a, Object b) { 2059 int d; 2060 if (a == null || b == null || 2061 (d = a.getClass().getName(). 2062 compareTo(b.getClass().getName())) == 0) 2063 d = (System.identityHashCode(a) <= System.identityHashCode(b) ? 
2064 -1 : 1); 2065 return d; 2066 } 2067 2068 /** 2069 * Forms tree of the nodes linked from this node. 2070 */ 2071 final void treeify(Node<K,V>[] tab) { 2072 TreeNode<K,V> root = null; 2073 for (TreeNode<K,V> x = this, next; x != null; x = next) { 2074 next = (TreeNode<K,V>)x.next; 2075 x.left = x.right = null; 2076 if (root == null) { 2077 x.parent = null; 2078 x.red = false; 2079 root = x; 2080 } 2081 else { 2082 K k = x.key; 2083 int h = x.hash; 2084 Class<?> kc = null; 2085 for (TreeNode<K,V> p = root;;) { 2086 int dir, ph; 2087 K pk = p.key; 2088 if ((ph = p.hash) > h) 2089 dir = -1; 2090 else if (ph < h) 2091 dir = 1; 2092 else if ((kc == null && 2093 (kc = comparableClassFor(k)) == null) || 2094 (dir = compareComparables(kc, k, pk)) == 0) 2095 dir = tieBreakOrder(k, pk); 2096 2097 TreeNode<K,V> xp = p; 2098 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2099 x.parent = xp; 2100 if (dir <= 0) 2101 xp.left = x; 2102 else 2103 xp.right = x; 2104 root = balanceInsertion(root, x); 2105 break; 2106 } 2107 } 2108 } 2109 } 2110 moveRootToFront(tab, root); 2111 } 2112 2113 /** 2114 * Returns a list of non-TreeNodes replacing those linked from 2115 * this node. 2116 */ 2117 final Node<K,V> untreeify(HashMap<K,V> map) { 2118 Node<K,V> hd = null, tl = null; 2119 for (Node<K,V> q = this; q != null; q = q.next) { 2120 Node<K,V> p = map.replacementNode(q, null); 2121 if (tl == null) 2122 hd = p; 2123 else 2124 tl.next = p; 2125 tl = p; 2126 } 2127 return hd; 2128 } 2129 2130 /** 2131 * Tree version of putVal. 2132 */ 2133 final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab, 2134 int h, K k, V v) { 2135 Class<?> kc = null; 2136 boolean searched = false; 2137 TreeNode<K,V> root = (parent != null) ? root() : this; 2138 for (TreeNode<K,V> p = root;;) { 2139 int dir, ph; K pk; 2140 if ((ph = p.hash) > h) 2141 dir = -1; 2142 else if (ph < h) 2143 dir = 1; 2144 else if (Objects.equals(k, (pk = p.key))) 2145 return p; 2146 else if ((kc == null && 2147 (kc = comparableClassFor(k)) == null) || 2148 (dir = compareComparables(kc, k, pk)) == 0) { 2149 if (!searched) { 2150 TreeNode<K,V> q, ch; 2151 searched = true; 2152 if (((ch = p.left) != null && 2153 (q = ch.find(h, k, kc)) != null) || 2154 ((ch = p.right) != null && 2155 (q = ch.find(h, k, kc)) != null)) 2156 return q; 2157 } 2158 dir = tieBreakOrder(k, pk); 2159 } 2160 2161 TreeNode<K,V> xp = p; 2162 if ((p = (dir <= 0) ? p.left : p.right) == null) { 2163 Node<K,V> xpn = xp.next; 2164 TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn); 2165 if (dir <= 0) 2166 xp.left = x; 2167 else 2168 xp.right = x; 2169 xp.next = x; 2170 x.parent = x.prev = xp; 2171 if (xpn != null) 2172 ((TreeNode<K,V>)xpn).prev = x; 2173 moveRootToFront(tab, balanceInsertion(root, x)); 2174 return null; 2175 } 2176 } 2177 } 2178 2179 /** 2180 * Removes the given node, that must be present before this call. 2181 * This is messier than typical red-black deletion code because we 2182 * cannot swap the contents of an interior node with a leaf 2183 * successor that is pinned by "next" pointers that are accessible 2184 * independently during traversal. So instead we swap the tree 2185 * linkages. If the current tree appears to have too few nodes, 2186 * the bin is converted back to a plain bin. (The test triggers 2187 * somewhere between 2 and 6 nodes, depending on tree structure). 
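     * <p>When {@code movable} is false (as it is when removal happens through an
     * iterator), the bin is not untreeified here and the root is not moved to the
     * front of the bin, so an in-progress traversal of the bin's node list is
     * left undisturbed.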
2188 */ 2189 final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab, 2190 boolean movable) { 2191 int n; 2192 if (tab == null || (n = tab.length) == 0) 2193 return; 2194 int index = (n - 1) & hash; 2195 TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl; 2196 TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev; 2197 if (pred == null) 2198 tab[index] = first = succ; 2199 else 2200 pred.next = succ; 2201 if (succ != null) 2202 succ.prev = pred; 2203 if (first == null) 2204 return; 2205 if (root.parent != null) 2206 root = root.root(); 2207 if (root == null 2208 || (movable 2209 && (root.right == null 2210 || (rl = root.left) == null 2211 || rl.left == null))) { 2212 tab[index] = first.untreeify(map); // too small 2213 return; 2214 } 2215 TreeNode<K,V> p = this, pl = left, pr = right, replacement; 2216 if (pl != null && pr != null) { 2217 TreeNode<K,V> s = pr, sl; 2218 while ((sl = s.left) != null) // find successor 2219 s = sl; 2220 boolean c = s.red; s.red = p.red; p.red = c; // swap colors 2221 TreeNode<K,V> sr = s.right; 2222 TreeNode<K,V> pp = p.parent; 2223 if (s == pr) { // p was s's direct parent 2224 p.parent = s; 2225 s.right = p; 2226 } 2227 else { 2228 TreeNode<K,V> sp = s.parent; 2229 if ((p.parent = sp) != null) { 2230 if (s == sp.left) 2231 sp.left = p; 2232 else 2233 sp.right = p; 2234 } 2235 if ((s.right = pr) != null) 2236 pr.parent = s; 2237 } 2238 p.left = null; 2239 if ((p.right = sr) != null) 2240 sr.parent = p; 2241 if ((s.left = pl) != null) 2242 pl.parent = s; 2243 if ((s.parent = pp) == null) 2244 root = s; 2245 else if (p == pp.left) 2246 pp.left = s; 2247 else 2248 pp.right = s; 2249 if (sr != null) 2250 replacement = sr; 2251 else 2252 replacement = p; 2253 } 2254 else if (pl != null) 2255 replacement = pl; 2256 else if (pr != null) 2257 replacement = pr; 2258 else 2259 replacement = p; 2260 if (replacement != p) { 2261 TreeNode<K,V> pp = replacement.parent = p.parent; 2262 if (pp == null) 2263 (root = replacement).red = false; 2264 else if (p == pp.left) 2265 pp.left = replacement; 2266 else 2267 pp.right = replacement; 2268 p.left = p.right = p.parent = null; 2269 } 2270 2271 TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement); 2272 2273 if (replacement == p) { // detach 2274 TreeNode<K,V> pp = p.parent; 2275 p.parent = null; 2276 if (pp != null) { 2277 if (p == pp.left) 2278 pp.left = null; 2279 else if (p == pp.right) 2280 pp.right = null; 2281 } 2282 } 2283 if (movable) 2284 moveRootToFront(tab, r); 2285 } 2286 2287 /** 2288 * Splits nodes in a tree bin into lower and upper tree bins, 2289 * or untreeifies if now too small. Called only from resize; 2290 * see above discussion about split bits and indices. 
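     * <p>For example, when a table of 16 buckets doubles to 32 ({@code bit == 16}),
     * a node in bin 3 whose hash is 67 stays in the low list ({@code (67 & 16) == 0})
     * and remains at index 3, while a node whose hash is 83 joins the high list
     * ({@code (83 & 16) != 0}) and moves to index {@code 3 + 16}, that is 19. Each
     * resulting list is then untreeified if it holds no more than
     * {@code UNTREEIFY_THRESHOLD} nodes, and otherwise kept in tree form.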
2291 * 2292 * @param map the map 2293 * @param tab the table for recording bin heads 2294 * @param index the index of the table being split 2295 * @param bit the bit of hash to split on 2296 */ 2297 final void split(HashMap<K,V> map, Node<K,V>[] tab, int index, int bit) { 2298 TreeNode<K,V> b = this; 2299 // Relink into lo and hi lists, preserving order 2300 TreeNode<K,V> loHead = null, loTail = null; 2301 TreeNode<K,V> hiHead = null, hiTail = null; 2302 int lc = 0, hc = 0; 2303 for (TreeNode<K,V> e = b, next; e != null; e = next) { 2304 next = (TreeNode<K,V>)e.next; 2305 e.next = null; 2306 if ((e.hash & bit) == 0) { 2307 if ((e.prev = loTail) == null) 2308 loHead = e; 2309 else 2310 loTail.next = e; 2311 loTail = e; 2312 ++lc; 2313 } 2314 else { 2315 if ((e.prev = hiTail) == null) 2316 hiHead = e; 2317 else 2318 hiTail.next = e; 2319 hiTail = e; 2320 ++hc; 2321 } 2322 } 2323 2324 if (loHead != null) { 2325 if (lc <= UNTREEIFY_THRESHOLD) 2326 tab[index] = loHead.untreeify(map); 2327 else { 2328 tab[index] = loHead; 2329 if (hiHead != null) // (else is already treeified) 2330 loHead.treeify(tab); 2331 } 2332 } 2333 if (hiHead != null) { 2334 if (hc <= UNTREEIFY_THRESHOLD) 2335 tab[index + bit] = hiHead.untreeify(map); 2336 else { 2337 tab[index + bit] = hiHead; 2338 if (loHead != null) 2339 hiHead.treeify(tab); 2340 } 2341 } 2342 } 2343 2344 /* ------------------------------------------------------------ */ 2345 // Red-black tree methods, all adapted from CLR 2346 2347 static <K,V> TreeNode<K,V> rotateLeft(TreeNode<K,V> root, 2348 TreeNode<K,V> p) { 2349 TreeNode<K,V> r, pp, rl; 2350 if (p != null && (r = p.right) != null) { 2351 if ((rl = p.right = r.left) != null) 2352 rl.parent = p; 2353 if ((pp = r.parent = p.parent) == null) 2354 (root = r).red = false; 2355 else if (pp.left == p) 2356 pp.left = r; 2357 else 2358 pp.right = r; 2359 r.left = p; 2360 p.parent = r; 2361 } 2362 return root; 2363 } 2364 2365 static <K,V> TreeNode<K,V> rotateRight(TreeNode<K,V> root, 2366 TreeNode<K,V> p) { 2367 TreeNode<K,V> l, pp, lr; 2368 if (p != null && (l = p.left) != null) { 2369 if ((lr = p.left = l.right) != null) 2370 lr.parent = p; 2371 if ((pp = l.parent = p.parent) == null) 2372 (root = l).red = false; 2373 else if (pp.right == p) 2374 pp.right = l; 2375 else 2376 pp.left = l; 2377 l.right = p; 2378 p.parent = l; 2379 } 2380 return root; 2381 } 2382 2383 static <K,V> TreeNode<K,V> balanceInsertion(TreeNode<K,V> root, 2384 TreeNode<K,V> x) { 2385 x.red = true; 2386 for (TreeNode<K,V> xp, xpp, xppl, xppr;;) { 2387 if ((xp = x.parent) == null) { 2388 x.red = false; 2389 return x; 2390 } 2391 else if (!xp.red || (xpp = xp.parent) == null) 2392 return root; 2393 if (xp == (xppl = xpp.left)) { 2394 if ((xppr = xpp.right) != null && xppr.red) { 2395 xppr.red = false; 2396 xp.red = false; 2397 xpp.red = true; 2398 x = xpp; 2399 } 2400 else { 2401 if (x == xp.right) { 2402 root = rotateLeft(root, x = xp); 2403 xpp = (xp = x.parent) == null ? null : xp.parent; 2404 } 2405 if (xp != null) { 2406 xp.red = false; 2407 if (xpp != null) { 2408 xpp.red = true; 2409 root = rotateRight(root, xpp); 2410 } 2411 } 2412 } 2413 } 2414 else { 2415 if (xppl != null && xppl.red) { 2416 xppl.red = false; 2417 xp.red = false; 2418 xpp.red = true; 2419 x = xpp; 2420 } 2421 else { 2422 if (x == xp.left) { 2423 root = rotateRight(root, x = xp); 2424 xpp = (xp = x.parent) == null ? 
null : xp.parent; 2425 } 2426 if (xp != null) { 2427 xp.red = false; 2428 if (xpp != null) { 2429 xpp.red = true; 2430 root = rotateLeft(root, xpp); 2431 } 2432 } 2433 } 2434 } 2435 } 2436 } 2437 2438 static <K,V> TreeNode<K,V> balanceDeletion(TreeNode<K,V> root, 2439 TreeNode<K,V> x) { 2440 for (TreeNode<K,V> xp, xpl, xpr;;) { 2441 if (x == null || x == root) 2442 return root; 2443 else if ((xp = x.parent) == null) { 2444 x.red = false; 2445 return x; 2446 } 2447 else if (x.red) { 2448 x.red = false; 2449 return root; 2450 } 2451 else if ((xpl = xp.left) == x) { 2452 if ((xpr = xp.right) != null && xpr.red) { 2453 xpr.red = false; 2454 xp.red = true; 2455 root = rotateLeft(root, xp); 2456 xpr = (xp = x.parent) == null ? null : xp.right; 2457 } 2458 if (xpr == null) 2459 x = xp; 2460 else { 2461 TreeNode<K,V> sl = xpr.left, sr = xpr.right; 2462 if ((sr == null || !sr.red) && 2463 (sl == null || !sl.red)) { 2464 xpr.red = true; 2465 x = xp; 2466 } 2467 else { 2468 if (sr == null || !sr.red) { 2469 if (sl != null) 2470 sl.red = false; 2471 xpr.red = true; 2472 root = rotateRight(root, xpr); 2473 xpr = (xp = x.parent) == null ? 2474 null : xp.right; 2475 } 2476 if (xpr != null) { 2477 xpr.red = (xp == null) ? false : xp.red; 2478 if ((sr = xpr.right) != null) 2479 sr.red = false; 2480 } 2481 if (xp != null) { 2482 xp.red = false; 2483 root = rotateLeft(root, xp); 2484 } 2485 x = root; 2486 } 2487 } 2488 } 2489 else { // symmetric 2490 if (xpl != null && xpl.red) { 2491 xpl.red = false; 2492 xp.red = true; 2493 root = rotateRight(root, xp); 2494 xpl = (xp = x.parent) == null ? null : xp.left; 2495 } 2496 if (xpl == null) 2497 x = xp; 2498 else { 2499 TreeNode<K,V> sl = xpl.left, sr = xpl.right; 2500 if ((sl == null || !sl.red) && 2501 (sr == null || !sr.red)) { 2502 xpl.red = true; 2503 x = xp; 2504 } 2505 else { 2506 if (sl == null || !sl.red) { 2507 if (sr != null) 2508 sr.red = false; 2509 xpl.red = true; 2510 root = rotateLeft(root, xpl); 2511 xpl = (xp = x.parent) == null ? 2512 null : xp.left; 2513 } 2514 if (xpl != null) { 2515 xpl.red = (xp == null) ? false : xp.red; 2516 if ((sl = xpl.left) != null) 2517 sl.red = false; 2518 } 2519 if (xp != null) { 2520 xp.red = false; 2521 root = rotateRight(root, xp); 2522 } 2523 x = root; 2524 } 2525 } 2526 } 2527 } 2528 } 2529 2530 /** 2531 * Recursive invariant check 2532 */ 2533 static <K,V> boolean checkInvariants(TreeNode<K,V> t) { 2534 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, 2535 tb = t.prev, tn = (TreeNode<K,V>)t.next; 2536 if (tb != null && tb.next != t) 2537 return false; 2538 if (tn != null && tn.prev != t) 2539 return false; 2540 if (tp != null && t != tp.left && t != tp.right) 2541 return false; 2542 if (tl != null && (tl.parent != t || tl.hash > t.hash)) 2543 return false; 2544 if (tr != null && (tr.parent != t || tr.hash < t.hash)) 2545 return false; 2546 if (t.red && tl != null && tl.red && tr != null && tr.red) 2547 return false; 2548 if (tl != null && !checkInvariants(tl)) 2549 return false; 2550 if (tr != null && !checkInvariants(tr)) 2551 return false; 2552 return true; 2553 } 2554 } 2555 2556 /** 2557 * Calculate initial capacity for HashMap based classes, from expected size and default load factor (0.75). 2558 * 2559 * @param numMappings the expected number of mappings 2560 * @return initial capacity for HashMap based classes. 
2561 * @since 19 2562 */ 2563 static int calculateHashMapCapacity(int numMappings) { 2564 return (int) Math.ceil(numMappings / (double) DEFAULT_LOAD_FACTOR); 2565 } 2566 2567 /** 2568 * Creates a new, empty HashMap suitable for the expected number of mappings. 2569 * The returned map uses the default load factor of 0.75, and its initial capacity is 2570 * generally large enough so that the expected number of mappings can be added 2571 * without resizing the map. 2572 * 2573 * @param numMappings the expected number of mappings 2574 * @param <K> the type of keys maintained by the new map 2575 * @param <V> the type of mapped values 2576 * @return the newly created map 2577 * @throws IllegalArgumentException if numMappings is negative 2578 * @since 19 2579 */ 2580 public static <K, V> HashMap<K, V> newHashMap(int numMappings) { 2581 if (numMappings < 0) { 2582 throw new IllegalArgumentException("Negative number of mappings: " + numMappings); 2583 } 2584 return new HashMap<>(calculateHashMapCapacity(numMappings)); 2585 } 2586 2587 }
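/*
 * A usage sketch for the newHashMap factory above. With the default load factor
 * of 0.75, calculateHashMapCapacity(1000) yields ceil(1000 / 0.75) = 1334, which
 * the HashMap(int) constructor rounds up to the next power of two, 2048, giving a
 * resize threshold of 1536; the 1000 expected mappings can therefore be inserted
 * without an intermediate resize:
 *
 *     Map<String, Integer> counts = HashMap.newHashMap(1000);
 *     for (String w : words)                   // 'words': any Iterable<String>
 *         counts.merge(w, 1, Integer::sum);    // with up to ~1000 distinct keys
 *                                              // (assumed for illustration)
 *
 * By contrast, new HashMap<>(1000) starts with 1024 buckets and a threshold of
 * 768, so the same 1000 insertions would trigger a resize partway through.
 */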