1 /* 2 * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. Oracle designates this 8 * particular file as subject to the "Classpath" exception as provided 9 * by Oracle in the LICENSE file that accompanied this code. 10 * 11 * This code is distributed in the hope that it will be useful, but WITHOUT 12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14 * version 2 for more details (a copy is included in the LICENSE file that 15 * accompanied this code). 16 * 17 * You should have received a copy of the GNU General Public License version 18 * 2 along with this work; if not, write to the Free Software Foundation, 19 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 20 * 21 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 22 * or visit www.oracle.com if you need additional information or have any 23 * questions. 
24 */ 25 26 package org.openjdk.bench.valhalla.sandbox.corelibs.corelibs.mapprotos; 27 28 import java.io.IOException; 29 import java.io.InvalidObjectException; 30 import java.io.PrintStream; 31 import java.io.Serializable; 32 import java.lang.reflect.InvocationTargetException; 33 import java.lang.reflect.Method; 34 import java.lang.reflect.ParameterizedType; 35 import java.lang.reflect.Type; 36 import java.util.AbstractCollection; 37 //import java.util.AbstractMap; 38 import java.util.AbstractSet; 39 import java.util.Arrays; 40 import java.util.Collection; 41 import java.util.Collections; 42 import java.util.ConcurrentModificationException; 43 import java.util.Hashtable; 44 import java.util.Iterator; 45 import java.util.Map; 46 import java.util.Objects; 47 import java.util.Optional; 48 import java.util.TreeMap; 49 import java.util.NoSuchElementException; 50 import java.util.Set; 51 import java.util.function.BiConsumer; 52 import java.util.function.BiFunction; 53 import java.util.function.Consumer; 54 import java.util.function.Function; 55 56 /** 57 * Hash map implementation that uses inline class entries in the initial table 58 * and maintains a link list of separate Node entries for key/value pairs 59 * that have the same hash value. The handling of the link list is the same 60 * as the original java.util.HashMap. 61 * The primary entry array is larger than in HashMap due to the inline storage 62 * of the entries but since it replaces the separate Node instance for the first 63 * Node, the overall memory usage is less for a reasonably full table. 64 * The TreeNode organization is not yet implemented. 65 * <p> 66 * Hash table based implementation of the {@code Map} interface. This 67 * implementation provides all of the optional map operations, and permits 68 * {@code null} values and the {@code null} key. (The {@code HashMap} 69 * class is roughly equivalent to {@code Hashtable}, except that it is 70 * unsynchronized and permits nulls.) 
This class makes no guarantees as to 71 * the order of the map; in particular, it does not guarantee that the order 72 * will remain constant over time. 73 * 74 * <p>This implementation provides constant-time performance for the basic 75 * operations ({@code get} and {@code put}), assuming the hash function 76 * disperses the elements properly among the buckets. Iteration over 77 * collection views requires time proportional to the "capacity" of the 78 * {@code HashMap} instance (the number of buckets) plus its size (the number 79 * of key-value mappings). Thus, it's very important not to set the initial 80 * capacity too high (or the load factor too low) if iteration performance is 81 * important. 82 * 83 * <p>An instance of {@code HashMap} has two parameters that affect its 84 * performance: <i>initial capacity</i> and <i>load factor</i>. The 85 * <i>capacity</i> is the number of buckets in the hash table, and the initial 86 * capacity is simply the capacity at the time the hash table is created. The 87 * <i>load factor</i> is a measure of how full the hash table is allowed to 88 * get before its capacity is automatically increased. When the number of 89 * entries in the hash table exceeds the product of the load factor and the 90 * current capacity, the hash table is <i>rehashed</i> (that is, internal data 91 * structures are rebuilt) so that the hash table has approximately twice the 92 * number of buckets. 93 * 94 * <p>As a general rule, the default load factor (.75) offers a good 95 * tradeoff between time and space costs. Higher values decrease the 96 * space overhead but increase the lookup cost (reflected in most of 97 * the operations of the {@code HashMap} class, including 98 * {@code get} and {@code put}). The expected number of entries in 99 * the map and its load factor should be taken into account when 100 * setting its initial capacity, so as to minimize the number of 101 * rehash operations. 
If the initial capacity is greater than the 102 * maximum number of entries divided by the load factor, no rehash 103 * operations will ever occur. 104 * 105 * <p>If many mappings are to be stored in a {@code HashMap} 106 * instance, creating it with a sufficiently large capacity will allow 107 * the mappings to be stored more efficiently than letting it perform 108 * automatic rehashing as needed to grow the table. Note that using 109 * many keys with the same {@code hashCode()} is a sure way to slow 110 * down performance of any hash table. To ameliorate impact, when keys 111 * are {@link Comparable}, this class may use comparison order among 112 * keys to help break ties. 113 * 114 * <p><strong>Note that this implementation is not synchronized.</strong> 115 * If multiple threads access a hash map concurrently, and at least one of 116 * the threads modifies the map structurally, it <i>must</i> be 117 * synchronized externally. (A structural modification is any operation 118 * that adds or deletes one or more mappings; merely changing the value 119 * associated with a key that an instance already contains is not a 120 * structural modification.) This is typically accomplished by 121 * synchronizing on some object that naturally encapsulates the map. 122 * 123 * If no such object exists, the map should be "wrapped" using the 124 * {@link Collections#synchronizedMap Collections.synchronizedMap} 125 * method. This is best done at creation time, to prevent accidental 126 * unsynchronized access to the map:<pre> 127 * Map m = Collections.synchronizedMap(new HashMap(...));</pre> 128 * 129 * <p>The iterators returned by all of this class's "collection view methods" 130 * are <i>fail-fast</i>: if the map is structurally modified at any time after 131 * the iterator is created, in any way except through the iterator's own 132 * {@code remove} method, the iterator will throw a 133 * {@link ConcurrentModificationException}. 
Thus, in the face of concurrent 134 * modification, the iterator fails quickly and cleanly, rather than risking 135 * arbitrary, non-deterministic behavior at an undetermined time in the 136 * future. 137 * 138 * <p>Note that the fail-fast behavior of an iterator cannot be guaranteed 139 * as it is, generally speaking, impossible to make any hard guarantees in the 140 * presence of unsynchronized concurrent modification. Fail-fast iterators 141 * throw {@code ConcurrentModificationException} on a best-effort basis. 142 * Therefore, it would be wrong to write a program that depended on this 143 * exception for its correctness: <i>the fail-fast behavior of iterators 144 * should be used only to detect bugs.</i> 145 * 146 * <p>This class is a member of the 147 * <a href="{@docRoot}/java.base/java/util/package-summary.html#CollectionsFramework"> 148 * Java Collections Framework</a>. 149 * 150 * @param <K> the type of keys maintained by this map 151 * @param <V> the type of mapped values 152 * 153 * @author Doug Lea 154 * @author Josh Bloch 155 * @author Arthur van Hoff 156 * @author Neal Gafter 157 * @see Object#hashCode() 158 * @see Collection 159 * @see Map 160 * @see TreeMap 161 * @see Hashtable 162 * @since 1.2 163 */ 164 public class XHashMap<K,V> extends XAbstractMap<K,V> 165 implements Map<K,V>, Cloneable, Serializable { 166 167 private static final long serialVersionUID = 362498820763181265L; 168 169 /* 170 * Implementation notes. 171 * 172 * This map usually acts as a binned (bucketed) hash table, but 173 * when bins get too large, they are transformed into bins of 174 * TreeNodes, each structured similarly to those in 175 * java.util.TreeMap. Most methods try to use normal bins, but 176 * relay to TreeNode methods when applicable (simply by checking 177 * instanceof a node). Bins of TreeNodes may be traversed and 178 * used like any others, but additionally support faster lookup 179 * when overpopulated. 
However, since the vast majority of bins in 180 * normal use are not overpopulated, checking for existence of 181 * tree bins may be delayed in the course of table methods. 182 * 183 * Tree bins (i.e., bins whose elements are all TreeNodes) are 184 * ordered primarily by hashCode, but in the case of ties, if two 185 * elements are of the same "class C implements Comparable<C>", 186 * type then their compareTo method is used for ordering. (We 187 * conservatively check generic types via reflection to validate 188 * this -- see method comparableClassFor). The added complexity 189 * of tree bins is worthwhile in providing worst-case O(log n) 190 * operations when keys either have distinct hashes or are 191 * orderable, Thus, performance degrades gracefully under 192 * accidental or malicious usages in which hashCode() methods 193 * return values that are poorly distributed, as well as those in 194 * which many keys share a hashCode, so long as they are also 195 * Comparable. (If neither of these apply, we may waste about a 196 * factor of two in time and space compared to taking no 197 * precautions. But the only known cases stem from poor user 198 * programming practices that are already so slow that this makes 199 * little difference.) 200 * 201 * Because TreeNodes are about twice the size of regular nodes, we 202 * use them only when bins contain enough nodes to warrant use 203 * (see TREEIFY_THRESHOLD). And when they become too small (due to 204 * removal or resizing) they are converted back to plain bins. In 205 * usages with well-distributed user hashCodes, tree bins are 206 * rarely used. Ideally, under random hashCodes, the frequency of 207 * nodes in bins follows a Poisson distribution 208 * (http://en.wikipedia.org/wiki/Poisson_distribution) with a 209 * parameter of about 0.5 on average for the default resizing 210 * threshold of 0.75, although with a large variance because of 211 * resizing granularity. 
Ignoring variance, the expected 212 * occurrences of list size k are (exp(-0.5) * pow(0.5, k) / 213 * factorial(k)). The first values are: 214 * 215 * 0: 0.60653066 216 * 1: 0.30326533 217 * 2: 0.07581633 218 * 3: 0.01263606 219 * 4: 0.00157952 220 * 5: 0.00015795 221 * 6: 0.00001316 222 * 7: 0.00000094 223 * 8: 0.00000006 224 * more: less than 1 in ten million 225 * 226 * The root of a tree bin is normally its first node. However, 227 * sometimes (currently only upon Iterator.remove), the root might 228 * be elsewhere, but can be recovered following parent links 229 * (method TreeNode.root()). 230 * 231 * All applicable internal methods accept a hash code as an 232 * argument (as normally supplied from a public method), allowing 233 * them to call each other without recomputing user hashCodes. 234 * Most internal methods also accept a "tab" argument, that is 235 * normally the current table, but may be a new or old one when 236 * resizing or converting. 237 * 238 * When bin lists are treeified, split, or untreeified, we keep 239 * them in the same relative access/traversal order (i.e., field 240 * Node.next) to better preserve locality, and to slightly 241 * simplify handling of splits and traversals that invoke 242 * iterator.remove. When using comparators on insertion, to keep a 243 * total ordering (or as close as is required here) across 244 * rebalancings, we compare classes and identityHashCodes as 245 * tie-breakers. 246 * 247 * The use and transitions among plain vs tree modes is 248 * complicated by the existence of subclass LinkedHashMap. See 249 * below for hook methods defined to be invoked upon insertion, 250 * removal and access that allow LinkedHashMap internals to 251 * otherwise remain independent of these mechanics. (This also 252 * requires that a map instance be passed to some utility methods 253 * that may create new nodes.) 
254 * 255 * The concurrent-programming-like SSA-based coding style helps 256 * avoid aliasing errors amid all of the twisty pointer operations. 257 */ 258 259 /** 260 * The default initial capacity - MUST be a power of two. 261 */ 262 static final int DEFAULT_INITIAL_CAPACITY = 1 << 4; // aka 16 263 264 /** 265 * The maximum capacity, used if a higher value is implicitly specified 266 * by either of the constructors with arguments. 267 * MUST be a power of two <= 1<<30. 268 */ 269 static final int MAXIMUM_CAPACITY = 1 << 30; 270 271 /** 272 * The load factor used when none specified in constructor. 273 */ 274 static final float DEFAULT_LOAD_FACTOR = 0.75f; 275 276 /** 277 * The bin count threshold for using a tree rather than list for a 278 * bin. Bins are converted to trees when adding an element to a 279 * bin with at least this many nodes. The value must be greater 280 * than 2 and should be at least 8 to mesh with assumptions in 281 * tree removal about conversion back to plain bins upon 282 * shrinkage. 283 */ 284 static final int TREEIFY_THRESHOLD = 8; 285 286 /** 287 * The bin count threshold for untreeifying a (split) bin during a 288 * resize operation. Should be less than TREEIFY_THRESHOLD, and at 289 * most 6 to mesh with shrinkage detection under removal. 290 */ 291 static final int UNTREEIFY_THRESHOLD = 6; 292 293 /** 294 * The smallest table capacity for which bins may be treeified. 295 * (Otherwise the table is resized if too many nodes in a bin.) 296 * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts 297 * between resizing and treeification thresholds. 298 */ 299 static final int MIN_TREEIFY_CAPACITY = 64; 300 301 private XNode<K,V> emptyXNode() { 302 return new XNode(); 303 } 304 /** 305 * Basic hash bin node, used for most entries. (See below for 306 * TreeNode subclass, and in LinkedHashMap for its Entry subclass.) 
     */
    // Inline (value-class) entry stored directly in the table array.
    // Mirrors Node's fields; the separate `next` chain holds overflow
    // Nodes for keys that collide with this bucket's inline entry.
    static value class XNode<K,V> implements Map.Entry<K,V> {
        final int hash;   // spread hash of key (see hash(Object))
        final K key;
        V value;
        Node<K,V> next;   // head of this bucket's overflow chain, or null

        // All-default instance: serves as the "empty slot" sentinel
        // recognized by isEmpty().
        XNode() {
            this.hash = 0;
            this.key = null;
            this.value = null;
            this.next = null;
        }

        XNode(int hash, K key, V value, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.value = value;
            this.next = next;
        }

        // True for the sentinel produced by the no-arg constructor.
        // NOTE(review): a genuine mapping null -> null also has hash==0,
        // key==null, value==null, so it is indistinguishable from an empty
        // slot here even though the class javadoc permits null keys and
        // values -- confirm null->null mappings are out of scope.
        boolean isEmpty() {
            return hash == 0 && key == null && value == null;
        }
        public final K getKey() { return key; }
        public final V getValue() { return value; }
        public final String toString() { return key + "=" + value; }

        // Map.Entry hash contract: keyHash XOR valueHash.
        public final int hashCode() {
            return Objects.hashCode(key) ^ Objects.hashCode(value);
        }

        // Inline entries are replaced wholesale in the table rather than
        // mutated in place; write-through goes via XNodeWrapper.setValue.
        public final V setValue(V newValue) {
            throw new IllegalStateException("XNode cannot set a value");
            // V oldValue = value;
            // value = newValue;
            // return oldValue;
        }

        // Map.Entry equality: both key and value must match.
        public final boolean equals(Object o) {
            if (o instanceof Map.Entry) {
                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
                if (Objects.equals(key, e.getKey()) &&
                    Objects.equals(value, e.getValue()))
                    return true;
            }
            return false;
        }
    }

    /**
     * Basic hash bin node, used for overflow entries.  (See below for
     * TreeNode subclass, and in LinkedHashMap for its Entry subclass.
360 */ 361 static class Node<K,V> implements Map.Entry<K,V> { 362 final int hash; 363 final K key; 364 V value; 365 Node<K,V> next; 366 367 Node(int hash, K key, V value, Node<K,V> next) { 368 this.hash = hash; 369 this.key = key; 370 this.value = value; 371 this.next = next; 372 } 373 374 public final K getKey() { return key; } 375 public final V getValue() { return value; } 376 public final String toString() { return key + "=" + value; } 377 public final int hashCode() { 378 return Objects.hashCode(key) ^ Objects.hashCode(value); 379 } 380 381 public final V setValue(V newValue) { 382 V oldValue = value; 383 value = newValue; 384 return oldValue; 385 } 386 387 public final boolean equals(Object o) { 388 if (o == this) 389 return true; 390 if (o instanceof Map.Entry) { 391 Map.Entry<?,?> e = (Map.Entry<?,?>)o; 392 if (Objects.equals(key, e.getKey()) && 393 Objects.equals(value, e.getValue())) 394 return true; 395 } 396 return false; 397 } 398 } 399 400 value class XNodeWrapper implements Map.Entry<K,V> { 401 int index; 402 403 XNodeWrapper(int index) { 404 this.index = index; 405 } 406 407 public K getKey() { 408 XNode<K,V> e = table[index]; 409 return e.isEmpty() ? null : e.key; 410 } 411 412 public V getValue() { 413 XNode<K,V> e = table[index]; 414 return e.isEmpty() ? null : e.value; 415 } 416 417 /** 418 * Replaces the value corresponding to this entry with the specified 419 * value (optional operation). (Writes through to the map.) The 420 * behavior of this call is undefined if the mapping has already been 421 * removed from the map (by the iterator's {@code remove} operation). 
422 * 423 * @param value new value to be stored in this entry 424 * @return old value corresponding to the entry 425 * @throws UnsupportedOperationException if the {@code put} operation 426 * is not supported by the backing map 427 * @throws ClassCastException if the class of the specified value 428 * prevents it from being stored in the backing map 429 * @throws NullPointerException if the backing map does not permit 430 * null values, and the specified value is null 431 * @throws IllegalArgumentException if some property of this value 432 * prevents it from being stored in the backing map 433 * @throws IllegalStateException implementations may, but are not 434 * required to, throw this exception if the entry has been 435 * removed from the backing map. 436 */ 437 public V setValue(V value) { 438 XNode<K,V> e = table[index]; 439 assert !e.isEmpty(); 440 table[index] = new XNode(e.hash, e.key, value, e.next); 441 return e.value; 442 } 443 } 444 /* ---------------- Static utilities -------------- */ 445 446 /** 447 * Computes key.hashCode() and spreads (XORs) higher bits of hash 448 * to lower. Because the table uses power-of-two masking, sets of 449 * hashes that vary only in bits above the current mask will 450 * always collide. (Among known examples are sets of Float keys 451 * holding consecutive whole numbers in small tables.) So we 452 * apply a transform that spreads the impact of higher bits 453 * downward. There is a tradeoff between speed, utility, and 454 * quality of bit-spreading. Because many common sets of hashes 455 * are already reasonably distributed (so don't benefit from 456 * spreading), and because we use trees to handle large sets of 457 * collisions in bins, we just XOR some shifted bits in the 458 * cheapest possible way to reduce systematic lossage, as well as 459 * to incorporate impact of the highest bits that would otherwise 460 * never be used in index calculations because of table bounds. 
     */
    static final int hash(Object key) {
        int h;
        // Null key hashes to 0 (bucket 0); otherwise XOR the high 16 bits
        // into the low 16 so power-of-two index masking sees the whole hash.
        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
    }

    /**
     * Returns x's Class if it is of the form "class C implements
     * Comparable<C>", else null.
     */
    static Class<?> comparableClassFor(Object x) {
        if (x instanceof Comparable) {
            Class<?> c; Type[] ts, as; ParameterizedType p;
            if ((c = x.getClass()) == String.class) // bypass checks
                return c;
            if ((ts = c.getGenericInterfaces()) != null) {
                for (Type t : ts) {
                    // Accept only a direct "implements Comparable<C>" whose
                    // single type argument is the class itself.
                    if ((t instanceof ParameterizedType) &&
                        ((p = (ParameterizedType) t).getRawType() ==
                         Comparable.class) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }

    /**
     * Returns k.compareTo(x) if x matches kc (k's screened comparable
     * class), else 0.
     */
    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
    static int compareComparables(Class<?> kc, Object k, Object x) {
        return (x == null || x.getClass() != kc ? 0 :
                ((Comparable)k).compareTo(x));
    }

    /**
     * Returns a power of two size for the given target capacity.
     */
    static final int tableSizeFor(int cap) {
        // Fill every bit below the highest set bit of (cap - 1), then add 1;
        // clamps to [1, MAXIMUM_CAPACITY].
        int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1);
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }

    /* ---------------- Fields -------------- */

    /**
     * The table, initialized on first use, and resized as
     * necessary. When allocated, length is always a power of two.
     * (We also tolerate length zero in some operations to allow
     * bootstrapping mechanics that are currently not needed.)
     */
    transient XNode<K,V>[] table;

    /**
     * Holds cached entrySet().  Note that AbstractMap fields are used
     * for keySet() and values().
     */
    transient Set<Map.Entry<K,V>> entrySet;

    /**
     * The number of key-value mappings contained in this map.
     */
    transient int size;

    /**
     * The number of times this HashMap has been structurally modified
     * Structural modifications are those that change the number of mappings in
     * the HashMap or otherwise modify its internal structure (e.g.,
     * rehash).  This field is used to make iterators on Collection-views of
     * the HashMap fail-fast.  (See ConcurrentModificationException).
     */
    transient int modCount;

    /**
     * The next size value at which to resize (capacity * load factor).
     *
     * @serial
     */
    // (The javadoc description is true upon serialization.
    // Additionally, if the table array has not been allocated, this
    // field holds the initial array capacity, or zero signifying
    // DEFAULT_INITIAL_CAPACITY.)
    int threshold;

    /**
     * The load factor for the hash table.
     *
     * @serial
     */
    final float loadFactor;

    /* ---------------- Public operations -------------- */

    /**
     * Constructs an empty {@code HashMap} with the specified initial
     * capacity and load factor.
561 * 562 * @param initialCapacity the initial capacity 563 * @param loadFactor the load factor 564 * @throws IllegalArgumentException if the initial capacity is negative 565 * or the load factor is nonpositive 566 */ 567 public XHashMap(int initialCapacity, float loadFactor) { 568 if (initialCapacity < 0) 569 throw new IllegalArgumentException("Illegal initial capacity: " + 570 initialCapacity); 571 if (initialCapacity > MAXIMUM_CAPACITY) 572 initialCapacity = MAXIMUM_CAPACITY; 573 if (loadFactor <= 0 || Float.isNaN(loadFactor)) 574 throw new IllegalArgumentException("Illegal load factor: " + 575 loadFactor); 576 this.loadFactor = loadFactor; 577 this.threshold = tableSizeFor(initialCapacity); 578 } 579 580 /** 581 * Constructs an empty {@code HashMap} with the specified initial 582 * capacity and the default load factor (0.75). 583 * 584 * @param initialCapacity the initial capacity. 585 * @throws IllegalArgumentException if the initial capacity is negative. 586 */ 587 public XHashMap(int initialCapacity) { 588 this(initialCapacity, DEFAULT_LOAD_FACTOR); 589 } 590 591 /** 592 * Constructs an empty {@code HashMap} with the default initial capacity 593 * (16) and the default load factor (0.75). 594 */ 595 public XHashMap() { 596 this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted 597 } 598 599 /** 600 * Constructs a new {@code HashMap} with the same mappings as the 601 * specified {@code Map}. The {@code HashMap} is created with 602 * default load factor (0.75) and an initial capacity sufficient to 603 * hold the mappings in the specified {@code Map}. 604 * 605 * @param m the map whose mappings are to be placed in this map 606 * @throws NullPointerException if the specified map is null 607 */ 608 public XHashMap(Map<? extends K, ? extends V> m) { 609 this.loadFactor = DEFAULT_LOAD_FACTOR; 610 putMapEntries(m, false); 611 } 612 613 /** 614 * Implements Map.putAll and Map constructor. 
     *
     * @param m the map
     * @param evict false when initially constructing this map, else true.
     */
    final void putMapEntries(Map<? extends K, ? extends V> m, boolean evict) {
        int s = m.size();
        if (s > 0) {
            if (table == null) { // pre-size
                // Smallest capacity whose threshold (cap * loadFactor)
                // holds s entries; +1 compensates for float truncation.
                float ft = ((float)s / loadFactor) + 1.0F;
                int t = ((ft < (float)MAXIMUM_CAPACITY) ?
                         (int)ft : MAXIMUM_CAPACITY);
                if (t > threshold)
                    threshold = tableSizeFor(t);
            } else {
                // Because of linked-list bucket constraints, we cannot
                // expand all at once, but can reduce total resize
                // effort by repeated doubling now vs later
                while (s > threshold && table.length < MAXIMUM_CAPACITY)
                    resize();
            }

            // Insert every mapping; onlyIfAbsent=false so existing keys
            // are overwritten, matching putAll semantics.
            for (Map.Entry<? extends K, ? extends V> e : m.entrySet()) {
                K key = e.getKey();
                V value = e.getValue();
                putVal(hash(key), key, value, false, evict);
            }
        }
    }

    /**
     * Returns the number of key-value mappings in this map.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        return size;
    }

    /**
     * Returns {@code true} if this map contains no key-value mappings.
     *
     * @return {@code true} if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        return size == 0;
    }

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code (key==null ? k==null :
     * key.equals(k))}, then this method returns {@code v}; otherwise
     * it returns {@code null}.  (There can be at most one such mapping.)
     *
     * <p>A return value of {@code null} does not <i>necessarily</i>
     * indicate that the map contains no mapping for the key; it's also
     * possible that the map explicitly maps the key to {@code null}.
     * The {@link #containsKey containsKey} operation may be used to
     * distinguish these two cases.
     *
     * @see #put(Object, Object)
     */
    public V get(Object key) {
        int hash = hash(key);
        Node<K,V> e;
        // Probe the inline table entry first; only on a miss walk the
        // overflow chain via getNode.
        XNode<K,V> n = getXNode(hash, key);
        return (!n.isEmpty()) ? n.value
                : (e = getNode(hash, key)) == null ? null : e.value;
    }

    /**
     * Implements Map.get and related methods.
     *
     * @param hash hash for key
     * @param key the key
     * @return the node, or emptyXNode() if not at top level.
     */
    final XNode<K,V> getXNode(int hash, Object key) {
        XNode<K, V>[] tab;
        XNode<K, V> first;
        int n;
        K k;
        // Only the inline (first) entry of the bucket is examined here;
        // overflow-chain lookup is getNode's job.
        if ((tab = table) != null && (n = tab.length) > 0 &&
            !(first = tab[(n - 1) & hash]).isEmpty()) {
            if (first.hash == hash && // always check first node
                ((k = first.key) == key || (key != null && key.equals(k))))
                return first;
        }
        return emptyXNode();
    }

    /**
     * Implements Map.get and related methods when the key is not found in the primary entry.
     *
     * @param hash hash for key
     * @param key the key
     * @return the node, or null if none
     */
    final Node<K,V> getNode(int hash, Object key) {
        XNode<K,V>[] tab; XNode<K,V> first; Node<K,V> e; int n; K k;
        if ((tab = table) != null && (n = tab.length) > 0 &&
            !(first = tab[(n - 1) & hash]).isEmpty()) {
            if ((e = first.next) != null) {
                // Tree bins delegate to the TreeNode lookup; otherwise walk
                // the linked overflow chain.
                if (e instanceof TreeNode)
                    return ((TreeNode<K,V>)e).getTreeNode(hash, key);
                do {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        return e;
                } while ((e = e.next) != null);
            }
        }
        return null;
    }

    /**
     * Returns {@code true} if this map contains a mapping for the
     * specified key.
     *
     * @param   key   The key whose presence in this map is to be tested
     * @return {@code true} if this map contains a mapping for the specified
     * key.
739 */ 740 public boolean containsKey(Object key) { 741 int hash = hash(key); 742 Node<K,V> e; 743 XNode<K,V> n = getXNode(hash, key); 744 return !n.isEmpty() || (e = getNode(hash, key)) != null; 745 } 746 747 /** 748 * Associates the specified value with the specified key in this map. 749 * If the map previously contained a mapping for the key, the old 750 * value is replaced. 751 * 752 * @param key key with which the specified value is to be associated 753 * @param value value to be associated with the specified key 754 * @return the previous value associated with {@code key}, or 755 * {@code null} if there was no mapping for {@code key}. 756 * (A {@code null} return can also indicate that the map 757 * previously associated {@code null} with {@code key}.) 758 */ 759 public V put(K key, V value) { 760 return putVal(hash(key), key, value, false, true); 761 } 762 763 /** 764 * Implements Map.put and related methods. 765 * 766 * @param hash hash for key 767 * @param key the key 768 * @param value the value to put 769 * @param onlyIfAbsent if true, don't change existing value 770 * @param evict if false, the table is in creation mode. 
* @return previous value, or null if none
     */
    // Core insertion routine shared by put, putIfAbsent, putAll and
    // deserialization.  The first entry of a bucket lives inline in the
    // XNode table slot; colliding entries hang off slot.next as a plain
    // Node chain.  XNode is treated as immutable here: inline updates
    // always rebuild the slot with a fresh XNode.
    final V putVal(int hash, K key, V value, boolean onlyIfAbsent,
                   boolean evict) {
        XNode<K,V>[] tab; XNode<K,V> tp; int n, i;
        if ((tab = table) == null || (n = tab.length) == 0)
            n = (tab = resize()).length;        // lazy table allocation
        if ((tp = tab[i = (n - 1) & hash]).isEmpty()) {
            // empty slot: store the mapping inline, no Node allocated
            tab[i] = new XNode(hash, key, value, null);
        } else {
            Node<K,V> e; K k;
            if (tp.hash == hash &&
                ((k = tp.key) == key || (key != null && key.equals(k)))) {
                // inline entry matches the key: rebuild the slot in place
                if (!onlyIfAbsent || tp.value == null) {
                    tab[i] = new XNode(hash, k, value, tp.next);
                }
                return tp.value;
            } else if ((e = tp.next) == null) {
                // no overflow chain yet: keep the old entry inline and
                // chain the new mapping behind it
                Node<K,V> x = newNode(hash, key, value, null);
                tab[i] = new XNode(tp.hash, tp.key, tp.value, x);
            } else if (e instanceof TreeNode) {
                // NOTE(review): putTreeVal is stubbed out (always returns
                // null), so this path falls through to the size/modCount
                // update below; unreachable in practice because treeifyBin
                // is also a no-op.
                e = ((TreeNode<K, V>) e).putTreeVal(this, tab, hash, key, value);
            } else {
                // walk the overflow chain looking for the key; append at
                // the tail if absent
                for (int binCount = 0; ; ++binCount) {
                    if (e.hash == hash &&
                        ((k = e.key) == key || (key != null && key.equals(k))))
                        break;
                    Node<K,V> p = e;
                    if ((e = p.next) == null) {
                        p.next = newNode(hash, key, value, null);
                        if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st
                            treeifyBin(tab, hash);
                        break;
                    }
                }
            }
            if (e != null) { // existing mapping for key
                V oldValue = e.value;
                if (!onlyIfAbsent || oldValue == null)
                    e.value = value;
                return oldValue;
            }
        }

        // a new mapping was added above
        ++modCount;
        if (++size > threshold)
            resize();
        return null;
    }

    /**
     * Initializes or doubles table size.  If null, allocates in
     * accord with initial capacity target held in field threshold.
     * Otherwise, because we are using power-of-two expansion, the
     * elements from each bin must either stay at same index, or move
     * with a power of two offset in the new table.
     *
     * @return the table
     */
    final XNode<K,V>[] resize() {
        XNode<K,V>[] oldTab = table;
        int oldCap = (oldTab == null) ? 0 : oldTab.length;
        int oldThr = threshold;
        int newCap, newThr = 0;
        if (oldCap > 0) {
            if (oldCap >= MAXIMUM_CAPACITY) {
                // cannot grow further; disable future resizing
                threshold = Integer.MAX_VALUE;
                return oldTab;
            }
            else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY &&
                     oldCap >= DEFAULT_INITIAL_CAPACITY)
                newThr = oldThr << 1; // double threshold
        }
        else if (oldThr > 0) // initial capacity was placed in threshold
            newCap = oldThr;
        else {               // zero initial threshold signifies using defaults
            newCap = DEFAULT_INITIAL_CAPACITY;
            newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY);
        }
        if (newThr == 0) {
            float ft = (float)newCap * loadFactor;
            newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ?
                      (int)ft : Integer.MAX_VALUE);
        }
        threshold = newThr;
        @SuppressWarnings({"rawtypes","unchecked"})
        XNode<K,V>[] newTab = (XNode<K,V>[])new XNode[newCap];
        table = newTab;
        if (oldTab != null) {
            for (int j = 0; j < oldCap; ++j) {
                XNode<K,V> x;
                Node<K,V> e;
                if (!(x = oldTab[j]).isEmpty()) {
                    oldTab[j] = emptyXNode();   // free the old slot
                    if ((e = x.next) == null)
                        // single inline entry: it maps to exactly one new slot
                        newTab[x.hash & (newCap - 1)] = new XNode(x.hash, x.key, x.value, null);
                    else if (e instanceof TreeNode)
                        ((TreeNode<K,V>)e).split(this, newTab, j, oldCap);
                    else { // preserve order
                        // split the overflow chain into the "low" (stays at
                        // index j) and "high" (moves to j + oldCap) lists,
                        // exactly as java.util.HashMap.resize does
                        Node<K,V> loHead = null, loTail = null;
                        Node<K,V> hiHead = null, hiTail = null;
                        Node<K,V> next;
                        do {
                            next = e.next;
                            if ((e.hash & oldCap) == 0) {
                                if (loTail == null)
                                    loHead = e;
                                else
                                    loTail.next = e;
                                loTail = e;
                            }
                            else {
                                if (hiTail == null)
                                    hiHead = e;
                                else
                                    hiTail.next = e;
                                hiTail = e;
                            }
                        } while ((e = next) != null);
                        if (loTail != null)
                            loTail.next = null;
                        if (hiTail != null)
                            hiTail.next = null;

                        // The inline entry x belongs to exactly one of the
                        // two new slots; the other slot is seeded by
                        // promoting the head of its list inline (or left
                        // empty when that list is empty).
                        newTab[j] = (j == (x.hash & (newCap - 1)))
                            ? new XNode(x.hash, x.key, x.value, loHead)
                            : ((loHead != null)
                               ? new XNode(loHead.hash, loHead.key, loHead.value, loHead.next) :
                               emptyXNode());

                        newTab[j + oldCap] = ((j + oldCap) == (x.hash & (newCap - 1)))
                            ? new XNode(x.hash, x.key, x.value, hiHead)
                            : ((hiHead != null)
                               ? new XNode(hiHead.hash, hiHead.key, hiHead.value, hiHead.next) :
                               emptyXNode());
                    }
                }
            }
        }
        return newTab;
    }

    /**
     * Replaces all linked nodes in bin at index for given hash unless
     * table is too small, in which case resizes instead.
     */
    // Intentionally a no-op: tree bins are not implemented in this
    // prototype (see the class comment: "The TreeNode organization is
    // not yet implemented").
    final void treeifyBin(XNode<K,V>[] tab, int hash) {
    }

    /**
     * Copies all of the mappings from the specified map to this map.
     * These mappings will replace any mappings that this map had for
     * any of the keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     * @throws NullPointerException if the specified map is null
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        putMapEntries(m, true);
    }

    /**
     * Removes the mapping for the specified key from this map if present.
     *
     * @param key key whose mapping is to be removed from the map
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}.
     *         (A {@code null} return can also indicate that the map
     *         previously associated {@code null} with {@code key}.)
958 */ 959 public V remove(Object key) { 960 Optional<V> o = removeNode(hash(key), key, null, false, true); 961 return o.orElse(null); 962 } 963 964 /** 965 * Implements Map.remove and related methods. 966 * 967 * @param hash hash for key 968 * @param key the key 969 * @param value the value to match if matchValue, else ignored 970 * @param matchValue if true only remove if value is equal 971 * @param movable if false do not move other nodes while removing 972 * @return the node, or null if none 973 */ 974 final Optional<V> removeNode(int hash, Object key, Object value, 975 boolean matchValue, boolean movable) { 976 XNode<K,V>[] tab; XNode<K,V> te; int n, index; 977 if ((tab = table) != null && (n = tab.length) > 0 && 978 !(te = tab[index = (n - 1) & hash]).isEmpty()) { 979 Node<K,V> node = null, e; K k; V v = null; 980 if (te.hash == hash && 981 ((k = te.key) == key || (key != null && key.equals(k)))) { 982 if ((!matchValue || (v = te.value) == value || 983 (value != null && value.equals(v)))) { 984 tab[index] = ((e = te.next) == null) 985 ? 
emptyXNode() 986 : new XNode(hash, e.key, e.value, e.next); 987 ++modCount; 988 --size; 989 return Optional.ofNullable(v); 990 } 991 } else if ((e = te.next) != null) { 992 Node<K,V> p = null; 993 if (e instanceof TreeNode) 994 node = ((TreeNode<K,V>)e).getTreeNode(hash, key); 995 else { 996 do { 997 if (e.hash == hash && 998 ((k = e.key) == key || 999 (key != null && key.equals(k)))) { 1000 node = e; 1001 break; 1002 } 1003 p = e; 1004 } while ((e = e.next) != null); 1005 } 1006 1007 if (node != null && (!matchValue || (v = node.value) == value || 1008 (value != null && value.equals(v)))) { 1009 if (node instanceof TreeNode) 1010 ((TreeNode<K,V>)node).removeTreeNode(this, tab, movable); 1011 else if (p == null) 1012 tab[index] = new XNode(hash, node.key, node.value, node.next); 1013 else 1014 p.next = node.next; 1015 ++modCount; 1016 --size; 1017 return Optional.of(node.value); 1018 } 1019 } 1020 } 1021 return Optional.empty(); 1022 } 1023 1024 /** 1025 * Removes all of the mappings from this map. 1026 * The map will be empty after this call returns. 1027 */ 1028 public void clear() { 1029 modCount++; 1030 if (table != null && size > 0) { 1031 size = 0; 1032 table = null; 1033 threshold = 0; 1034 } 1035 } 1036 1037 /** 1038 * Returns {@code true} if this map maps one or more keys to the 1039 * specified value. 
1040 * 1041 * @param value value whose presence in this map is to be tested 1042 * @return {@code true} if this map maps one or more keys to the 1043 * specified value 1044 */ 1045 public boolean containsValue(Object value) { 1046 XNode<K,V>[] tab; V v; 1047 if ((tab = table) != null && size > 0) { 1048 for (XNode<K,V> te : tab) { 1049 if (!te.isEmpty()) { 1050 if ((v = te.value) == value || 1051 (value != null && value.equals(v))) 1052 return true; 1053 for (Node<K, V> e = te.next; e != null; e = e.next) { 1054 if ((v = e.value) == value || 1055 (value != null && value.equals(v))) 1056 return true; 1057 } 1058 } 1059 } 1060 } 1061 return false; 1062 } 1063 1064 /** 1065 * Returns a {@link Set} view of the keys contained in this map. 1066 * The set is backed by the map, so changes to the map are 1067 * reflected in the set, and vice-versa. If the map is modified 1068 * while an iteration over the set is in progress (except through 1069 * the iterator's own {@code remove} operation), the results of 1070 * the iteration are undefined. The set supports element removal, 1071 * which removes the corresponding mapping from the map, via the 1072 * {@code Iterator.remove}, {@code Set.remove}, 1073 * {@code removeAll}, {@code retainAll}, and {@code clear} 1074 * operations. It does not support the {@code add} or {@code addAll} 1075 * operations. 1076 * 1077 * @return a set view of the keys contained in this map 1078 */ 1079 public Set<K> keySet() { 1080 Set<K> ks = keySet; 1081 if (ks == null) { 1082 ks = new KeySet(); 1083 keySet = ks; 1084 } 1085 return ks; 1086 } 1087 1088 /** 1089 * Prepares the array for {@link Collection#toArray(Object[])} implementation. 1090 * If supplied array is smaller than this map size, a new array is allocated. 1091 * If supplied array is bigger than this map size, a null is written at size index. 
1092 * 1093 * @param a an original array passed to {@code toArray()} method 1094 * @param <T> type of array elements 1095 * @return an array ready to be filled and returned from {@code toArray()} method. 1096 */ 1097 @SuppressWarnings("unchecked") 1098 final <T> T[] prepareArray(T[] a) { 1099 int size = this.size; 1100 if (a.length < size) { 1101 return (T[]) java.lang.reflect.Array 1102 .newInstance(a.getClass().getComponentType(), size); 1103 } 1104 if (a.length > size) { 1105 a[size] = null; 1106 } 1107 return a; 1108 } 1109 1110 /** 1111 * Fills an array with this map keys and returns it. This method assumes 1112 * that input array is big enough to fit all the keys. Use 1113 * {@link #prepareArray(Object[])} to ensure this. 1114 * 1115 * @param a an array to fill 1116 * @param <T> type of array elements 1117 * @return supplied array 1118 */ 1119 <T> T[] keysToArray(T[] a) { 1120 Object[] r = a; 1121 XNode<K,V>[] tab; 1122 int idx = 0; 1123 int i = 0; 1124 if (size > 0 && (tab = table) != null) { 1125 for (XNode<K,V> te : tab) { 1126 if (!te.isEmpty()) { 1127 r[idx++] = te.key; 1128 for (Node<K, V> e = te.next; e != null; e = e.next) { 1129 r[idx++] = e.key; 1130 } 1131 } 1132 } 1133 } 1134 return a; 1135 } 1136 1137 /** 1138 * Fills an array with this map values and returns it. This method assumes 1139 * that input array is big enough to fit all the values. Use 1140 * {@link #prepareArray(Object[])} to ensure this. 
1141 * 1142 * @param a an array to fill 1143 * @param <T> type of array elements 1144 * @return supplied array 1145 */ 1146 <T> T[] valuesToArray(T[] a) { 1147 Object[] r = a; 1148 XNode<K,V>[] tab; 1149 int idx = 0; 1150 if (size > 0 && (tab = table) != null) { 1151 for (XNode<K,V> te : tab) { 1152 if (!te.isEmpty()) { 1153 r[idx++] = te.value; 1154 for (Node<K, V> e = te.next; e != null; e = e.next) { 1155 r[idx++] = e.value; 1156 } 1157 } 1158 } 1159 } 1160 return a; 1161 } 1162 1163 final class KeySet extends AbstractSet<K> { 1164 public final int size() { return size; } 1165 public final void clear() { XHashMap.this.clear(); } 1166 public final Iterator<K> iterator() { return new KeyIterator(); } 1167 public final boolean contains(Object o) { return containsKey(o); } 1168 public final boolean remove(Object key) { 1169 return removeNode(hash(key), key, null, false, true).isPresent(); 1170 } 1171 1172 public Object[] toArray() { 1173 return keysToArray(new Object[size]); 1174 } 1175 1176 public <T> T[] toArray(T[] a) { 1177 return keysToArray(prepareArray(a)); 1178 } 1179 1180 public final void forEach(Consumer<? super K> action) { 1181 XNode<K,V>[] tab; 1182 if (action == null) 1183 throw new NullPointerException(); 1184 if (size > 0 && (tab = table) != null) { 1185 int mc = modCount; 1186 for (XNode<K,V> te : tab) { 1187 if (!te.isEmpty()) { 1188 action.accept(te.key); 1189 for (Node<K, V> e = te.next; e != null; e = e.next) 1190 action.accept(e.key); 1191 } 1192 } 1193 if (modCount != mc) 1194 throw new ConcurrentModificationException(); 1195 } 1196 } 1197 } 1198 1199 /** 1200 * Returns a {@link Collection} view of the values contained in this map. 1201 * The collection is backed by the map, so changes to the map are 1202 * reflected in the collection, and vice-versa. 
If the map is 1203 * modified while an iteration over the collection is in progress 1204 * (except through the iterator's own {@code remove} operation), 1205 * the results of the iteration are undefined. The collection 1206 * supports element removal, which removes the corresponding 1207 * mapping from the map, via the {@code Iterator.remove}, 1208 * {@code Collection.remove}, {@code removeAll}, 1209 * {@code retainAll} and {@code clear} operations. It does not 1210 * support the {@code add} or {@code addAll} operations. 1211 * 1212 * @return a view of the values contained in this map 1213 */ 1214 public Collection<V> values() { 1215 Collection<V> vs = values; 1216 if (vs == null) { 1217 vs = new Values(); 1218 values = vs; 1219 } 1220 return vs; 1221 } 1222 1223 final class Values extends AbstractCollection<V> { 1224 public final int size() { return size; } 1225 public final void clear() { XHashMap.this.clear(); } 1226 public final Iterator<V> iterator() { return new ValueIterator(); } 1227 public final boolean contains(Object o) { return containsValue(o); } 1228 1229 public Object[] toArray() { 1230 return valuesToArray(new Object[size]); 1231 } 1232 1233 public <T> T[] toArray(T[] a) { 1234 return valuesToArray(prepareArray(a)); 1235 } 1236 1237 public final void forEach(Consumer<? super V> action) { 1238 XNode<K,V>[] tab; 1239 if (action == null) 1240 throw new NullPointerException(); 1241 if (size > 0 && (tab = table) != null) { 1242 int mc = modCount; 1243 for (XNode<K,V> te : tab) { 1244 if (!te.isEmpty()) { 1245 action.accept(te.value); 1246 for (Node<K, V> e = te.next; e != null; e = e.next) 1247 action.accept(e.value); 1248 } 1249 } 1250 if (modCount != mc) 1251 throw new ConcurrentModificationException(); 1252 } 1253 } 1254 } 1255 1256 /** 1257 * Returns a {@link Set} view of the mappings contained in this map. 1258 * The set is backed by the map, so changes to the map are 1259 * reflected in the set, and vice-versa. 
If the map is modified 1260 * while an iteration over the set is in progress (except through 1261 * the iterator's own {@code remove} operation, or through the 1262 * {@code setValue} operation on a map entry returned by the 1263 * iterator) the results of the iteration are undefined. The set 1264 * supports element removal, which removes the corresponding 1265 * mapping from the map, via the {@code Iterator.remove}, 1266 * {@code Set.remove}, {@code removeAll}, {@code retainAll} and 1267 * {@code clear} operations. It does not support the 1268 * {@code add} or {@code addAll} operations. 1269 * 1270 * @return a set view of the mappings contained in this map 1271 */ 1272 public Set<Map.Entry<K,V>> entrySet() { 1273 Set<Map.Entry<K,V>> es; 1274 return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; 1275 } 1276 1277 final class EntrySet extends AbstractSet<Map.Entry<K,V>> { 1278 public final int size() { return size; } 1279 public final void clear() { XHashMap.this.clear(); } 1280 public final Iterator<Map.Entry<K,V>> iterator() { 1281 return new EntryIterator(); 1282 } 1283 public final boolean contains(Object o) { 1284 if (!(o instanceof Map.Entry)) 1285 return false; 1286 Map.Entry<?,?> e = (Map.Entry<?,?>) o; 1287 Object key = e.getKey(); 1288 Node<K,V> candidate = getNode(hash(key), key); 1289 return candidate != null && candidate.equals(e); 1290 } 1291 public final boolean remove(Object o) { 1292 if (o instanceof Map.Entry) { 1293 Map.Entry<?,?> e = (Map.Entry<?,?>) o; 1294 Object key = e.getKey(); 1295 Object value = e.getValue(); 1296 return removeNode(hash(key), key, value, true, true).isPresent(); 1297 } 1298 return false; 1299 } 1300 public final void forEach(Consumer<? 
super Map.Entry<K,V>> action) { 1301 XNode<K,V>[] tab; 1302 if (action == null) 1303 throw new NullPointerException(); 1304 if (size > 0 && (tab = table) != null) { 1305 int mc = modCount; 1306 for (XNode<K,V> te : tab) { 1307 if (!te.isEmpty()) { 1308 action.accept(new XNodeWrapper(te.hash & (tab.length - 1))); 1309 for (Node<K, V> e = te.next; e != null; e = e.next) 1310 action.accept(e); 1311 } 1312 } 1313 if (modCount != mc) 1314 throw new ConcurrentModificationException(); 1315 } 1316 } 1317 } 1318 1319 // Overrides of JDK8 Map extension methods 1320 1321 @Override 1322 public V getOrDefault(Object key, V defaultValue) { 1323 Node<K,V> e; 1324 return (e = getNode(hash(key), key)) == null ? defaultValue : e.value; 1325 } 1326 1327 @Override 1328 public V putIfAbsent(K key, V value) { 1329 return putVal(hash(key), key, value, true, true); 1330 } 1331 1332 @Override 1333 public boolean remove(Object key, Object value) { 1334 return removeNode(hash(key), key, value, true, true).isPresent(); 1335 } 1336 1337 @Override 1338 public boolean replace(K key, V oldValue, V newValue) { 1339 Node<K,V> e; V v; 1340 if ((e = getNode(hash(key), key)) != null && 1341 ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { 1342 e.value = newValue; 1343 return true; 1344 } 1345 return false; 1346 } 1347 1348 @Override 1349 public V replace(K key, V value) { 1350 Node<K,V> e; 1351 if ((e = getNode(hash(key), key)) != null) { 1352 V oldValue = e.value; 1353 e.value = value; 1354 return oldValue; 1355 } 1356 return null; 1357 } 1358 1359 /** 1360 * {@inheritDoc} 1361 * 1362 * <p>This method will, on a best-effort basis, throw a 1363 * {@link ConcurrentModificationException} if it is detected that the 1364 * mapping function modifies this map during computation. 1365 * 1366 * @throws ConcurrentModificationException if it is detected that the 1367 * mapping function modified this map 1368 */ 1369 @Override 1370 public V computeIfAbsent(K key, 1371 Function<? super K, ? 
extends V> mappingFunction) { 1372 if (mappingFunction == null) 1373 throw new NullPointerException(); 1374 int hash = hash(key); 1375 XNode<K,V>[] tab; XNode<K,V> first; int n, i; 1376 int binCount = 0; 1377 TreeNode<K,V> t = null; 1378 Node<K,V> old = null; 1379 if (size > threshold || (tab = table) == null || 1380 (n = tab.length) == 0) 1381 n = (tab = resize()).length; 1382 if (!(first = tab[i = (n - 1) & hash]).isEmpty()) { 1383 K k; 1384 if (first.hash == hash && 1385 ((k = first.key) == key || (key != null && key.equals(k)))) { 1386 return first.value; 1387 } 1388 Node<K,V> e = first.next; 1389 if (e instanceof TreeNode) 1390 old = (t = (TreeNode<K,V>)e).getTreeNode(hash, key); 1391 else { 1392 do { 1393 if (e.hash == hash && 1394 ((k = e.key) == key || (key != null && key.equals(k)))) { 1395 old = e; 1396 break; 1397 } 1398 ++binCount; 1399 } while ((e = e.next) != null); 1400 } 1401 1402 V oldValue; 1403 if (old != null && (oldValue = old.value) != null) { 1404 return oldValue; 1405 } 1406 } 1407 int mc = modCount; 1408 V v = mappingFunction.apply(key); 1409 if (mc != modCount) { throw new ConcurrentModificationException(); } 1410 if (v == null) { 1411 return null; 1412 } else if (old != null) { 1413 old.value = v; 1414 return v; 1415 } 1416 else if (t != null) 1417 t.putTreeVal(this, tab, hash, key, v); 1418 else { 1419 Node<K,V> x = (tab[i].isEmpty()) ? null : newNode(hash, key, v, null); 1420 tab[i] = new XNode(hash, key, v, x); 1421 if (binCount >= TREEIFY_THRESHOLD - 1) 1422 treeifyBin(tab, hash); 1423 } 1424 modCount = mc + 1; 1425 ++size; 1426 return v; 1427 } 1428 1429 /** 1430 * {@inheritDoc} 1431 * 1432 * <p>This method will, on a best-effort basis, throw a 1433 * {@link ConcurrentModificationException} if it is detected that the 1434 * remapping function modifies this map during computation. 
1435 * 1436 * @throws ConcurrentModificationException if it is detected that the 1437 * remapping function modified this map 1438 */ 1439 @Override 1440 public V computeIfPresent(K key, 1441 BiFunction<? super K, ? super V, ? extends V> remappingFunction) { 1442 if (remappingFunction == null) 1443 throw new NullPointerException(); 1444 Node<K,V> e; V oldValue; 1445 int hash = hash(key); 1446 if ((e = getNode(hash, key)) != null && 1447 (oldValue = e.value) != null) { 1448 int mc = modCount; 1449 V v = remappingFunction.apply(key, oldValue); 1450 if (mc != modCount) { throw new ConcurrentModificationException(); } 1451 if (v != null) { 1452 e.value = v; 1453 return v; 1454 } 1455 else 1456 removeNode(hash, key, null, false, true); 1457 } 1458 return null; 1459 } 1460 1461 /** 1462 * {@inheritDoc} 1463 * 1464 * <p>This method will, on a best-effort basis, throw a 1465 * {@link ConcurrentModificationException} if it is detected that the 1466 * remapping function modifies this map during computation. 1467 * 1468 * @throws ConcurrentModificationException if it is detected that the 1469 * remapping function modified this map 1470 */ 1471 @Override 1472 public V compute(K key, 1473 BiFunction<? super K, ? super V, ? 
extends V> remappingFunction) { 1474 if (remappingFunction == null) 1475 throw new NullPointerException(); 1476 int hash = hash(key); 1477 XNode<K,V>[] tab; XNode<K,V> first; int n, i; 1478 int binCount = 0; 1479 TreeNode<K,V> t = null; 1480 Node<K,V> old = null; 1481 if (size > threshold || (tab = table) == null || 1482 (n = tab.length) == 0) 1483 n = (tab = resize()).length; 1484 if (!(first = tab[i = (n - 1) & hash]).isEmpty()) { 1485 Node<K,V> e = first.next;K k; 1486 if (first.hash == hash && 1487 ((k = first.key) == key || (key != null && key.equals(k)))) { 1488 V v = remappingFunction.apply(k, first.value); 1489 tab[i] = new XNode(hash, k, v, e); 1490 return v; 1491 } 1492 if (e instanceof TreeNode) 1493 old = (t = (TreeNode<K,V>)e).getTreeNode(hash, key); 1494 else { 1495 do { 1496 if (e.hash == hash && 1497 ((k = e.key) == key || (key != null && key.equals(k)))) { 1498 old = e; 1499 break; 1500 } 1501 ++binCount; 1502 } while ((e = e.next) != null); 1503 } 1504 } 1505 V oldValue = (old == null) ? null : old.value; 1506 int mc = modCount; 1507 V v = remappingFunction.apply(key, oldValue); 1508 if (mc != modCount) { throw new ConcurrentModificationException(); } 1509 if (old != null) { 1510 if (v != null) { 1511 old.value = v; 1512 } 1513 else 1514 removeNode(hash, key, null, false, true); 1515 } 1516 else if (v != null) { 1517 if (t != null) 1518 t.putTreeVal(this, tab, hash, key, v); 1519 else { 1520 Node<K,V> x = (tab[i].isEmpty()) ? null : newNode(hash, key, v, null); 1521 tab[i] = new XNode(hash, key, v, x); 1522 if (binCount >= TREEIFY_THRESHOLD - 1) 1523 treeifyBin(tab, hash); 1524 } 1525 modCount = mc + 1; 1526 ++size; 1527 } 1528 return v; 1529 } 1530 1531 /** 1532 * {@inheritDoc} 1533 * 1534 * <p>This method will, on a best-effort basis, throw a 1535 * {@link ConcurrentModificationException} if it is detected that the 1536 * remapping function modifies this map during computation. 
1537 * 1538 * @throws ConcurrentModificationException if it is detected that the 1539 * remapping function modified this map 1540 */ 1541 @Override 1542 public V merge(K key, V value, 1543 BiFunction<? super V, ? super V, ? extends V> remappingFunction) { 1544 if (value == null || remappingFunction == null) 1545 throw new NullPointerException(); 1546 int hash = hash(key); 1547 XNode<K,V>[] tab; XNode<K,V> first; int n, i; 1548 int binCount = 0; 1549 TreeNode<K,V> t = null; 1550 Node<K,V> old = null; 1551 if (size > threshold || (tab = table) == null || 1552 (n = tab.length) == 0) 1553 n = (tab = resize()).length; 1554 if (!(first = tab[i = (n - 1) & hash]).isEmpty()) { 1555 Node<K,V> e = first.next;K k; 1556 if (first.hash == hash && 1557 ((k = first.key) == key || (key != null && key.equals(k)))) { 1558 V v = remappingFunction.apply(first.value, value); 1559 tab[i] = new XNode(hash, k, v, e); 1560 return v; 1561 } 1562 if (e instanceof TreeNode) 1563 old = (t = (TreeNode<K,V>)e).getTreeNode(hash, key); 1564 else { 1565 do { 1566 if (e.hash == hash && 1567 ((k = e.key) == key || (key != null && key.equals(k)))) { 1568 old = e; 1569 break; 1570 } 1571 ++binCount; 1572 } while ((e = e.next) != null); 1573 } 1574 } 1575 if (old != null) { 1576 V v; 1577 if (old.value != null) { 1578 int mc = modCount; 1579 v = remappingFunction.apply(old.value, value); 1580 if (mc != modCount) { 1581 throw new ConcurrentModificationException(); 1582 } 1583 } else { 1584 v = value; 1585 } 1586 if (v != null) { 1587 old.value = v; 1588 } 1589 else 1590 removeNode(hash, key, null, false, true); 1591 return v; 1592 } else { 1593 if (t != null) 1594 t.putTreeVal(this, tab, hash, key, value); 1595 else { 1596 Node<K,V> x = (tab[i].isEmpty()) ? 
null 1597 : newNode(hash, tab[i].key, tab[i].value, null); 1598 tab[i] = new XNode(hash, key, value, x); 1599 if (binCount >= TREEIFY_THRESHOLD - 1) 1600 treeifyBin(tab, hash); 1601 } 1602 ++modCount; 1603 ++size; 1604 return value; 1605 } 1606 } 1607 1608 @Override 1609 public void forEach(BiConsumer<? super K, ? super V> action) { 1610 XNode<K,V>[] tab; 1611 if (action == null) 1612 throw new NullPointerException(); 1613 if (size > 0 && (tab = table) != null) { 1614 int mc = modCount; 1615 for (XNode<K,V> te : tab) { 1616 if (!te.isEmpty()) { 1617 action.accept(te.key, te.value); 1618 for (Node<K, V> e = te.next; e != null; e = e.next) 1619 action.accept(e.key, e.value); 1620 } 1621 } 1622 if (modCount != mc) 1623 throw new ConcurrentModificationException(); 1624 } 1625 } 1626 1627 @Override 1628 public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) { 1629 XNode<K,V>[] tab; 1630 if (function == null) 1631 throw new NullPointerException(); 1632 if (size > 0 && (tab = table) != null) { 1633 int mc = modCount; 1634 for (XNode<K,V> te : tab) { 1635 if (!te.isEmpty()) { 1636 V v = function.apply(te.key, te.value); 1637 tab[te.hash & (tab.length -1)] = new XNode(te.hash, te.key, v, te.next); 1638 for (Node<K, V> e = te.next; e != null; e = e.next) 1639 e.value = function.apply(e.key, e.value); 1640 } 1641 } 1642 if (modCount != mc) 1643 throw new ConcurrentModificationException(); 1644 } 1645 } 1646 1647 /* ------------------------------------------------------------ */ 1648 // Cloning and serialization 1649 1650 /** 1651 * Returns a shallow copy of this {@code HashMap} instance: the keys and 1652 * values themselves are not cloned. 
*
 * @return a shallow copy of this map
 */
    @SuppressWarnings("unchecked")
    @Override
    public Object clone() {
        XHashMap<K,V> result;
        try {
            result = (XHashMap<K,V>)super.clone();
        } catch (CloneNotSupportedException e) {
            // this shouldn't happen, since we are Cloneable
            throw new InternalError(e);
        }
        // reset the shallow-copied table/views, then re-insert all mappings
        result.reinitialize();
        result.putMapEntries(this, false);
        return result;
    }

    // These methods are also used when serializing HashSets
    final float loadFactor() { return loadFactor; }
    final int capacity() {
        // table length if allocated, else the pending capacity stashed in
        // threshold, else the default
        return (table != null) ? table.length :
            (threshold > 0) ? threshold :
            DEFAULT_INITIAL_CAPACITY;
    }

    /**
     * Saves this map to a stream (that is, serializes it).
     *
     * @param s the stream
     * @throws IOException if an I/O error occurs
     * @serialData The <i>capacity</i> of the HashMap (the length of the
     *             bucket array) is emitted (int), followed by the
     *             <i>size</i> (an int, the number of key-value
     *             mappings), followed by the key (Object) and value (Object)
     *             for each key-value mapping.  The key-value mappings are
     *             emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws IOException {
        int buckets = capacity();
        // Write out the threshold, loadfactor, and any hidden stuff
        s.defaultWriteObject();
        s.writeInt(buckets);
        s.writeInt(size);
        internalWriteEntries(s);
    }

    /**
     * Reconstitutes this map from a stream (that is, deserializes it).
     * @param s the stream
     * @throws ClassNotFoundException if the class of a serialized object
     *         could not be found
     * @throws IOException if an I/O error occurs
     */
    private void readObject(java.io.ObjectInputStream s)
        throws IOException, ClassNotFoundException {
        // Read in the threshold (ignored), loadfactor, and any hidden stuff
        s.defaultReadObject();
        reinitialize();
        if (loadFactor <= 0 || Float.isNaN(loadFactor))
            throw new InvalidObjectException("Illegal load factor: " +
                                             loadFactor);
        s.readInt();                // Read and ignore number of buckets
        int mappings = s.readInt(); // Read number of mappings (size)
        if (mappings < 0)
            throw new InvalidObjectException("Illegal mappings count: " +
                                             mappings);
        else if (mappings > 0) { // (if zero, use defaults)
            // Size the table using given load factor only if within
            // range of 0.25...4.0
            float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f);
            float fc = (float)mappings / lf + 1.0f;
            int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ?
                       DEFAULT_INITIAL_CAPACITY :
                       (fc >= MAXIMUM_CAPACITY) ?
                       MAXIMUM_CAPACITY :
                       tableSizeFor((int)fc));
            float ft = (float)cap * lf;
            threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ?
                         (int)ft : Integer.MAX_VALUE);

            // Check Map.Entry[].class since it's the nearest public type to
            // what we're actually creating.
            @SuppressWarnings({"rawtypes","unchecked"})
            XNode<K,V>[] tab = (XNode<K,V>[])new XNode[cap];
            table = tab;

            // Read the keys and values, and put the mappings in the HashMap
            for (int i = 0; i < mappings; i++) {
                @SuppressWarnings("unchecked")
                K key = (K) s.readObject();
                @SuppressWarnings("unchecked")
                V value = (V) s.readObject();
                putVal(hash(key), key, value, false, false);
            }
        }
    }

    /* ------------------------------------------------------------ */
    // iterators

    // Sentinel placed in 'next' to mean "the next element is the inline
    // entry of the slot at 'index'", as opposed to a real chained Node.
    static final Node<?,?> START_INDEX = new Node(0, null, null, null);

    abstract class HashIterator {
        Node<K,V> next;        // next entry to return
        Node<K,V> current;     // current entry
        int expectedModCount;  // for fast-fail
        int index;             // current slot


        HashIterator() {
            expectedModCount = modCount;
            XNode<K,V>[] t = table;
            current = next = null;
            index = 0;
            if (t != null && size > 0) { // advance to first entry
                XNode<K,V> n = emptyXNode();
                for (; index < t.length && (n = t[index]).isEmpty(); index++) {
                }
                next = (Node<K,V>)START_INDEX;
            }
        }

        public final boolean hasNext() {
            return next != null;
        }

        // Returns the next entry: the inline slot entry (wrapped in an
        // XNodeWrapper when 'next' is the START_INDEX sentinel) or a
        // chained Node.
        // NOTE(review): when the sentinel is consumed, 'next' is reset to
        // the sentinel for the following non-empty slot without consulting
        // t[index].next, so chained nodes of a slot appear never to be
        // visited, and 'current' is left pointing at the sentinel (whose
        // key is null), which would make remove() call
        // removeNode(0, null, ...).  Verify against the XNodeWrapper
        // implementation earlier in this file.
        final Entry<K,V> nextNode() {
            XNode<K,V>[] t;
            Node<K,V> e = next;
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            if (e == null)
                throw new NoSuchElementException();
            if ((next = (current = e).next) == null && (t = table) != null) {
                var ret = (e == START_INDEX) ? new XNodeWrapper(index++) : e;
                for (; index < t.length && (t[index]).isEmpty(); index++) { }
                next = (index < t.length) ? (Node<K, V>) START_INDEX : null;
                return ret;
            }
            return e;
        }

        public final void remove() {
            Node<K,V> p = current;
            if (p == null)
                throw new IllegalStateException();
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            current = null;
            removeNode(p.hash, p.key, null, false, false);
            expectedModCount = modCount;    // our own removal is not a CME
        }
    }

    final class KeyIterator extends HashIterator
        implements Iterator<K> {
        public final K next() { return nextNode().getKey(); }
    }

    final class ValueIterator extends HashIterator
        implements Iterator<V> {
        public final V next() { return nextNode().getValue(); }
    }

    final class EntryIterator extends HashIterator
        implements Iterator<Map.Entry<K,V>> {
        public final Map.Entry<K,V> next() { return nextNode(); }
    }

    /*
     * The following package-protected methods are designed to be
     * overridden by LinkedHashMap, but not by any other subclass.
     * Nearly all other internal methods are also package-protected
     * but are declared final, so can be used by LinkedHashMap, view
     * classes, and HashSet.
     */

    // Create a regular (non-tree) node
    Node<K,V> newNode(int hash, K key, V value, Node<K,V> next) {
        return new Node<>(hash, key, value, next);
    }

    // For conversion from TreeNodes to plain nodes
    Node<K,V> replacementNode(Node<K,V> p, Node<K,V> next) {
        return new Node<>(p.hash, p.key, p.value, next);
    }

    // Create a tree bin node
    TreeNode<K,V> newTreeNode(int hash, K key, V value, Node<K,V> next) {
        return new TreeNode<>(hash, key, value, next);
    }

    // For treeifyBin
    TreeNode<K,V> replacementTreeNode(Node<K,V> p, Node<K,V> next) {
        return new TreeNode<>(p.hash, p.key, p.value, next);
    }

    /**
     * Reset to initial default state.  Called by clone and readObject.
*/
    void reinitialize() {
        table = null;
        entrySet = null;
        keySet = null;
        values = null;
        modCount = 0;
        threshold = 0;
        size = 0;
    }

    // Called only from writeObject, to ensure compatible ordering.
    // Writes each mapping as a key/value pair: first the inline slot
    // entry, then its overflow chain.
    void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException {
        XNode<K,V>[] tab;
        if (size > 0 && (tab = table) != null) {
            for (XNode<K,V> te : tab) {
                if (!te.isEmpty()) {
                    s.writeObject(te.key);
                    s.writeObject(te.value);

                    for (Node<K, V> e = te.next; e != null; e = e.next) {
                        s.writeObject(e.key);
                        s.writeObject(e.value);
                    }
                }
            }
        }
    }

    /* ------------------------------------------------------------ */
    // Tree bins

    /**
     * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn
     * extends Node) so can be used as extension of either regular or
     * linked node.
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;
        TreeNode(int hash, K key, V val, Node<K,V> next) {
            super(hash, key, val, next);
        }

        /**
         * Returns root of tree containing this node.
         */
        final TreeNode<K,V> root() {
            // follow parent links until the top of the tree
            for (TreeNode<K,V> r = this, p;;) {
                if ((p = r.parent) == null)
                    return r;
                r = p;
            }
        }

        /**
         * Ensures that the given root is the first node of its bin.
*/
        static <K,V> void moveRootToFront(Node<K,V>[] tab, TreeNode<K,V> root) {
            int n;
            if (root != null && tab != null && (n = tab.length) > 0) {
                int index = (n - 1) & root.hash;
                TreeNode<K,V> first = (TreeNode<K,V>)tab[index];
                if (root != first) {
                    // splice root out of the linked list and re-insert it
                    // at the head of the bin
                    Node<K,V> rn;
                    tab[index] = root;
                    TreeNode<K,V> rp = root.prev;
                    if ((rn = root.next) != null)
                        ((TreeNode<K,V>)rn).prev = rp;
                    if (rp != null)
                        rp.next = rn;
                    if (first != null)
                        first.prev = root;
                    root.next = first;
                    root.prev = null;
                }
                assert checkInvariants(root);
            }
        }

        /**
         * Finds the node starting at root p with the given hash and key.
         * The kc argument caches comparableClassFor(key) upon first use
         * comparing keys.
         */
        final TreeNode<K,V> find(int h, Object k, Class<?> kc) {
            TreeNode<K,V> p = this;
            do {
                int ph, dir; K pk;
                TreeNode<K,V> pl = p.left, pr = p.right, q;
                if ((ph = p.hash) > h)
                    p = pl;
                else if (ph < h)
                    p = pr;
                else if ((pk = p.key) == k || (k != null && k.equals(pk)))
                    return p;
                else if (pl == null)
                    p = pr;
                else if (pr == null)
                    p = pl;
                else if ((kc != null ||
                          (kc = comparableClassFor(k)) != null) &&
                         (dir = compareComparables(kc, k, pk)) != 0)
                    p = (dir < 0) ? pl : pr;
                else if ((q = pr.find(h, k, kc)) != null)
                    // equal hashes, non-comparable keys: search both subtrees
                    return q;
                else
                    p = pl;
            } while (p != null);
            return null;
        }

        /**
         * Calls find for root node.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k) {
            return ((parent != null) ? root() : this).find(h, k, null);
        }

        /**
         * Tie-breaking utility for ordering insertions when equal
         * hashCodes and non-comparable.  We don't require a total
         * order, just a consistent insertion rule to maintain
         * equivalence across rebalancings.  Tie-breaking further than
         * necessary simplifies testing a bit.
         */
        static int tieBreakOrder(Object a, Object b) {
            int d;
            if (a == null || b == null ||
                (d = a.getClass().getName().
                 compareTo(b.getClass().getName())) == 0)
                d = (System.identityHashCode(a) <= System.identityHashCode(b) ?
                     -1 : 1);
            return d;
        }

        /**
         * Forms tree of the nodes linked from this node.
         */
        // Intentionally a no-op: tree bins are not implemented in this
        // prototype, so bins are never converted to red-black trees.
        final void treeify(XNode<K,V>[] tab) {
        }

        /**
         * Returns a list of non-TreeNodes replacing those linked from
         * this node.
         */
        final Node<K,V> untreeify(XHashMap<K,V> map) {
            Node<K,V> hd = null, tl = null;
            // copy the linked list into plain Nodes, preserving order
            for (Node<K,V> q = this; q != null; q = q.next) {
                Node<K,V> p = map.replacementNode(q, null);
                if (tl == null)
                    hd = p;
                else
                    tl.next = p;
                tl = p;
            }
            return hd;
        }

        /**
         * Tree version of putVal.
2057 */ 2058 final TreeNode<K,V> putTreeVal(XHashMap<K,V> map, XNode<K,V>[] tab, 2059 int h, K k, V v) { 2060 // Class<?> kc = null; 2061 // boolean searched = false; 2062 // TreeNode<K,V> root = (parent != null) ? root() : this; 2063 // for (TreeNode<K,V> p = root;;) { 2064 // int dir, ph; K pk; 2065 // if ((ph = p.hash) > h) 2066 // dir = -1; 2067 // else if (ph < h) 2068 // dir = 1; 2069 // else if ((pk = p.key) == k || (k != null && k.equals(pk))) 2070 // return p; 2071 // else if ((kc == null && 2072 // (kc = comparableClassFor(k)) == null) || 2073 // (dir = compareComparables(kc, k, pk)) == 0) { 2074 // if (!searched) { 2075 // TreeNode<K,V> q, ch; 2076 // searched = true; 2077 // if (((ch = p.left) != null && 2078 // (q = ch.find(h, k, kc)) != null) || 2079 // ((ch = p.right) != null && 2080 // (q = ch.find(h, k, kc)) != null)) 2081 // return q; 2082 // } 2083 // dir = tieBreakOrder(k, pk); 2084 // } 2085 // 2086 // TreeNode<K,V> xp = p; 2087 // if ((p = (dir <= 0) ? p.left : p.right) == null) { 2088 // Node<K,V> xpn = xp.next; 2089 // TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn); 2090 // if (dir <= 0) 2091 // xp.left = x; 2092 // else 2093 // xp.right = x; 2094 // xp.next = x; 2095 // x.parent = x.prev = xp; 2096 // if (xpn != null) 2097 // ((TreeNode<K,V>)xpn).prev = x; 2098 // moveRootToFront(tab, balanceInsertion(root, x)); 2099 // return null; 2100 // } 2101 // } 2102 return null; 2103 } 2104 2105 /** 2106 * Removes the given node, that must be present before this call. 2107 * This is messier than typical red-black deletion code because we 2108 * cannot swap the contents of an interior node with a leaf 2109 * successor that is pinned by "next" pointers that are accessible 2110 * independently during traversal. So instead we swap the tree 2111 * linkages. If the current tree appears to have too few nodes, 2112 * the bin is converted back to a plain bin. (The test triggers 2113 * somewhere between 2 and 6 nodes, depending on tree structure). 
2114 */ 2115 final void removeTreeNode(XHashMap<K,V> map, XNode<K,V>[] tab, 2116 boolean movable) { 2117 // int n; 2118 // if (tab == null || (n = tab.length) == 0) 2119 // return; 2120 // int index = (n - 1) & hash; 2121 // TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl; 2122 // TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev; 2123 // if (pred == null) 2124 // tab[index] = first = succ; 2125 // else 2126 // pred.next = succ; 2127 // if (succ != null) 2128 // succ.prev = pred; 2129 // if (first == null) 2130 // return; 2131 // if (root.parent != null) 2132 // root = root.root(); 2133 // if (root == null 2134 // || (movable 2135 // && (root.right == null 2136 // || (rl = root.left) == null 2137 // || rl.left == null))) { 2138 // tab[index] = first.untreeify(map); // too small 2139 // return; 2140 // } 2141 // TreeNode<K,V> p = this, pl = left, pr = right, replacement; 2142 // if (pl != null && pr != null) { 2143 // TreeNode<K,V> s = pr, sl; 2144 // while ((sl = s.left) != null) // find successor 2145 // s = sl; 2146 // boolean c = s.red; s.red = p.red; p.red = c; // swap colors 2147 // TreeNode<K,V> sr = s.right; 2148 // TreeNode<K,V> pp = p.parent; 2149 // if (s == pr) { // p was s's direct parent 2150 // p.parent = s; 2151 // s.right = p; 2152 // } 2153 // else { 2154 // TreeNode<K,V> sp = s.parent; 2155 // if ((p.parent = sp) != null) { 2156 // if (s == sp.left) 2157 // sp.left = p; 2158 // else 2159 // sp.right = p; 2160 // } 2161 // if ((s.right = pr) != null) 2162 // pr.parent = s; 2163 // } 2164 // p.left = null; 2165 // if ((p.right = sr) != null) 2166 // sr.parent = p; 2167 // if ((s.left = pl) != null) 2168 // pl.parent = s; 2169 // if ((s.parent = pp) == null) 2170 // root = s; 2171 // else if (p == pp.left) 2172 // pp.left = s; 2173 // else 2174 // pp.right = s; 2175 // if (sr != null) 2176 // replacement = sr; 2177 // else 2178 // replacement = p; 2179 // } 2180 // else if (pl != null) 2181 // replacement = pl; 2182 // else if 
(pr != null) 2183 // replacement = pr; 2184 // else 2185 // replacement = p; 2186 // if (replacement != p) { 2187 // TreeNode<K,V> pp = replacement.parent = p.parent; 2188 // if (pp == null) 2189 // (root = replacement).red = false; 2190 // else if (p == pp.left) 2191 // pp.left = replacement; 2192 // else 2193 // pp.right = replacement; 2194 // p.left = p.right = p.parent = null; 2195 // } 2196 // 2197 // TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement); 2198 // 2199 // if (replacement == p) { // detach 2200 // TreeNode<K,V> pp = p.parent; 2201 // p.parent = null; 2202 // if (pp != null) { 2203 // if (p == pp.left) 2204 // pp.left = null; 2205 // else if (p == pp.right) 2206 // pp.right = null; 2207 // } 2208 // } 2209 // if (movable) 2210 // moveRootToFront(tab, r); 2211 } 2212 2213 /** 2214 * Splits nodes in a tree bin into lower and upper tree bins, 2215 * or untreeifies if now too small. Called only from resize; 2216 * see above discussion about split bits and indices. 
2217 * 2218 * @param map the map 2219 * @param tab the table for recording bin heads 2220 * @param index the index of the table being split 2221 * @param bit the bit of hash to split on 2222 */ 2223 final void split(XHashMap<K,V> map, XNode<K,V>[] tab, int index, int bit) { 2224 // TreeNode<K,V> b = this; 2225 // // Relink into lo and hi lists, preserving order 2226 // TreeNode<K,V> loHead = null, loTail = null; 2227 // TreeNode<K,V> hiHead = null, hiTail = null; 2228 // int lc = 0, hc = 0; 2229 // for (TreeNode<K,V> e = b, next; e != null; e = next) { 2230 // next = (TreeNode<K,V>)e.next; 2231 // e.next = null; 2232 // if ((e.hash & bit) == 0) { 2233 // if ((e.prev = loTail) == null) 2234 // loHead = e; 2235 // else 2236 // loTail.next = e; 2237 // loTail = e; 2238 // ++lc; 2239 // } 2240 // else { 2241 // if ((e.prev = hiTail) == null) 2242 // hiHead = e; 2243 // else 2244 // hiTail.next = e; 2245 // hiTail = e; 2246 // ++hc; 2247 // } 2248 // } 2249 // 2250 // if (loHead != null) { 2251 // if (lc <= UNTREEIFY_THRESHOLD) 2252 // tab[index] = loHead.untreeify(map); 2253 // else { 2254 // tab[index] = loHead; 2255 // if (hiHead != null) // (else is already treeified) 2256 // loHead.treeify(tab); 2257 // } 2258 // } 2259 // if (hiHead != null) { 2260 // if (hc <= UNTREEIFY_THRESHOLD) 2261 // tab[index + bit] = hiHead.untreeify(map); 2262 // else { 2263 // tab[index + bit] = hiHead; 2264 // if (loHead != null) 2265 // hiHead.treeify(tab); 2266 // } 2267 // } 2268 } 2269 2270 /** 2271 * Recursive invariant check 2272 */ 2273 static <K,V> boolean checkInvariants(TreeNode<K,V> t) { 2274 TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right, 2275 tb = t.prev, tn = (TreeNode<K,V>)t.next; 2276 if (tb != null && tb.next != t) 2277 return false; 2278 if (tn != null && tn.prev != t) 2279 return false; 2280 if (tp != null && t != tp.left && t != tp.right) 2281 return false; 2282 if (tl != null && (tl.parent != t || tl.hash > t.hash)) 2283 return false; 2284 if (tr != null && 
(tr.parent != t || tr.hash < t.hash)) 2285 return false; 2286 if (t.red && tl != null && tl.red && tr != null && tr.red) 2287 return false; 2288 if (tl != null && !checkInvariants(tl)) 2289 return false; 2290 if (tr != null && !checkInvariants(tr)) 2291 return false; 2292 return true; 2293 } 2294 } 2295 2296 2297 public void dumpStats(PrintStream out) { 2298 out.printf("%s instance: size: %d%n", this.getClass().getName(), this.size()); 2299 long size = heapSize(); 2300 long bytesPer = size / this.size(); 2301 out.printf(" heap size: %d(bytes), avg bytes per entry: %d, table len: %d%n", 2302 size, bytesPer, table.length); 2303 long[] types = entryTypes(); 2304 out.printf(" values: %d, empty: %d%n", types[0], types[1]); 2305 int[] rehashes = entryRehashes(); 2306 out.printf(" hash collision histogram: max: %d, %s%n", 2307 rehashes.length - 1, Arrays.toString(rehashes)); 2308 } 2309 2310 private long[] entryTypes() { 2311 long[] counts = new long[3]; 2312 for (XNode<K,V> te : table) { 2313 counts[te.isEmpty() ? 1 : 0]++; 2314 } 2315 return counts; 2316 } 2317 2318 // Returns a histogram array of the number of rehashs needed to find each key. 
2319 private int[] entryRehashes() { 2320 int[] counts = new int[table.length + 1]; 2321 XNode<K,V>[] tab = table; 2322 for (XNode<K,V> te : tab) { 2323 if (!te.isEmpty()) { 2324 int count = 0; 2325 for (Node<K, V> e = te.next; e != null; e = e.next) 2326 count++; 2327 counts[count]++; 2328 } 2329 } 2330 2331 int i; 2332 for (i = counts.length - 1; i >= 0 && counts[i] == 0; i--) { 2333 } 2334 counts = Arrays.copyOf(counts, i + 1); 2335 return counts; 2336 } 2337 2338 private long heapSize() { 2339 long acc = objectSizeMaybe(this); 2340 acc += objectSizeMaybe(table); 2341 2342 XNode<K,V>[] tab = table; 2343 for (XNode<K,V> te : tab) { 2344 if (!te.isEmpty()) { 2345 for (Node<K, V> e = te.next; e != null; e = e.next) 2346 acc += objectSizeMaybe(e); 2347 } 2348 } 2349 return acc; 2350 } 2351 2352 private long objectSizeMaybe(Object o) { 2353 try { 2354 return (mObjectSize != null) 2355 ? (long)mObjectSize.invoke(null, o) 2356 : 0L; 2357 } catch (IllegalAccessException | InvocationTargetException e) { 2358 return 0L; 2359 } 2360 } 2361 2362 private static boolean hasObjectSize = false; 2363 private static Method mObjectSize = getObjectSizeMethod(); 2364 2365 private static Method getObjectSizeMethod() { 2366 try { 2367 Method m = Objects.class.getDeclaredMethod("getObjectSize", Object.class); 2368 hasObjectSize = true; 2369 return m; 2370 } catch (NoSuchMethodException nsme) { 2371 return null; 2372 } 2373 } 2374 2375 }