# HG changeset patch
# User psandoz
# Date 1378280065 -7200
# Node ID dda89341ee2d3c52c38b12e92cd4198c76238e43
# Parent  39ccb0972a2fb712612b7fe5adc7775603dfaf99
8023463: Improvements to HashMap/LinkedHashMap use of bins/buckets and trees (red/black)
8012913: LinkedHashMap key/value/entry spliterators should report ORDERED
Reviewed-by: mduigou, forax, bchristi, alanb
Contributed-by: Doug Lea, Paul Sandoz

diff -r 39ccb0972a2f -r dda89341ee2d jdk/src/share/classes/java/util/HashMap.java
--- a/jdk/src/share/classes/java/util/HashMap.java	Mon Aug 12 12:22:10 2013 +0200
+++ b/jdk/src/share/classes/java/util/HashMap.java	Wed Sep 04 09:34:25 2013 +0200
@@ -25,13 +25,14 @@
 
 package java.util;
 
-import java.io.*;
+import java.io.IOException;
+import java.io.InvalidObjectException;
+import java.io.Serializable;
 import java.lang.reflect.ParameterizedType;
 import java.lang.reflect.Type;
-import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
 import java.util.function.Consumer;
-import java.util.function.BiFunction;
 import java.util.function.Function;
 
 /**
@@ -63,20 +64,25 @@
  * structures are rebuilt) so that the hash table has approximately twice the
  * number of buckets.
  *
- *

As a general rule, the default load factor (.75) offers a good tradeoff - * between time and space costs. Higher values decrease the space overhead - * but increase the lookup cost (reflected in most of the operations of the - * HashMap class, including get and put). The - * expected number of entries in the map and its load factor should be taken - * into account when setting its initial capacity, so as to minimize the - * number of rehash operations. If the initial capacity is greater - * than the maximum number of entries divided by the load factor, no - * rehash operations will ever occur. + *

As a general rule, the default load factor (.75) offers a good + * tradeoff between time and space costs. Higher values decrease the + * space overhead but increase the lookup cost (reflected in most of + * the operations of the HashMap class, including + * get and put). The expected number of entries in + * the map and its load factor should be taken into account when + * setting its initial capacity, so as to minimize the number of + * rehash operations. If the initial capacity is greater than the + * maximum number of entries divided by the load factor, no rehash + * operations will ever occur. * - *

If many mappings are to be stored in a HashMap instance, - * creating it with a sufficiently large capacity will allow the mappings to - * be stored more efficiently than letting it perform automatic rehashing as - * needed to grow the table. + *

If many mappings are to be stored in a HashMap + * instance, creating it with a sufficiently large capacity will allow + * the mappings to be stored more efficiently than letting it perform + * automatic rehashing as needed to grow the table. Note that using + * many keys with the same {@code hashCode()} is a sure way to slow + * down performance of any hash table. To ameliorate impact, when keys + * are {@link Comparable}, this class may use comparison order among + * keys to help break ties. * *
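+ *
+ * <p>(Editorial sketch, not part of the original javadoc: the
+ * Comparable tie-breaking can be seen with a key type whose
+ * {@code hashCode()} always collides; operations stay near O(log n)
+ * once its bin is treeified rather than degrading to O(n).)
+ *
+ * <pre> {@code
+ * final class CollidingKey implements Comparable<CollidingKey> {
+ *     final int id;
+ *     CollidingKey(int id) { this.id = id; }
+ *     public int hashCode() { return 42; } // every key maps to one bin
+ *     public boolean equals(Object o) {
+ *         return o instanceof CollidingKey && ((CollidingKey) o).id == id;
+ *     }
+ *     public int compareTo(CollidingKey k) { return Integer.compare(id, k.id); }
+ * }
+ *
+ * Map<CollidingKey, Integer> m = new HashMap<>();
+ * for (int i = 0; i < 100_000; i++)
+ *     m.put(new CollidingKey(i), i); // tree bins keep this tractable
+ * }</pre>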

Note that this implementation is not synchronized.
 * If multiple threads access a hash map concurrently, and at least one of
@@ -128,11 +134,100 @@
  * @see     Hashtable
  * @since   1.2
  */
+public class HashMap<K,V> extends AbstractMap<K,V>
+    implements Map<K,V>, Cloneable, Serializable {
 
-public class HashMap<K,V>
-    extends AbstractMap<K,V>
-    implements Map<K,V>, Cloneable, Serializable
-{
+    private static final long serialVersionUID = 362498820763181265L;
+
+    /*
+     * Implementation notes.
+     *
+     * This map usually acts as a binned (bucketed) hash table, but
+     * when bins get too large, they are transformed into bins of
+     * TreeNodes, each structured similarly to those in
+     * java.util.TreeMap. Most methods try to use normal bins, but
+     * relay to TreeNode methods when applicable (simply by checking
+     * instanceof a node). Bins of TreeNodes may be traversed and
+     * used like any others, but additionally support faster lookup
+     * when overpopulated. However, since the vast majority of bins in
+     * normal use are not overpopulated, checking for existence of
+     * tree bins may be delayed in the course of table methods.
+     *
+     * Tree bins (i.e., bins whose elements are all TreeNodes) are
+     * ordered primarily by hashCode, but in the case of ties, if two
+     * elements are of the same "class C implements Comparable<C>"
+     * type, then their compareTo method is used for ordering. (We
+     * conservatively check generic types via reflection to validate
+     * this -- see method comparableClassFor). The added complexity
+     * of tree bins is worthwhile in providing worst-case O(log n)
+     * operations when keys either have distinct hashes or are
+     * orderable. Thus, performance degrades gracefully under
+     * accidental or malicious usages in which hashCode() methods
+     * return values that are poorly distributed, as well as those in
+     * which many keys share a hashCode, so long as they are also
+     * Comparable. (If neither of these apply, we may waste about a
+     * factor of two in time and space compared to taking no
+     * precautions. But the only known cases stem from poor user
+     * programming practices that are already so slow that this makes
+     * little difference.)
+     *
+     * Because TreeNodes are about twice the size of regular nodes, we
+     * use them only when bins contain enough nodes to warrant use
+     * (see TREEIFY_THRESHOLD). And when they become too small (due to
+     * removal or resizing) they are converted back to plain bins. In
+     * usages with well-distributed user hashCodes, tree bins are
+     * rarely used. Ideally, under random hashCodes, the frequency of
+     * nodes in bins follows a Poisson distribution
+     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
+     * parameter of about 0.5 on average for the default resizing
+     * threshold of 0.75, although with a large variance because of
+     * resizing granularity. Ignoring variance, the expected
+     * occurrences of list size k are (exp(-0.5) * pow(0.5, k) /
+     * factorial(k)). The first values are:
+     *
+     * 0:    0.60653066
+     * 1:    0.30326533
+     * 2:    0.07581633
+     * 3:    0.01263606
+     * 4:    0.00157952
+     * 5:    0.00015795
+     * 6:    0.00001316
+     * 7:    0.00000094
+     * 8:    0.00000006
+     * more: less than 1 in ten million
+     *
+     * The root of a tree bin is normally its first node. However,
+     * sometimes (currently only upon Iterator.remove), the root might
+     * be elsewhere, but can be recovered following parent links
+     * (method TreeNode.root()).
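+     *
+     * (Editorial aside, not in the original notes: the table above
+     * can be re-derived from the Poisson pmf with the recurrence
+     * P(k+1) = P(k) * 0.5 / (k + 1), e.g.:
+     *
+     *   double p = Math.exp(-0.5);            // P(0) ~= 0.60653066
+     *   for (int k = 0; k <= 8; k++) {
+     *       System.out.printf("%d: %.8f%n", k, p);
+     *       p = p * 0.5 / (k + 1);            // next Poisson term
+     *   }
+     *
+     * which reproduces the values listed.)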
+ * + * All applicable internal methods accept a hash code as an + * argument (as normally supplied from a public method), allowing + * them to call each other without recomputing user hashCodes. + * Most internal methods also accept a "tab" argument, that is + * normally the current table, but may be a new or old one when + * resizing or converting. + * + * When bin lists are treeified, split, or untreeified, we keep + * them in the same relative access/traversal order (i.e., field + * Node.next) to better preserve locality, and to slightly + * simplify handling of splits and traversals that invoke + * iterator.remove. When using comparators on insertion, to keep a + * total ordering (or as close as is required here) across + * rebalancings, we compare classes and identityHashCodes as + * tie-breakers. + * + * The use and transitions among plain vs tree modes is + * complicated by the existence of subclass LinkedHashMap. See + * below for hook methods defined to be invoked upon insertion, + * removal and access that allow LinkedHashMap internals to + * otherwise remain independent of these mechanics. (This also + * requires that a map instance be passed to some utility methods + * that may create new nodes.) + * + * The concurrent-programming-like SSA-based coding style helps + * avoid aliasing errors amid all of the twisty pointer operations. + */ /** * The default initial capacity - MUST be a power of two. @@ -152,14 +247,158 @@ static final float DEFAULT_LOAD_FACTOR = 0.75f; /** - * An empty table instance to share when the table is not inflated. + * The bin count threshold for using a tree rather than list for a + * bin. Bins are converted to trees when adding an element to a + * bin with at least this many nodes. The value must be greater + * than 2 and should be at least 8 to mesh with assumptions in + * tree removal about conversion back to plain bins upon + * shrinkage. + */ + static final int TREEIFY_THRESHOLD = 8; + + /** + * The bin count threshold for untreeifying a (split) bin during a + * resize operation. Should be less than TREEIFY_THRESHOLD, and at + * most 6 to mesh with shrinkage detection under removal. + */ + static final int UNTREEIFY_THRESHOLD = 6; + + /** + * The smallest table capacity for which bins may be treeified. + * (Otherwise the table is resized if too many nodes in a bin.) + * Should be at least 4 * TREEIFY_THRESHOLD to avoid conflicts + * between resizing and treeification thresholds. + */ + static final int MIN_TREEIFY_CAPACITY = 64; + + /** + * Basic hash bin node, used for most entries. (See below for + * TreeNode subclass, and in LinkedHashMap for its Entry subclass.) 
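+     * (Editorial note: each bin is a singly linked chain of these
+     * nodes reached via table[(table.length - 1) & hash]; getNode
+     * below walks that chain, or delegates to TreeNode lookup.)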
     */
-    static final Object[] EMPTY_TABLE = {};
+    static class Node<K,V> implements Map.Entry<K,V> {
+        final int hash;
+        final K key;
+        V value;
+        Node<K,V> next;
+
+        Node(int hash, K key, V value, Node<K,V> next) {
+            this.hash = hash;
+            this.key = key;
+            this.value = value;
+            this.next = next;
+        }
+
+        public final K getKey()        { return key; }
+        public final V getValue()      { return value; }
+        public final String toString() { return key + "=" + value; }
+
+        public final int hashCode() {
+            return Objects.hashCode(key) ^ Objects.hashCode(value);
+        }
+
+        public final V setValue(V newValue) {
+            V oldValue = value;
+            value = newValue;
+            return oldValue;
+        }
+
+        public final boolean equals(Object o) {
+            if (o == this)
+                return true;
+            if (o instanceof Map.Entry) {
+                Map.Entry<?,?> e = (Map.Entry<?,?>)o;
+                if (Objects.equals(key, e.getKey()) &&
+                    Objects.equals(value, e.getValue()))
+                    return true;
+            }
+            return false;
+        }
+    }
+
+    /* ---------------- Static utilities -------------- */
 
     /**
-     * The table, resized as necessary. Length MUST Always be a power of two.
+     * Computes key.hashCode() and spreads (XORs) higher bits of hash
+     * to lower. Because the table uses power-of-two masking, sets of
+     * hashes that vary only in bits above the current mask will
+     * always collide. (Among known examples are sets of Float keys
+     * holding consecutive whole numbers in small tables.) So we
+     * apply a transform that spreads the impact of higher bits
+     * downward. There is a tradeoff between speed, utility, and
+     * quality of bit-spreading. Because many common sets of hashes
+     * are already reasonably distributed (so don't benefit from
+     * spreading), and because we use trees to handle large sets of
+     * collisions in bins, we just XOR some shifted bits in the
+     * cheapest possible way to reduce systematic lossage, as well as
+     * to incorporate impact of the highest bits that would otherwise
+     * never be used in index calculations because of table bounds.
+     */
+    static final int hash(Object key) {
+        int h;
+        return (key == null) ? 0 : (h = key.hashCode()) ^ (h >>> 16);
+    }
+
+    /**
+     * Returns x's Class if it is of the form "class C implements
+     * Comparable<C>", else null.
+     */
-    transient Object[] table = EMPTY_TABLE;
+    static Class<?> comparableClassFor(Object x) {
+        if (x instanceof Comparable) {
+            Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
+            if ((c = x.getClass()) == String.class) // bypass checks
+                return c;
+            if ((ts = c.getGenericInterfaces()) != null) {
+                for (int i = 0; i < ts.length; ++i) {
+                    if (((t = ts[i]) instanceof ParameterizedType) &&
+                        ((p = (ParameterizedType)t).getRawType() ==
+                         Comparable.class) &&
+                        (as = p.getActualTypeArguments()) != null &&
+                        as.length == 1 && as[0] == c) // type arg is c
+                        return c;
+                }
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Returns k.compareTo(x) if x matches kc (k's screened comparable
+     * class), else 0.
+     */
+    @SuppressWarnings({"rawtypes","unchecked"}) // for cast to Comparable
+    static int compareComparables(Class<?> kc, Object k, Object x) {
+        return (x == null || x.getClass() != kc ? 0 :
+                ((Comparable)k).compareTo(x));
+    }
+
+    /**
+     * Returns a power of two size for the given target capacity.
+     */
+    static final int tableSizeFor(int cap) {
+        int n = cap - 1;
+        n |= n >>> 1;
+        n |= n >>> 2;
+        n |= n >>> 4;
+        n |= n >>> 8;
+        n |= n >>> 16;
+        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
+    }
+
+    /* ---------------- Fields -------------- */
+
+    /**
+     * The table, initialized on first use, and resized as
+     * necessary. When allocated, length is always a power of two.
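+     * (Editorial note: informally, tableSizeFor above maintains this
+     * invariant for any requested capacity; e.g. tableSizeFor(16) == 16,
+     * tableSizeFor(17) == 32, and tableSizeFor(0) == 1.)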
+     * (We also tolerate length zero in some operations to allow
+     * bootstrapping mechanics that are currently not needed.)
+     */
+    transient Node<K,V>[] table;
+
+    /**
+     * Holds cached entrySet(). Note that AbstractMap fields are used
+     * for keySet() and values().
+     */
+    transient Set<Map.Entry<K,V>> entrySet;
 
     /**
      * The number of key-value mappings contained in this map.
@@ -167,21 +406,6 @@
     transient int size;
 
     /**
-     * The next size value at which to resize (capacity * load factor).
-     * @serial
-     */
-    // If table == EMPTY_TABLE then this is the initial capacity at which the
-    // table will be created when inflated.
-    int threshold;
-
-    /**
-     * The load factor for the hash table.
-     *
-     * @serial
-     */
-    final float loadFactor;
-
-    /**
      * The number of times this HashMap has been structurally modified
      * Structural modifications are those that change the number of mappings in
      * the HashMap or otherwise modify its internal structure (e.g.,
@@ -191,627 +415,24 @@
     transient int modCount;
 
     /**
-     * Holds values which can't be initialized until after VM is booted.
+     * The next size value at which to resize (capacity * load factor).
+     *
+     * @serial
      */
-    private static class Holder {
-        static final sun.misc.Unsafe UNSAFE;
-
-        /**
-         * Offset of "final" hashSeed field we must set in
-         * readObject() method.
-         */
-        static final long HASHSEED_OFFSET;
-
-        static final boolean USE_HASHSEED;
-
-        static {
-            String hashSeedProp = java.security.AccessController.doPrivileged(
-                    new sun.security.action.GetPropertyAction(
-                        "jdk.map.useRandomSeed"));
-            boolean localBool = (null != hashSeedProp)
-                    ? Boolean.parseBoolean(hashSeedProp) : false;
-            USE_HASHSEED = localBool;
-
-            if (USE_HASHSEED) {
-                try {
-                    UNSAFE = sun.misc.Unsafe.getUnsafe();
-                    HASHSEED_OFFSET = UNSAFE.objectFieldOffset(
-                        HashMap.class.getDeclaredField("hashSeed"));
-                } catch (NoSuchFieldException | SecurityException e) {
-                    throw new InternalError("Failed to record hashSeed offset", e);
-                }
-            } else {
-                UNSAFE = null;
-                HASHSEED_OFFSET = 0;
-            }
-        }
-    }
-
-    /*
-     * A randomizing value associated with this instance that is applied to
-     * hash code of keys to make hash collisions harder to find.
-     *
-     * Non-final so it can be set lazily, but be sure not to set more than once.
-     */
-    transient final int hashSeed;
-
-    /*
-     * TreeBin/TreeNode code from CHM doesn't handle the null key. Store the
-     * null key entry here.
-     */
-    transient Entry<K,V> nullKeyEntry = null;
-
-    /*
-     * In order to improve performance under high hash-collision conditions,
-     * HashMap will switch to storing a bin's entries in a balanced tree
-     * (TreeBin) instead of a linked-list once the number of entries in the bin
-     * passes a certain threshold (TreeBin.TREE_THRESHOLD), if at least one of
-     * the keys in the bin implements Comparable. This technique is borrowed
-     * from ConcurrentHashMap.
-     */
-
-    /*
-     * Code based on CHMv8
-     *
-     * Node type for TreeBin
-     */
-    final static class TreeNode<K,V> {
-        TreeNode parent;  // red-black tree links
-        TreeNode left;
-        TreeNode right;
-        TreeNode prev;    // needed to unlink next upon deletion
-        boolean red;
-        final HashMap.Entry<K,V> entry;
-
-        TreeNode(HashMap.Entry<K,V> entry, Object next, TreeNode parent) {
-            this.entry = entry;
-            this.entry.next = next;
-            this.parent = parent;
-        }
-    }
+    // (The javadoc description is true upon serialization.
+    // Additionally, if the table array has not been allocated, this
+    // field holds the initial array capacity, or zero signifying
+    // DEFAULT_INITIAL_CAPACITY.)
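+    // (Editorial example: new HashMap<>(17) leaves tableSizeFor(17) == 32
+    // here; the first put() then allocates a length-32 table via resize()
+    // and resets this field to (int)(32 * 0.75) == 24.)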
+ int threshold; /** - * Returns a Class for the given object of the form "class C - * implements Comparable", if one exists, else null. See the TreeBin - * docs, below, for explanation. - */ - static Class comparableClassFor(Object x) { - Class c, s, cmpc; Type[] ts, as; Type t; ParameterizedType p; - if ((c = x.getClass()) == String.class) // bypass checks - return c; - if ((cmpc = Comparable.class).isAssignableFrom(c)) { - while (cmpc.isAssignableFrom(s = c.getSuperclass())) - c = s; // find topmost comparable class - if ((ts = c.getGenericInterfaces()) != null) { - for (int i = 0; i < ts.length; ++i) { - if (((t = ts[i]) instanceof ParameterizedType) && - ((p = (ParameterizedType)t).getRawType() == cmpc) && - (as = p.getActualTypeArguments()) != null && - as.length == 1 && as[0] == c) // type arg is c - return c; - } - } - } - return null; - } - - /* - * Code based on CHMv8 + * The load factor for the hash table. * - * A specialized form of red-black tree for use in bins - * whose size exceeds a threshold. - * - * TreeBins use a special form of comparison for search and - * related operations (which is the main reason we cannot use - * existing collections such as TreeMaps). TreeBins contain - * Comparable elements, but may contain others, as well as - * elements that are Comparable but not necessarily Comparable - * for the same T, so we cannot invoke compareTo among them. To - * handle this, the tree is ordered primarily by hash value, then - * by Comparable.compareTo order if applicable. On lookup at a - * node, if elements are not comparable or compare as 0 then both - * left and right children may need to be searched in the case of - * tied hash values. (This corresponds to the full list search - * that would be necessary if all elements were non-Comparable and - * had tied hashes.) The red-black balancing code is updated from - * pre-jdk-collections - * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java) - * based in turn on Cormen, Leiserson, and Rivest "Introduction to - * Algorithms" (CLR). + * @serial */ - final class TreeBin { - /* - * The bin count threshold for using a tree rather than list for a bin. The - * value reflects the approximate break-even point for using tree-based - * operations. - */ - static final int TREE_THRESHOLD = 16; - - TreeNode root; // root of tree - TreeNode first; // head of next-pointer list - - /* - * Split a TreeBin into lo and hi parts and install in given table. - * - * Existing Entrys are re-used, which maintains the before/after links for - * LinkedHashMap.Entry. - * - * No check for Comparable, though this is the same as CHM. - */ - final void splitTreeBin(Object[] newTable, int i, TreeBin loTree, TreeBin hiTree) { - TreeBin oldTree = this; - int bit = newTable.length >>> 1; - int loCount = 0, hiCount = 0; - TreeNode e = oldTree.first; - TreeNode next; - - // This method is called when the table has just increased capacity, - // so indexFor() is now taking one additional bit of hash into - // account ("bit"). Entries in this TreeBin now belong in one of - // two bins, "i" or "i+bit", depending on if the new top bit of the - // hash is set. The trees for the two bins are loTree and hiTree. - // If either tree ends up containing fewer than TREE_THRESHOLD - // entries, it is converted back to a linked list. 
- while (e != null) { - // Save entry.next - it will get overwritten in putTreeNode() - next = (TreeNode)e.entry.next; - - int h = e.entry.hash; - K k = (K) e.entry.key; - V v = e.entry.value; - if ((h & bit) == 0) { - ++loCount; - // Re-using e.entry - loTree.putTreeNode(h, k, v, e.entry); - } else { - ++hiCount; - hiTree.putTreeNode(h, k, v, e.entry); - } - // Iterate using the saved 'next' - e = next; - } - if (loCount < TREE_THRESHOLD) { // too small, convert back to list - HashMap.Entry loEntry = null; - TreeNode p = loTree.first; - while (p != null) { - @SuppressWarnings("unchecked") - TreeNode savedNext = (TreeNode) p.entry.next; - p.entry.next = loEntry; - loEntry = p.entry; - p = savedNext; - } - // assert newTable[i] == null; - newTable[i] = loEntry; - } else { - // assert newTable[i] == null; - newTable[i] = loTree; - } - if (hiCount < TREE_THRESHOLD) { // too small, convert back to list - HashMap.Entry hiEntry = null; - TreeNode p = hiTree.first; - while (p != null) { - @SuppressWarnings("unchecked") - TreeNode savedNext = (TreeNode) p.entry.next; - p.entry.next = hiEntry; - hiEntry = p.entry; - p = savedNext; - } - // assert newTable[i + bit] == null; - newTable[i + bit] = hiEntry; - } else { - // assert newTable[i + bit] == null; - newTable[i + bit] = hiTree; - } - } - - /* - * Popuplate the TreeBin with entries from the linked list e - * - * Assumes 'this' is a new/empty TreeBin - * - * Note: no check for Comparable - * Note: I believe this changes iteration order - */ - @SuppressWarnings("unchecked") - void populate(HashMap.Entry e) { - // assert root == null; - // assert first == null; - HashMap.Entry next; - while (e != null) { - // Save entry.next - it will get overwritten in putTreeNode() - next = (HashMap.Entry)e.next; - // Re-using Entry e will maintain before/after in LinkedHM - putTreeNode(e.hash, (K)e.key, (V)e.value, e); - // Iterate using the saved 'next' - e = next; - } - } - - /** - * Copied from CHMv8 - * From CLR - */ - private void rotateLeft(TreeNode p) { - if (p != null) { - TreeNode r = p.right, pp, rl; - if ((rl = p.right = r.left) != null) { - rl.parent = p; - } - if ((pp = r.parent = p.parent) == null) { - root = r; - } else if (pp.left == p) { - pp.left = r; - } else { - pp.right = r; - } - r.left = p; - p.parent = r; - } - } - - /** - * Copied from CHMv8 - * From CLR - */ - private void rotateRight(TreeNode p) { - if (p != null) { - TreeNode l = p.left, pp, lr; - if ((lr = p.left = l.right) != null) { - lr.parent = p; - } - if ((pp = l.parent = p.parent) == null) { - root = l; - } else if (pp.right == p) { - pp.right = l; - } else { - pp.left = l; - } - l.right = p; - p.parent = l; - } - } - - /** - * Returns the TreeNode (or null if not found) for the given - * key. A front-end for recursive version. - */ - final TreeNode getTreeNode(int h, K k) { - return getTreeNode(h, k, root, comparableClassFor(k)); - } - - /** - * Returns the TreeNode (or null if not found) for the given key - * starting at given root. - */ - @SuppressWarnings("unchecked") - final TreeNode getTreeNode (int h, K k, TreeNode p, Class cc) { - // assert k != null; - while (p != null) { - int dir, ph; Object pk; - if ((ph = p.entry.hash) != h) - dir = (h < ph) ? 
-1 : 1; - else if ((pk = p.entry.key) == k || k.equals(pk)) - return p; - else if (cc == null || comparableClassFor(pk) != cc || - (dir = ((Comparable)k).compareTo(pk)) == 0) { - // assert pk != null; - TreeNode r, pl, pr; // check both sides - if ((pr = p.right) != null && - (r = getTreeNode(h, k, pr, cc)) != null) - return r; - else if ((pl = p.left) != null) - dir = -1; - else // nothing there - break; - } - p = (dir > 0) ? p.right : p.left; - } - return null; - } + final float loadFactor; - /* - * Finds or adds a node. - * - * 'entry' should be used to recycle an existing Entry (e.g. in the case - * of converting a linked-list bin to a TreeBin). - * If entry is null, a new Entry will be created for the new TreeNode - * - * @return the TreeNode containing the mapping, or null if a new - * TreeNode was added - */ - @SuppressWarnings("unchecked") - TreeNode putTreeNode(int h, K k, V v, HashMap.Entry entry) { - // assert k != null; - //if (entry != null) { - // assert h == entry.hash; - // assert k == entry.key; - // assert v == entry.value; - // } - Class cc = comparableClassFor(k); - TreeNode pp = root, p = null; - int dir = 0; - while (pp != null) { // find existing node or leaf to insert at - int ph; Object pk; - p = pp; - if ((ph = p.entry.hash) != h) - dir = (h < ph) ? -1 : 1; - else if ((pk = p.entry.key) == k || k.equals(pk)) - return p; - else if (cc == null || comparableClassFor(pk) != cc || - (dir = ((Comparable)k).compareTo(pk)) == 0) { - TreeNode r, pr; - if ((pr = p.right) != null && - (r = getTreeNode(h, k, pr, cc)) != null) - return r; - else // continue left - dir = -1; - } - pp = (dir > 0) ? p.right : p.left; - } - - // Didn't find the mapping in the tree, so add it - TreeNode f = first; - TreeNode x; - if (entry != null) { - x = new TreeNode(entry, f, p); - } else { - x = new TreeNode(newEntry(h, k, v, null), f, p); - } - first = x; - - if (p == null) { - root = x; - } else { // attach and rebalance; adapted from CLR - TreeNode xp, xpp; - if (f != null) { - f.prev = x; - } - if (dir <= 0) { - p.left = x; - } else { - p.right = x; - } - x.red = true; - while (x != null && (xp = x.parent) != null && xp.red - && (xpp = xp.parent) != null) { - TreeNode xppl = xpp.left; - if (xp == xppl) { - TreeNode y = xpp.right; - if (y != null && y.red) { - y.red = false; - xp.red = false; - xpp.red = true; - x = xpp; - } else { - if (x == xp.right) { - rotateLeft(x = xp); - xpp = (xp = x.parent) == null ? null : xp.parent; - } - if (xp != null) { - xp.red = false; - if (xpp != null) { - xpp.red = true; - rotateRight(xpp); - } - } - } - } else { - TreeNode y = xppl; - if (y != null && y.red) { - y.red = false; - xp.red = false; - xpp.red = true; - x = xpp; - } else { - if (x == xp.left) { - rotateRight(x = xp); - xpp = (xp = x.parent) == null ? null : xp.parent; - } - if (xp != null) { - xp.red = false; - if (xpp != null) { - xpp.red = true; - rotateLeft(xpp); - } - } - } - } - } - TreeNode r = root; - if (r != null && r.red) { - r.red = false; - } - } - return null; - } - - /* - * From CHMv8 - * - * Removes the given node, that must be present before this - * call. This is messier than typical red-black deletion code - * because we cannot swap the contents of an interior node - * with a leaf successor that is pinned by "next" pointers - * that are accessible independently of lock. So instead we - * swap the tree linkages. 
- */ - final void deleteTreeNode(TreeNode p) { - TreeNode next = (TreeNode) p.entry.next; // unlink traversal pointers - TreeNode pred = p.prev; - if (pred == null) { - first = next; - } else { - pred.entry.next = next; - } - if (next != null) { - next.prev = pred; - } - TreeNode replacement; - TreeNode pl = p.left; - TreeNode pr = p.right; - if (pl != null && pr != null) { - TreeNode s = pr, sl; - while ((sl = s.left) != null) // find successor - { - s = sl; - } - boolean c = s.red; - s.red = p.red; - p.red = c; // swap colors - TreeNode sr = s.right; - TreeNode pp = p.parent; - if (s == pr) { // p was s's direct parent - p.parent = s; - s.right = p; - } else { - TreeNode sp = s.parent; - if ((p.parent = sp) != null) { - if (s == sp.left) { - sp.left = p; - } else { - sp.right = p; - } - } - if ((s.right = pr) != null) { - pr.parent = s; - } - } - p.left = null; - if ((p.right = sr) != null) { - sr.parent = p; - } - if ((s.left = pl) != null) { - pl.parent = s; - } - if ((s.parent = pp) == null) { - root = s; - } else if (p == pp.left) { - pp.left = s; - } else { - pp.right = s; - } - replacement = sr; - } else { - replacement = (pl != null) ? pl : pr; - } - TreeNode pp = p.parent; - if (replacement == null) { - if (pp == null) { - root = null; - return; - } - replacement = p; - } else { - replacement.parent = pp; - if (pp == null) { - root = replacement; - } else if (p == pp.left) { - pp.left = replacement; - } else { - pp.right = replacement; - } - p.left = p.right = p.parent = null; - } - if (!p.red) { // rebalance, from CLR - TreeNode x = replacement; - while (x != null) { - TreeNode xp, xpl; - if (x.red || (xp = x.parent) == null) { - x.red = false; - break; - } - if (x == (xpl = xp.left)) { - TreeNode sib = xp.right; - if (sib != null && sib.red) { - sib.red = false; - xp.red = true; - rotateLeft(xp); - sib = (xp = x.parent) == null ? null : xp.right; - } - if (sib == null) { - x = xp; - } else { - TreeNode sl = sib.left, sr = sib.right; - if ((sr == null || !sr.red) - && (sl == null || !sl.red)) { - sib.red = true; - x = xp; - } else { - if (sr == null || !sr.red) { - if (sl != null) { - sl.red = false; - } - sib.red = true; - rotateRight(sib); - sib = (xp = x.parent) == null ? - null : xp.right; - } - if (sib != null) { - sib.red = (xp == null) ? false : xp.red; - if ((sr = sib.right) != null) { - sr.red = false; - } - } - if (xp != null) { - xp.red = false; - rotateLeft(xp); - } - x = root; - } - } - } else { // symmetric - TreeNode sib = xpl; - if (sib != null && sib.red) { - sib.red = false; - xp.red = true; - rotateRight(xp); - sib = (xp = x.parent) == null ? null : xp.left; - } - if (sib == null) { - x = xp; - } else { - TreeNode sl = sib.left, sr = sib.right; - if ((sl == null || !sl.red) - && (sr == null || !sr.red)) { - sib.red = true; - x = xp; - } else { - if (sl == null || !sl.red) { - if (sr != null) { - sr.red = false; - } - sib.red = true; - rotateLeft(sib); - sib = (xp = x.parent) == null ? - null : xp.left; - } - if (sib != null) { - sib.red = (xp == null) ? 
false : xp.red; - if ((sl = sib.left) != null) { - sl.red = false; - } - } - if (xp != null) { - xp.red = false; - rotateRight(xp); - } - x = root; - } - } - } - } - } - if (p == replacement && (pp = p.parent) != null) { - if (p == pp.left) // detach pointers - { - pp.left = null; - } else if (p == pp.right) { - pp.right = null; - } - p.parent = null; - } - } - } + /* ---------------- Public operations -------------- */ /** * Constructs an empty HashMap with the specified initial @@ -832,9 +453,7 @@ throw new IllegalArgumentException("Illegal load factor: " + loadFactor); this.loadFactor = loadFactor; - threshold = initialCapacity; - hashSeed = initHashSeed(); - init(); + this.threshold = tableSizeFor(initialCapacity); } /** @@ -853,7 +472,7 @@ * (16) and the default load factor (0.75). */ public HashMap() { - this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR); + this.loadFactor = DEFAULT_LOAD_FACTOR; // all other fields defaulted } /** @@ -866,79 +485,35 @@ * @throws NullPointerException if the specified map is null */ public HashMap(Map m) { - this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR); - inflateTable(threshold); - - putAllForCreate(m); - // assert size == m.size(); - } - - private static int roundUpToPowerOf2(int number) { - // assert number >= 0 : "number must be non-negative"; - return number >= MAXIMUM_CAPACITY - ? MAXIMUM_CAPACITY - : (number > 1) ? Integer.highestOneBit((number - 1) << 1) : 1; + this.loadFactor = DEFAULT_LOAD_FACTOR; + putMapEntries(m, false); } /** - * Inflates the table. - */ - private void inflateTable(int toSize) { - // Find a power of 2 >= toSize - int capacity = roundUpToPowerOf2(toSize); - - threshold = (int) Math.min(capacity * loadFactor, MAXIMUM_CAPACITY + 1); - table = new Object[capacity]; - } - - // internal utilities - - /** - * Initialization hook for subclasses. This method is called - * in all constructors and pseudo-constructors (clone, readObject) - * after HashMap has been initialized but before any entries have - * been inserted. (In the absence of this method, readObject would - * require explicit knowledge of subclasses.) - */ - void init() { - } - - /** - * Return an initial value for the hashSeed, or 0 if the random seed is not - * enabled. + * Implements Map.putAll and Map constructor + * + * @param m the map + * @param evict false when initially constructing this map, else + * true (relayed to method afterNodeInsertion). */ - final int initHashSeed() { - if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { - int seed = ThreadLocalRandom.current().nextInt(); - return (seed != 0) ? seed : 1; + final void putMapEntries(Map m, boolean evict) { + int s = m.size(); + if (s > 0) { + if (table == null) { // pre-size + float ft = ((float)s / loadFactor) + 1.0F; + int t = ((ft < (float)MAXIMUM_CAPACITY) ? + (int)ft : MAXIMUM_CAPACITY); + if (t > threshold) + threshold = tableSizeFor(t); + } + else if (s > threshold) + resize(); + for (Map.Entry e : m.entrySet()) { + K key = e.getKey(); + V value = e.getValue(); + putVal(hash(key), key, value, false, evict); + } } - return 0; - } - - /** - * Retrieve object hash code and applies a supplemental hash function to the - * result hash, which defends against poor quality hash functions. This is - * critical because HashMap uses power-of-two length hash tables, that - * otherwise encounter collisions for hashCodes that do not differ - * in lower bits. 
- */ - final int hash(Object k) { - int h = hashSeed ^ k.hashCode(); - - // This function ensures that hashCodes that differ only by - // constant multiples at each bit position have a bounded - // number of collisions (approximately 8 at default load factor). - h ^= (h >>> 20) ^ (h >>> 12); - return h ^ (h >>> 7) ^ (h >>> 4); - } - - /** - * Returns index for hash code h. - */ - static int indexFor(int h, int length) { - // assert Integer.bitCount(length) == 1 : "length must be a non-zero power of 2"; - return h & (length-1); } /** @@ -976,18 +551,36 @@ * * @see #put(Object, Object) */ - @SuppressWarnings("unchecked") public V get(Object key) { - Entry entry = getEntry(key); - - return null == entry ? null : entry.getValue(); + Node e; + return (e = getNode(hash(key), key)) == null ? null : e.value; } - @Override - public V getOrDefault(Object key, V defaultValue) { - Entry entry = getEntry(key); - - return (entry == null) ? defaultValue : entry.getValue(); + /** + * Implements Map.get and related methods + * + * @param hash hash for key + * @param key the key + * @return the node, or null if none + */ + final Node getNode(int hash, Object key) { + Node[] tab; Node first, e; int n; K k; + if ((tab = table) != null && (n = tab.length) > 0 && + (first = tab[(n - 1) & hash]) != null) { + if (first.hash == hash && // always check first node + ((k = first.key) == key || (key != null && key.equals(k)))) + return first; + if ((e = first.next) != null) { + if (first instanceof TreeNode) + return ((TreeNode)first).getTreeNode(hash, key); + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) + return e; + } while ((e = e.next) != null); + } + } + return null; } /** @@ -999,49 +592,10 @@ * key. */ public boolean containsKey(Object key) { - return getEntry(key) != null; + return getNode(hash(key), key) != null; } /** - * Returns the entry associated with the specified key in the - * HashMap. Returns null if the HashMap contains no mapping - * for the key. - */ - @SuppressWarnings("unchecked") - final Entry getEntry(Object key) { - if (size == 0) { - return null; - } - if (key == null) { - return nullKeyEntry; - } - int hash = hash(key); - int bin = indexFor(hash, table.length); - - if (table[bin] instanceof Entry) { - Entry e = (Entry) table[bin]; - for (; e != null; e = (Entry)e.next) { - Object k; - if (e.hash == hash && - ((k = e.key) == key || key.equals(k))) { - return e; - } - } - } else if (table[bin] != null) { - TreeBin e = (TreeBin)table[bin]; - TreeNode p = e.getTreeNode(hash, (K)key); - if (p != null) { - // assert p.entry.hash == hash && p.entry.key.equals(key); - return (Entry)p.entry; - } else { - return null; - } - } - return null; - } - - - /** * Associates the specified value with the specified key in this map. * If the map previously contained a mapping for the key, the old * value is replaced. @@ -1053,202 +607,169 @@ * (A null return can also indicate that the map * previously associated null with key.) */ - @SuppressWarnings("unchecked") public V put(K key, V value) { - if (table == EMPTY_TABLE) { - inflateTable(threshold); - } - if (key == null) - return putForNullKey(value); - int hash = hash(key); - int i = indexFor(hash, table.length); - boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? + return putVal(hash(key), key, value, false, true); + } - if (table[i] instanceof Entry) { - // Bin contains ordinary Entries. Search for key in the linked list - // of entries, counting the number of entries. 
Only check for - // TreeBin conversion if the list size is >= TREE_THRESHOLD. - // (The conversion still may not happen if the table gets resized.) - int listSize = 0; - Entry e = (Entry) table[i]; - for (; e != null; e = (Entry)e.next) { - Object k; - if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { - V oldValue = e.value; + /** + * Implements Map.put and related methods + * + * @param hash hash for key + * @param key the key + * @param value the value to put + * @param onlyIfAbsent if true, don't change existing value + * @param evict if false, the table is in creation mode. + * @return previous value, or null if none + */ + final V putVal(int hash, K key, V value, boolean onlyIfAbsent, + boolean evict) { + Node[] tab; Node p; int n, i; + if (size > threshold || (tab = table) == null || + (n = tab.length) == 0) + n = (tab = resize()).length; + if ((p = tab[i = (n - 1) & hash]) == null) + tab[i] = newNode(hash, key, value, null); + else { + Node e; K k; + if (p.hash == hash && + ((k = p.key) == key || (key != null && key.equals(k)))) + e = p; + else if (p instanceof TreeNode) + e = ((TreeNode)p).putTreeVal(this, tab, hash, key, value); + else { + for (int binCount = 0; ; ++binCount) { + if ((e = p.next) == null) { + p.next = newNode(hash, key, value, null); + if (binCount >= TREEIFY_THRESHOLD - 1) // -1 for 1st + treeifyBin(tab, hash); + break; + } + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) + break; + p = e; + } + } + if (e != null) { // existing mapping for key + V oldValue = e.value; + if (!onlyIfAbsent || oldValue == null) e.value = value; - e.recordAccess(this); - return oldValue; - } - listSize++; - } - // Didn't find, so fall through and call addEntry() to add the - // Entry and check for TreeBin conversion. - checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; - } else if (table[i] != null) { - TreeBin e = (TreeBin)table[i]; - TreeNode p = e.putTreeNode(hash, key, value, null); - if (p == null) { // putTreeNode() added a new node - modCount++; - size++; - if (size >= threshold) { - resize(2 * table.length); - } - return null; - } else { // putTreeNode() found an existing node - Entry pEntry = (Entry)p.entry; - V oldVal = pEntry.value; - pEntry.value = value; - pEntry.recordAccess(this); - return oldVal; + afterNodeAccess(e); + return oldValue; } } - modCount++; - addEntry(hash, key, value, i, checkIfNeedTree); + ++modCount; + ++size; + afterNodeInsertion(evict); return null; } /** - * Offloaded version of put for null keys + * Initializes or doubles table size. If null, allocates in + * accord with initial capacity target held in field threshold. + * Otherwise, because we are using power-of-two expansion, the + * elements from each bin must either stay at same index, or move + * with a power of two offset in the new table. + * + * @return the table */ - private V putForNullKey(V value) { - if (nullKeyEntry != null) { - V oldValue = nullKeyEntry.value; - nullKeyEntry.value = value; - nullKeyEntry.recordAccess(this); - return oldValue; + final Node[] resize() { + Node[] oldTab = table; + int oldCap = (oldTab == null) ? 
0 : oldTab.length; + int oldThr = threshold; + int newCap, newThr = 0; + if (oldCap > 0) { + if (oldCap >= MAXIMUM_CAPACITY) { + threshold = Integer.MAX_VALUE; + return oldTab; + } + else if ((newCap = oldCap << 1) < MAXIMUM_CAPACITY && + oldCap >= DEFAULT_INITIAL_CAPACITY) + newThr = oldThr << 1; // double threshold } - modCount++; - size++; // newEntry() skips size++ - nullKeyEntry = newEntry(0, null, value, null); - return null; - } - - private void putForCreateNullKey(V value) { - // Look for preexisting entry for key. This will never happen for - // clone or deserialize. It will only happen for construction if the - // input Map is a sorted map whose ordering is inconsistent w/ equals. - if (nullKeyEntry != null) { - nullKeyEntry.value = value; - } else { - nullKeyEntry = newEntry(0, null, value, null); - size++; + else if (oldThr > 0) // initial capacity was placed in threshold + newCap = oldThr; + else { // zero initial threshold signifies using defaults + newCap = DEFAULT_INITIAL_CAPACITY; + newThr = (int)(DEFAULT_LOAD_FACTOR * DEFAULT_INITIAL_CAPACITY); } - } - - - /** - * This method is used instead of put by constructors and - * pseudoconstructors (clone, readObject). It does not resize the table, - * check for comodification, etc, though it will convert bins to TreeBins - * as needed. It calls createEntry rather than addEntry. - */ - @SuppressWarnings("unchecked") - private void putForCreate(K key, V value) { - if (null == key) { - putForCreateNullKey(value); - return; + if (newThr == 0) { + float ft = (float)newCap * loadFactor; + newThr = (newCap < MAXIMUM_CAPACITY && ft < (float)MAXIMUM_CAPACITY ? + (int)ft : Integer.MAX_VALUE); } - int hash = hash(key); - int i = indexFor(hash, table.length); - boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? - - /** - * Look for preexisting entry for key. This will never happen for - * clone or deserialize. It will only happen for construction if the - * input Map is a sorted map whose ordering is inconsistent w/ equals. - */ - if (table[i] instanceof Entry) { - int listSize = 0; - Entry e = (Entry) table[i]; - for (; e != null; e = (Entry)e.next) { - Object k; - if (e.hash == hash && ((k = e.key) == key || key.equals(k))) { - e.value = value; - return; + threshold = newThr; + @SuppressWarnings({"rawtypes","unchecked"}) + Node[] newTab = (Node[])new Node[newCap]; + table = newTab; + if (oldTab != null) { + for (int j = 0; j < oldCap; ++j) { + Node e; + if ((e = oldTab[j]) != null) { + oldTab[j] = null; + if (e.next == null) + newTab[e.hash & (newCap - 1)] = e; + else if (e instanceof TreeNode) + ((TreeNode)e).split(this, newTab, j, oldCap); + else { // preserve order + Node loHead = null, loTail = null; + Node hiHead = null, hiTail = null; + Node next; + do { + next = e.next; + if ((e.hash & oldCap) == 0) { + if (loTail == null) + loHead = e; + else + loTail.next = e; + loTail = e; + } + else { + if (hiTail == null) + hiHead = e; + else + hiTail.next = e; + hiTail = e; + } + } while ((e = next) != null); + if (loTail != null) { + loTail.next = null; + newTab[j] = loHead; + } + if (hiTail != null) { + hiTail.next = null; + newTab[j + oldCap] = hiHead; + } + } } - listSize++; } - // Didn't find, fall through to createEntry(). - // Check for conversion to TreeBin done via createEntry(). 
- checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; - } else if (table[i] != null) { - TreeBin e = (TreeBin)table[i]; - TreeNode p = e.putTreeNode(hash, key, value, null); - if (p != null) { - p.entry.setValue(value); // Found an existing node, set value - } else { - size++; // Added a new TreeNode, so update size - } - // don't need modCount++/check for resize - just return - return; } - - createEntry(hash, key, value, i, checkIfNeedTree); - } - - private void putAllForCreate(Map m) { - for (Map.Entry e : m.entrySet()) - putForCreate(e.getKey(), e.getValue()); + return newTab; } /** - * Rehashes the contents of this map into a new array with a - * larger capacity. This method is called automatically when the - * number of keys in this map reaches its threshold. - * - * If current capacity is MAXIMUM_CAPACITY, this method does not - * resize the map, but sets threshold to Integer.MAX_VALUE. - * This has the effect of preventing future calls. - * - * @param newCapacity the new capacity, MUST be a power of two; - * must be greater than current capacity unless current - * capacity is MAXIMUM_CAPACITY (in which case value - * is irrelevant). + * Replaces all linked nodes in bin at index for given hash unless + * table is too small, in which case resizes instead. */ - void resize(int newCapacity) { - Object[] oldTable = table; - int oldCapacity = oldTable.length; - if (oldCapacity == MAXIMUM_CAPACITY) { - threshold = Integer.MAX_VALUE; - return; + final void treeifyBin(Node[] tab, int hash) { + int n, index; Node e; + if (tab == null || (n = tab.length) < MIN_TREEIFY_CAPACITY) + resize(); + else if ((e = tab[index = (n - 1) & hash]) != null) { + TreeNode hd = null, tl = null; + do { + TreeNode p = replacementTreeNode(e, null); + if (tl == null) + hd = p; + else { + p.prev = tl; + tl.next = p; + } + tl = p; + } while ((e = e.next) != null); + if ((tab[index] = hd) != null) + hd.treeify(tab); } - - Object[] newTable = new Object[newCapacity]; - transfer(newTable); - table = newTable; - threshold = (int)Math.min(newCapacity * loadFactor, MAXIMUM_CAPACITY + 1); - } - - /** - * Transfers all entries from current table to newTable. - * - * Assumes newTable is larger than table - */ - @SuppressWarnings("unchecked") - void transfer(Object[] newTable) { - Object[] src = table; - // assert newTable.length > src.length : "newTable.length(" + - // newTable.length + ") expected to be > src.length("+src.length+")"; - int newCapacity = newTable.length; - for (int j = 0; j < src.length; j++) { - if (src[j] instanceof Entry) { - // Assume: since wasn't TreeBin before, won't need TreeBin now - Entry e = (Entry) src[j]; - while (null != e) { - Entry next = (Entry)e.next; - int i = indexFor(e.hash, newCapacity); - e.next = (Entry) newTable[i]; - newTable[i] = e; - e = next; - } - } else if (src[j] != null) { - TreeBin e = (TreeBin) src[j]; - TreeBin loTree = new TreeBin(); - TreeBin hiTree = new TreeBin(); - e.splitTreeBin(newTable, j, loTree, hiTree); - } - } - Arrays.fill(table, null); } /** @@ -1260,30 +781,8 @@ * @throws NullPointerException if the specified map is null */ public void putAll(Map m) { - int numKeysToBeAdded = m.size(); - if (numKeysToBeAdded == 0) - return; - - if (table == EMPTY_TABLE) { - inflateTable((int) Math.max(numKeysToBeAdded * loadFactor, threshold)); - } - - /* - * Expand the map if the map if the number of mappings to be added - * is greater than or equal to threshold. 
This is conservative; the - * obvious condition is (m.size() + size) >= threshold, but this - * condition could result in a map with twice the appropriate capacity, - * if the keys to be added overlap with the keys already in this map. - * By using the conservative calculation, we subject ourself - * to at most one extra resize. - */ - if (numKeysToBeAdded > threshold && table.length < MAXIMUM_CAPACITY) { - resize(table.length * 2); - } - - for (Map.Entry e : m.entrySet()) - put(e.getKey(), e.getValue()); - } + putMapEntries(m, true); + } /** * Removes the mapping for the specified key from this map if present. @@ -1295,834 +794,74 @@ * previously associated null with key.) */ public V remove(Object key) { - Entry e = removeEntryForKey(key); - return (e == null ? null : e.value); - } - - // optimized implementations of default methods in Map - - @Override - public void forEach(BiConsumer action) { - Objects.requireNonNull(action); - final int expectedModCount = modCount; - if (nullKeyEntry != null) { - forEachNullKey(expectedModCount, action); - } - Object[] tab = this.table; - for (int index = 0; index < tab.length; index++) { - Object item = tab[index]; - if (item == null) { - continue; - } - if (item instanceof HashMap.TreeBin) { - eachTreeNode(expectedModCount, ((TreeBin)item).first, action); - continue; - } - @SuppressWarnings("unchecked") - Entry entry = (Entry)item; - while (entry != null) { - action.accept(entry.key, entry.value); - entry = (Entry)entry.next; - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } - } - } - } - - private void eachTreeNode(int expectedModCount, TreeNode node, BiConsumer action) { - while (node != null) { - @SuppressWarnings("unchecked") - Entry entry = (Entry)node.entry; - action.accept(entry.key, entry.value); - node = (TreeNode)entry.next; - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } - } - } - - private void forEachNullKey(int expectedModCount, BiConsumer action) { - action.accept(null, nullKeyEntry.value); - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } - } - - @Override - public void replaceAll(BiFunction function) { - Objects.requireNonNull(function); - final int expectedModCount = modCount; - if (nullKeyEntry != null) { - replaceforNullKey(expectedModCount, function); - } - Object[] tab = this.table; - for (int index = 0; index < tab.length; index++) { - Object item = tab[index]; - if (item == null) { - continue; - } - if (item instanceof HashMap.TreeBin) { - replaceEachTreeNode(expectedModCount, ((TreeBin)item).first, function); - continue; - } - @SuppressWarnings("unchecked") - Entry entry = (Entry)item; - while (entry != null) { - entry.value = function.apply(entry.key, entry.value); - entry = (Entry)entry.next; - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } - } - } - } - - private void replaceEachTreeNode(int expectedModCount, TreeNode node, BiFunction function) { - while (node != null) { - @SuppressWarnings("unchecked") - Entry entry = (Entry)node.entry; - entry.value = function.apply(entry.key, entry.value); - node = (TreeNode)entry.next; - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } - } - } - - private void replaceforNullKey(int expectedModCount, BiFunction function) { - nullKeyEntry.value = function.apply(null, nullKeyEntry.value); - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } + 
Node e; + return (e = removeNode(hash(key), key, null, false, true)) == null ? + null : e.value; } - @Override - public V putIfAbsent(K key, V value) { - if (table == EMPTY_TABLE) { - inflateTable(threshold); - } - if (key == null) { - if (nullKeyEntry == null || nullKeyEntry.value == null) { - putForNullKey(value); - return null; - } else { - return nullKeyEntry.value; - } - } - int hash = hash(key); - int i = indexFor(hash, table.length); - boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? - - if (table[i] instanceof Entry) { - int listSize = 0; - Entry e = (Entry) table[i]; - for (; e != null; e = (Entry)e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - if (e.value != null) { - return e.value; - } - e.value = value; - e.recordAccess(this); - return null; - } - listSize++; - } - // Didn't find, so fall through and call addEntry() to add the - // Entry and check for TreeBin conversion. - checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; - } else if (table[i] != null) { - TreeBin e = (TreeBin)table[i]; - TreeNode p = e.putTreeNode(hash, key, value, null); - if (p == null) { // not found, putTreeNode() added a new node - modCount++; - size++; - if (size >= threshold) { - resize(2 * table.length); - } - return null; - } else { // putTreeNode() found an existing node - Entry pEntry = (Entry)p.entry; - V oldVal = pEntry.value; - if (oldVal == null) { // only replace if maps to null - pEntry.value = value; - pEntry.recordAccess(this); - } - return oldVal; - } - } - modCount++; - addEntry(hash, key, value, i, checkIfNeedTree); - return null; - } - - @Override - public boolean remove(Object key, Object value) { - if (size == 0) { - return false; - } - if (key == null) { - if (nullKeyEntry != null && - Objects.equals(nullKeyEntry.value, value)) { - removeNullKey(); - return true; - } - return false; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - - if (table[i] instanceof Entry) { - @SuppressWarnings("unchecked") - Entry prev = (Entry) table[i]; - Entry e = prev; - while (e != null) { - @SuppressWarnings("unchecked") - Entry next = (Entry) e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - if (!Objects.equals(e.value, value)) { - return false; - } - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return true; - } - prev = e; - e = next; - } - } else if (table[i] != null) { - TreeBin tb = ((TreeBin) table[i]); - TreeNode p = tb.getTreeNode(hash, (K)key); - if (p != null) { - Entry pEntry = (Entry)p.entry; - // assert pEntry.key.equals(key); - if (Objects.equals(pEntry.value, value)) { - modCount++; - size--; - tb.deleteTreeNode(p); - pEntry.recordRemoval(this); - if (tb.root == null || tb.first == null) { - // assert tb.root == null && tb.first == null : - // "TreeBin.first and root should both be null"; - // TreeBin is now empty, we should blank this bin - table[i] = null; - } - return true; + /** + * Implements Map.remove and related methods + * + * @param hash hash for key + * @param key the key + * @param value the value to match if matchValue, else ignored + * @param matchValue if true only remove if value is equal + * @param movable if false do not move other nodes while removing + * @return the node, or null if none + */ + final Node removeNode(int hash, Object key, Object value, + boolean matchValue, boolean movable) { + Node[] tab; Node p; int n, index; + if ((tab = table) != null && (n = tab.length) > 0 && + (p = tab[index = (n - 1) & hash]) != null) 
{ + Node node = null, e; K k; V v; + if (p.hash == hash && + ((k = p.key) == key || (key != null && key.equals(k)))) + node = p; + else if ((e = p.next) != null) { + if (p instanceof TreeNode) + node = ((TreeNode)p).getTreeNode(hash, key); + else { + do { + if (e.hash == hash && + ((k = e.key) == key || + (key != null && key.equals(k)))) { + node = e; + break; + } + p = e; + } while ((e = e.next) != null); } } - } - return false; - } - - @Override - public boolean replace(K key, V oldValue, V newValue) { - if (size == 0) { - return false; - } - if (key == null) { - if (nullKeyEntry != null && - Objects.equals(nullKeyEntry.value, oldValue)) { - putForNullKey(newValue); - return true; - } - return false; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - - if (table[i] instanceof Entry) { - @SuppressWarnings("unchecked") - Entry e = (Entry) table[i]; - for (; e != null; e = (Entry)e.next) { - if (e.hash == hash && Objects.equals(e.key, key) && Objects.equals(e.value, oldValue)) { - e.value = newValue; - e.recordAccess(this); - return true; - } - } - return false; - } else if (table[i] != null) { - TreeBin tb = ((TreeBin) table[i]); - TreeNode p = tb.getTreeNode(hash, key); - if (p != null) { - Entry pEntry = (Entry)p.entry; - // assert pEntry.key.equals(key); - if (Objects.equals(pEntry.value, oldValue)) { - pEntry.value = newValue; - pEntry.recordAccess(this); - return true; - } - } - } - return false; - } - - @Override - public V replace(K key, V value) { - if (size == 0) { - return null; - } - if (key == null) { - if (nullKeyEntry != null) { - return putForNullKey(value); - } - return null; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - if (table[i] instanceof Entry) { - @SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; - for (; e != null; e = (Entry)e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - e.value = value; - e.recordAccess(this); - return oldValue; - } - } - - return null; - } else if (table[i] != null) { - TreeBin tb = ((TreeBin) table[i]); - TreeNode p = tb.getTreeNode(hash, key); - if (p != null) { - Entry pEntry = (Entry)p.entry; - // assert pEntry.key.equals(key); - V oldValue = pEntry.value; - pEntry.value = value; - pEntry.recordAccess(this); - return oldValue; + if (node != null && (!matchValue || (v = node.value) == value || + (value != null && value.equals(v)))) { + if (node instanceof TreeNode) + ((TreeNode)node).removeTreeNode(this, tab, movable); + else if (node == p) + tab[index] = node.next; + else + p.next = node.next; + ++modCount; + --size; + afterNodeRemoval(node); + return node; } } return null; } - @Override - public V computeIfAbsent(K key, Function mappingFunction) { - if (table == EMPTY_TABLE) { - inflateTable(threshold); - } - if (key == null) { - if (nullKeyEntry == null || nullKeyEntry.value == null) { - V newValue = mappingFunction.apply(key); - if (newValue != null) { - putForNullKey(newValue); - } - return newValue; - } - return nullKeyEntry.value; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? 
- - if (table[i] instanceof Entry) { - int listSize = 0; - @SuppressWarnings("unchecked") - Entry e = (Entry)table[i]; - for (; e != null; e = (Entry)e.next) { - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - if (oldValue == null) { - V newValue = mappingFunction.apply(key); - if (newValue != null) { - e.value = newValue; - e.recordAccess(this); - } - return newValue; - } - return oldValue; - } - listSize++; - } - // Didn't find, fall through to call the mapping function - checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; - } else if (table[i] != null) { - TreeBin e = (TreeBin)table[i]; - V value = mappingFunction.apply(key); - if (value == null) { // Return the existing value, if any - TreeNode p = e.getTreeNode(hash, key); - if (p != null) { - return (V) p.entry.value; - } - return null; - } else { // Put the new value into the Tree, if absent - TreeNode p = e.putTreeNode(hash, key, value, null); - if (p == null) { // not found, new node was added - modCount++; - size++; - if (size >= threshold) { - resize(2 * table.length); - } - return value; - } else { // putTreeNode() found an existing node - Entry pEntry = (Entry)p.entry; - V oldVal = pEntry.value; - if (oldVal == null) { // only replace if maps to null - pEntry.value = value; - pEntry.recordAccess(this); - return value; - } - return oldVal; - } - } - } - V newValue = mappingFunction.apply(key); - if (newValue != null) { // add Entry and check for TreeBin conversion - modCount++; - addEntry(hash, key, newValue, i, checkIfNeedTree); - } - - return newValue; - } - - @Override - public V computeIfPresent(K key, BiFunction remappingFunction) { - if (size == 0) { - return null; - } - if (key == null) { - V oldValue; - if (nullKeyEntry != null && (oldValue = nullKeyEntry.value) != null) { - V newValue = remappingFunction.apply(key, oldValue); - if (newValue != null ) { - putForNullKey(newValue); - return newValue; - } else { - removeNullKey(); - } - } - return null; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - if (table[i] instanceof Entry) { - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - while (e != null) { - Entry next = (Entry)e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - if (oldValue == null) - break; - V newValue = remappingFunction.apply(key, oldValue); - if (newValue == null) { - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - } else { - e.value = newValue; - e.recordAccess(this); - } - return newValue; - } - prev = e; - e = next; - } - } else if (table[i] != null) { - TreeBin tb = (TreeBin)table[i]; - TreeNode p = tb.getTreeNode(hash, key); - if (p != null) { - Entry pEntry = (Entry)p.entry; - // assert pEntry.key.equals(key); - V oldValue = pEntry.value; - if (oldValue != null) { - V newValue = remappingFunction.apply(key, oldValue); - if (newValue == null) { // remove mapping - modCount++; - size--; - tb.deleteTreeNode(p); - pEntry.recordRemoval(this); - if (tb.root == null || tb.first == null) { - // assert tb.root == null && tb.first == null : - // "TreeBin.first and root should both be null"; - // TreeBin is now empty, we should blank this bin - table[i] = null; - } - } else { - pEntry.value = newValue; - pEntry.recordAccess(this); - } - return newValue; - } - } - } - return null; - } - - @Override - public V compute(K key, BiFunction remappingFunction) { - if (table == EMPTY_TABLE) { - inflateTable(threshold); - } - if (key 
== null) { - V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value; - V newValue = remappingFunction.apply(key, oldValue); - if (newValue != oldValue || (oldValue == null && nullKeyEntry != null)) { - if (newValue == null) { - removeNullKey(); - } else { - putForNullKey(newValue); - } - } - return newValue; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? - - if (table[i] instanceof Entry) { - int listSize = 0; - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - Entry next = (Entry)e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - V newValue = remappingFunction.apply(key, oldValue); - if (newValue != oldValue || oldValue == null) { - if (newValue == null) { - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - } else { - e.value = newValue; - e.recordAccess(this); - } - } - return newValue; - } - prev = e; - e = next; - listSize++; - } - checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; - } else if (table[i] != null) { - TreeBin tb = (TreeBin)table[i]; - TreeNode p = tb.getTreeNode(hash, key); - V oldValue = p == null ? null : (V)p.entry.value; - V newValue = remappingFunction.apply(key, oldValue); - if (newValue != oldValue || (oldValue == null && p != null)) { - if (newValue == null) { - Entry pEntry = (Entry)p.entry; - modCount++; - size--; - tb.deleteTreeNode(p); - pEntry.recordRemoval(this); - if (tb.root == null || tb.first == null) { - // assert tb.root == null && tb.first == null : - // "TreeBin.first and root should both be null"; - // TreeBin is now empty, we should blank this bin - table[i] = null; - } - } else { - if (p != null) { // just update the value - Entry pEntry = (Entry)p.entry; - pEntry.value = newValue; - pEntry.recordAccess(this); - } else { // need to put new node - p = tb.putTreeNode(hash, key, newValue, null); - // assert p == null; // should have added a new node - modCount++; - size++; - if (size >= threshold) { - resize(2 * table.length); - } - } - } - } - return newValue; - } - - V newValue = remappingFunction.apply(key, null); - if (newValue != null) { - modCount++; - addEntry(hash, key, newValue, i, checkIfNeedTree); - } - - return newValue; - } - - @Override - public V merge(K key, V value, BiFunction remappingFunction) { - if (table == EMPTY_TABLE) { - inflateTable(threshold); - } - if (key == null) { - V oldValue = nullKeyEntry == null ? null : nullKeyEntry.value; - V newValue = oldValue == null ? value : remappingFunction.apply(oldValue, value); - if (newValue != null) { - putForNullKey(newValue); - } else if (nullKeyEntry != null) { - removeNullKey(); - } - return newValue; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - boolean checkIfNeedTree = false; // Might we convert bin to a TreeBin? - - if (table[i] instanceof Entry) { - int listSize = 0; - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - Entry next = (Entry)e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - V oldValue = e.value; - V newValue = (oldValue == null) ? 
value : - remappingFunction.apply(oldValue, value); - if (newValue == null) { - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - } else { - e.value = newValue; - e.recordAccess(this); - } - return newValue; - } - prev = e; - e = next; - listSize++; - } - // Didn't find, so fall through and (maybe) call addEntry() to add - // the Entry and check for TreeBin conversion. - checkIfNeedTree = listSize >= TreeBin.TREE_THRESHOLD; - } else if (table[i] != null) { - TreeBin tb = (TreeBin)table[i]; - TreeNode p = tb.getTreeNode(hash, key); - V oldValue = p == null ? null : (V)p.entry.value; - V newValue = (oldValue == null) ? value : - remappingFunction.apply(oldValue, value); - if (newValue == null) { - if (p != null) { - Entry pEntry = (Entry)p.entry; - modCount++; - size--; - tb.deleteTreeNode(p); - pEntry.recordRemoval(this); - - if (tb.root == null || tb.first == null) { - // assert tb.root == null && tb.first == null : - // "TreeBin.first and root should both be null"; - // TreeBin is now empty, we should blank this bin - table[i] = null; - } - } - return null; - } else if (newValue != oldValue) { - if (p != null) { // just update the value - Entry pEntry = (Entry)p.entry; - pEntry.value = newValue; - pEntry.recordAccess(this); - } else { // need to put new node - p = tb.putTreeNode(hash, key, newValue, null); - // assert p == null; // should have added a new node - modCount++; - size++; - if (size >= threshold) { - resize(2 * table.length); - } - } - } - return newValue; - } - if (value != null) { - modCount++; - addEntry(hash, key, value, i, checkIfNeedTree); - } - return value; - } - - // end of optimized implementations of default methods in Map - - /** - * Removes and returns the entry associated with the specified key - * in the HashMap. Returns null if the HashMap contains no mapping - * for this key. - * - * We don't bother converting TreeBins back to Entry lists if the bin falls - * back below TREE_THRESHOLD, but we do clear bins when removing the last - * TreeNode in a TreeBin. - */ - final Entry removeEntryForKey(Object key) { - if (size == 0) { - return null; - } - if (key == null) { - if (nullKeyEntry != null) { - return removeNullKey(); - } - return null; - } - int hash = hash(key); - int i = indexFor(hash, table.length); - - if (table[i] instanceof Entry) { - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - @SuppressWarnings("unchecked") - Entry next = (Entry) e.next; - if (e.hash == hash && Objects.equals(e.key, key)) { - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return e; - } - prev = e; - e = next; - } - } else if (table[i] != null) { - TreeBin tb = ((TreeBin) table[i]); - TreeNode p = tb.getTreeNode(hash, (K)key); - if (p != null) { - Entry pEntry = (Entry)p.entry; - // assert pEntry.key.equals(key); - modCount++; - size--; - tb.deleteTreeNode(p); - pEntry.recordRemoval(this); - if (tb.root == null || tb.first == null) { - // assert tb.root == null && tb.first == null : - // "TreeBin.first and root should both be null"; - // TreeBin is now empty, we should blank this bin - table[i] = null; - } - return pEntry; - } - } - return null; - } - - /** - * Special version of remove for EntrySet using {@code Map.Entry.equals()} - * for matching. 
- */ - final Entry removeMapping(Object o) { - if (size == 0 || !(o instanceof Map.Entry)) - return null; - - Map.Entry entry = (Map.Entry) o; - Object key = entry.getKey(); - - if (key == null) { - if (entry.equals(nullKeyEntry)) { - return removeNullKey(); - } - return null; - } - - int hash = hash(key); - int i = indexFor(hash, table.length); - - if (table[i] instanceof Entry) { - @SuppressWarnings("unchecked") - Entry prev = (Entry)table[i]; - Entry e = prev; - - while (e != null) { - @SuppressWarnings("unchecked") - Entry next = (Entry)e.next; - if (e.hash == hash && e.equals(entry)) { - modCount++; - size--; - if (prev == e) - table[i] = next; - else - prev.next = next; - e.recordRemoval(this); - return e; - } - prev = e; - e = next; - } - } else if (table[i] != null) { - TreeBin tb = ((TreeBin) table[i]); - TreeNode p = tb.getTreeNode(hash, (K)key); - if (p != null && p.entry.equals(entry)) { - @SuppressWarnings("unchecked") - Entry pEntry = (Entry)p.entry; - // assert pEntry.key.equals(key); - modCount++; - size--; - tb.deleteTreeNode(p); - pEntry.recordRemoval(this); - if (tb.root == null || tb.first == null) { - // assert tb.root == null && tb.first == null : - // "TreeBin.first and root should both be null"; - // TreeBin is now empty, we should blank this bin - table[i] = null; - } - return pEntry; - } - } - return null; - } - - /* - * Remove the mapping for the null key, and update internal accounting - * (size, modcount, recordRemoval, etc). - * - * Assumes nullKeyEntry is non-null. - */ - private Entry removeNullKey() { - // assert nullKeyEntry != null; - Entry retVal = nullKeyEntry; - modCount++; - size--; - retVal.recordRemoval(this); - nullKeyEntry = null; - return retVal; - } - /** * Removes all of the mappings from this map. * The map will be empty after this call returns. 
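 * (Editorial note: both the old and the new implementation retain the
 * backing array on clear, so the map's capacity is unchanged.)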
*/ public void clear() { + Node[] tab; modCount++; - if (nullKeyEntry != null) { - nullKeyEntry = null; + if ((tab = table) != null && size > 0) { + size = 0; + for (int i = 0; i < tab.length; ++i) + tab[i] = null; } - Arrays.fill(table, null); - size = 0; } /** @@ -2134,352 +873,20 @@ * specified value */ public boolean containsValue(Object value) { - if (value == null) { - return containsNullValue(); - } - Object[] tab = table; - for (int i = 0; i < tab.length; i++) { - if (tab[i] instanceof Entry) { - Entry e = (Entry)tab[i]; - for (; e != null; e = (Entry)e.next) { - if (value.equals(e.value)) { - return true; - } - } - } else if (tab[i] != null) { - TreeBin e = (TreeBin)tab[i]; - TreeNode p = e.first; - for (; p != null; p = (TreeNode) p.entry.next) { - if (value == p.entry.value || value.equals(p.entry.value)) { + Node[] tab; V v; + if ((tab = table) != null && size > 0) { + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) { + if ((v = e.value) == value || + (value != null && value.equals(v))) return true; - } - } - } - } - // Didn't find value in table - could be in nullKeyEntry - return (nullKeyEntry != null && (value == nullKeyEntry.value || - value.equals(nullKeyEntry.value))); - } - - /** - * Special-case code for containsValue with null argument - */ - private boolean containsNullValue() { - Object[] tab = table; - for (int i = 0; i < tab.length; i++) { - if (tab[i] instanceof Entry) { - Entry e = (Entry)tab[i]; - for (; e != null; e = (Entry)e.next) { - if (e.value == null) { - return true; - } - } - } else if (tab[i] != null) { - TreeBin e = (TreeBin)tab[i]; - TreeNode p = e.first; - for (; p != null; p = (TreeNode) p.entry.next) { - if (p.entry.value == null) { - return true; - } } } } - // Didn't find value in table - could be in nullKeyEntry - return (nullKeyEntry != null && nullKeyEntry.value == null); - } - - /** - * Returns a shallow copy of this HashMap instance: the keys and - * values themselves are not cloned. - * - * @return a shallow copy of this map - */ - @SuppressWarnings("unchecked") - public Object clone() { - HashMap result = null; - try { - result = (HashMap)super.clone(); - } catch (CloneNotSupportedException e) { - // assert false; - } - if (result.table != EMPTY_TABLE) { - result.inflateTable(Math.min( - (int) Math.min( - size * Math.min(1 / loadFactor, 4.0f), - // we have limits... - HashMap.MAXIMUM_CAPACITY), - table.length)); - } - result.entrySet = null; - result.modCount = 0; - result.size = 0; - result.nullKeyEntry = null; - result.init(); - result.putAllForCreate(this); - - return result; - } - - static class Entry implements Map.Entry { - final K key; - V value; - Object next; // an Entry, or a TreeNode - final int hash; - - /** - * Creates new entry. 
- */ - Entry(int h, K k, V v, Object n) { - value = v; - next = n; - key = k; - hash = h; - } - - public final K getKey() { - return key; - } - - public final V getValue() { - return value; - } - - public final V setValue(V newValue) { - V oldValue = value; - value = newValue; - return oldValue; - } - - public final boolean equals(Object o) { - if (!(o instanceof Map.Entry)) - return false; - Map.Entry e = (Map.Entry)o; - Object k1 = getKey(); - Object k2 = e.getKey(); - if (k1 == k2 || (k1 != null && k1.equals(k2))) { - Object v1 = getValue(); - Object v2 = e.getValue(); - if (v1 == v2 || (v1 != null && v1.equals(v2))) - return true; - } - return false; - } - - public final int hashCode() { - return Objects.hashCode(getKey()) ^ Objects.hashCode(getValue()); - } - - public final String toString() { - return getKey() + "=" + getValue(); - } - - /** - * This method is invoked whenever the value in an entry is - * overwritten for a key that's already in the HashMap. - */ - void recordAccess(HashMap m) { - } - - /** - * This method is invoked whenever the entry is - * removed from the table. - */ - void recordRemoval(HashMap m) { - } - } - - void addEntry(int hash, K key, V value, int bucketIndex) { - addEntry(hash, key, value, bucketIndex, true); - } - - /** - * Adds a new entry with the specified key, value and hash code to - * the specified bucket. It is the responsibility of this - * method to resize the table if appropriate. The new entry is then - * created by calling createEntry(). - * - * Subclass overrides this to alter the behavior of put method. - * - * If checkIfNeedTree is false, it is known that this bucket will not need - * to be converted to a TreeBin, so don't bothering checking. - * - * Assumes key is not null. - */ - void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { - // assert key != null; - if ((size >= threshold) && (null != table[bucketIndex])) { - resize(2 * table.length); - hash = hash(key); - bucketIndex = indexFor(hash, table.length); - } - createEntry(hash, key, value, bucketIndex, checkIfNeedTree); + return false; } /** - * Called by addEntry(), and also used when creating entries - * as part of Map construction or "pseudo-construction" (cloning, - * deserialization). This version does not check for resizing of the table. - * - * This method is responsible for converting a bucket to a TreeBin once - * TREE_THRESHOLD is reached. However if checkIfNeedTree is false, it is known - * that this bucket will not need to be converted to a TreeBin, so don't - * bother checking. The new entry is constructed by calling newEntry(). - * - * Assumes key is not null. - * - * Note: buckets already converted to a TreeBin don't call this method, but - * instead call TreeBin.putTreeNode() to create new entries. - */ - void createEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { - // assert key != null; - @SuppressWarnings("unchecked") - Entry e = (Entry)table[bucketIndex]; - table[bucketIndex] = newEntry(hash, key, value, e); - size++; - - if (checkIfNeedTree) { - int listSize = 0; - for (e = (Entry) table[bucketIndex]; e != null; e = (Entry)e.next) { - listSize++; - if (listSize >= TreeBin.TREE_THRESHOLD) { // Convert to TreeBin - if (comparableClassFor(key) != null) { - TreeBin t = new TreeBin(); - t.populate((Entry)table[bucketIndex]); - table[bucketIndex] = t; - } - break; - } - } - } - } - - /* - * Factory method to create a new Entry object. 
- */ - Entry newEntry(int hash, K key, V value, Object next) { - return new HashMap.Entry<>(hash, key, value, next); - } - - - private abstract class HashIterator implements Iterator { - Object next; // next entry to return, an Entry or a TreeNode - int expectedModCount; // For fast-fail - int index; // current slot - Object current; // current entry, an Entry or a TreeNode - - HashIterator() { - expectedModCount = modCount; - if (size > 0) { // advance to first entry - if (nullKeyEntry != null) { - // assert nullKeyEntry.next == null; - // This works with nextEntry(): nullKeyEntry isa Entry, and - // e.next will be null, so we'll hit the findNextBin() call. - next = nullKeyEntry; - } else { - findNextBin(); - } - } - } - - public final boolean hasNext() { - return next != null; - } - - @SuppressWarnings("unchecked") - final Entry nextEntry() { - if (modCount != expectedModCount) { - throw new ConcurrentModificationException(); - } - Object e = next; - Entry retVal; - - if (e == null) - throw new NoSuchElementException(); - - if (e instanceof TreeNode) { // TreeBin - retVal = (Entry)((TreeNode)e).entry; - next = retVal.next; - } else { - retVal = (Entry)e; - next = ((Entry)e).next; - } - - if (next == null) { // Move to next bin - findNextBin(); - } - current = e; - return retVal; - } - - public void remove() { - if (current == null) - throw new IllegalStateException(); - if (modCount != expectedModCount) - throw new ConcurrentModificationException(); - K k; - - if (current instanceof Entry) { - k = ((Entry)current).key; - } else { - k = ((Entry)((TreeNode)current).entry).key; - - } - current = null; - HashMap.this.removeEntryForKey(k); - expectedModCount = modCount; - } - - /* - * Set 'next' to the first entry of the next non-empty bin in the table - */ - private void findNextBin() { - // assert next == null; - Object[] t = table; - - while (index < t.length && (next = t[index++]) == null) - ; - if (next instanceof HashMap.TreeBin) { // Point to the first TreeNode - next = ((TreeBin) next).first; - // assert next != null; // There should be no empty TreeBins - } - } - } - - private final class ValueIterator extends HashIterator { - public V next() { - return nextEntry().value; - } - } - - private final class KeyIterator extends HashIterator { - public K next() { - return nextEntry().getKey(); - } - } - - private final class EntryIterator extends HashIterator> { - public Map.Entry next() { - return nextEntry(); - } - } - - // Subclass overrides these to alter behavior of views' iterator() method - Iterator newKeyIterator() { - return new KeyIterator(); - } - Iterator newValueIterator() { - return new ValueIterator(); - } - Iterator> newEntryIterator() { - return new EntryIterator(); - } - - - // Views - - private transient Set> entrySet = null; - - /** * Returns a {@link Set} view of the keys contained in this map. * The set is backed by the map, so changes to the map are * reflected in the set, and vice-versa. If the map is modified @@ -2491,35 +898,38 @@ * removeAll, retainAll, and clear * operations. It does not support the add or addAll * operations. + * + * @return a set view of the keys contained in this map */ public Set keySet() { - Set ks = keySet; - return (ks != null ? ks : (keySet = new KeySet())); + Set ks; + return (ks = keySet) == null ? 
(keySet = new KeySet()) : ks; } - private final class KeySet extends AbstractSet { - public Iterator iterator() { - return newKeyIterator(); + final class KeySet extends AbstractSet { + public final int size() { return size; } + public final void clear() { HashMap.this.clear(); } + public final Iterator iterator() { return new KeyIterator(); } + public final boolean contains(Object o) { return containsKey(o); } + public final boolean remove(Object key) { + return removeNode(hash(key), key, null, false, true) != null; } - public int size() { - return size; - } - public boolean contains(Object o) { - return containsKey(o); + public final Spliterator spliterator() { + return new KeySpliterator(HashMap.this, 0, -1, 0, 0); } - public boolean remove(Object o) { - return HashMap.this.removeEntryForKey(o) != null; - } - public void clear() { - HashMap.this.clear(); - } - - public Spliterator spliterator() { - if (HashMap.this.getClass() == HashMap.class) - return new KeySpliterator(HashMap.this, 0, -1, 0, 0); - else - return Spliterators.spliterator - (this, Spliterator.SIZED | Spliterator.DISTINCT); + public final void forEach(Consumer action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e.key); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } } } @@ -2535,32 +945,35 @@ * Collection.remove, removeAll, * retainAll and clear operations. It does not * support the add or addAll operations. + * + * @return a view of the values contained in this map */ public Collection values() { - Collection vs = values; - return (vs != null ? vs : (values = new Values())); + Collection vs; + return (vs = values) == null ? (values = new Values()) : vs; } - private final class Values extends AbstractCollection { - public Iterator iterator() { - return newValueIterator(); - } - public int size() { - return size; - } - public boolean contains(Object o) { - return containsValue(o); + final class Values extends AbstractCollection { + public final int size() { return size; } + public final void clear() { HashMap.this.clear(); } + public final Iterator iterator() { return new ValueIterator(); } + public final boolean contains(Object o) { return containsValue(o); } + public final Spliterator spliterator() { + return new ValueSpliterator(HashMap.this, 0, -1, 0, 0); } - public void clear() { - HashMap.this.clear(); - } - - public Spliterator spliterator() { - if (HashMap.this.getClass() == HashMap.class) - return new ValueSpliterator(HashMap.this, 0, -1, 0, 0); - else - return Spliterators.spliterator - (this, Spliterator.SIZED); + public final void forEach(Consumer action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e.value); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } } } @@ -2581,42 +994,324 @@ * @return a set view of the mappings contained in this map */ public Set> entrySet() { - return entrySet0(); + Set> es; + return (es = entrySet) == null ? (entrySet = new EntrySet()) : es; } - private Set> entrySet0() { - Set> es = entrySet; - return es != null ? 
es : (entrySet = new EntrySet()); - } - - private final class EntrySet extends AbstractSet> { - public Iterator> iterator() { - return newEntryIterator(); + final class EntrySet extends AbstractSet> { + public final int size() { return size; } + public final void clear() { HashMap.this.clear(); } + public final Iterator> iterator() { + return new EntryIterator(); } - public boolean contains(Object o) { + public final boolean contains(Object o) { if (!(o instanceof Map.Entry)) return false; Map.Entry e = (Map.Entry) o; - Entry candidate = getEntry(e.getKey()); + Object key = e.getKey(); + Node candidate = getNode(hash(key), key); return candidate != null && candidate.equals(e); } - public boolean remove(Object o) { - return removeMapping(o) != null; + public final boolean remove(Object o) { + if (o instanceof Map.Entry) { + Map.Entry e = (Map.Entry) o; + Object key = e.getKey(); + Object value = e.getValue(); + return removeNode(hash(key), key, value, true, true) != null; + } + return false; + } + public final Spliterator> spliterator() { + return new EntrySpliterator(HashMap.this, 0, -1, 0, 0); + } + public final void forEach(Consumer> action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + } + + // Overrides of JDK8 Map extension methods + + public V getOrDefault(Object key, V defaultValue) { + Node e; + return (e = getNode(hash(key), key)) == null ? defaultValue : e.value; + } + + public V putIfAbsent(K key, V value) { + return putVal(hash(key), key, value, true, true); + } + + public boolean remove(Object key, Object value) { + return removeNode(hash(key), key, value, true, true) != null; + } + + public boolean replace(K key, V oldValue, V newValue) { + Node e; V v; + if ((e = getNode(hash(key), key)) != null && + ((v = e.value) == oldValue || (v != null && v.equals(oldValue)))) { + e.value = newValue; + afterNodeAccess(e); + return true; + } + return false; + } + + public V replace(K key, V value) { + Node e; + if ((e = getNode(hash(key), key)) != null) { + V oldValue = e.value; + e.value = value; + afterNodeAccess(e); + return oldValue; } - public int size() { - return size; + return null; + } + + public V computeIfAbsent(K key, + Function mappingFunction) { + if (mappingFunction == null) + throw new NullPointerException(); + int hash = hash(key); + Node[] tab; Node first; int n, i; + int binCount = 0; + TreeNode t = null; + Node old = null; + if (size > threshold || (tab = table) == null || + (n = tab.length) == 0) + n = (tab = resize()).length; + if ((first = tab[i = (n - 1) & hash]) != null) { + if (first instanceof TreeNode) + old = (t = (TreeNode)first).getTreeNode(hash, key); + else { + Node e = first; K k; + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) { + old = e; + break; + } + ++binCount; + } while ((e = e.next) != null); + } + V oldValue; + if (old != null && (oldValue = old.value) != null) { + afterNodeAccess(old); + return oldValue; + } } - public void clear() { - HashMap.this.clear(); + V v = mappingFunction.apply(key); + if (old != null) { + old.value = v; + afterNodeAccess(old); + return v; + } + else if (v == null) + return null; + else if (t != null) + t.putTreeVal(this, tab, hash, key, v); + else { + tab[i] = newNode(hash, key, v, first); + if 
(binCount >= TREEIFY_THRESHOLD - 1) + treeifyBin(tab, hash); + } + ++modCount; + ++size; + afterNodeInsertion(true); + return v; + } + + public V computeIfPresent(K key, + BiFunction remappingFunction) { + Node e; V oldValue; + int hash = hash(key); + if ((e = getNode(hash, key)) != null && + (oldValue = e.value) != null) { + V v = remappingFunction.apply(key, oldValue); + if (v != null) { + e.value = v; + afterNodeAccess(e); + return v; + } + else + removeNode(hash, key, null, false, true); } + return null; + } - public Spliterator> spliterator() { - if (HashMap.this.getClass() == HashMap.class) - return new EntrySpliterator(HashMap.this, 0, -1, 0, 0); + public V compute(K key, + BiFunction remappingFunction) { + if (remappingFunction == null) + throw new NullPointerException(); + int hash = hash(key); + Node[] tab; Node first; int n, i; + int binCount = 0; + TreeNode t = null; + Node old = null; + if (size > threshold || (tab = table) == null || + (n = tab.length) == 0) + n = (tab = resize()).length; + if ((first = tab[i = (n - 1) & hash]) != null) { + if (first instanceof TreeNode) + old = (t = (TreeNode)first).getTreeNode(hash, key); + else { + Node e = first; K k; + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) { + old = e; + break; + } + ++binCount; + } while ((e = e.next) != null); + } + } + V oldValue = (old == null) ? null : old.value; + V v = remappingFunction.apply(key, oldValue); + if (old != null) { + if (v != null) { + old.value = v; + afterNodeAccess(old); + } else - return Spliterators.spliterator - (this, Spliterator.SIZED | Spliterator.DISTINCT); + removeNode(hash, key, null, false, true); + } + else if (v != null) { + if (t != null) + t.putTreeVal(this, tab, hash, key, v); + else { + tab[i] = newNode(hash, key, v, first); + if (binCount >= TREEIFY_THRESHOLD - 1) + treeifyBin(tab, hash); + } + ++modCount; + ++size; + afterNodeInsertion(true); + } + return v; + } + + public V merge(K key, V value, + BiFunction remappingFunction) { + if (remappingFunction == null) + throw new NullPointerException(); + int hash = hash(key); + Node[] tab; Node first; int n, i; + int binCount = 0; + TreeNode t = null; + Node old = null; + if (size > threshold || (tab = table) == null || + (n = tab.length) == 0) + n = (tab = resize()).length; + if ((first = tab[i = (n - 1) & hash]) != null) { + if (first instanceof TreeNode) + old = (t = (TreeNode)first).getTreeNode(hash, key); + else { + Node e = first; K k; + do { + if (e.hash == hash && + ((k = e.key) == key || (key != null && key.equals(k)))) { + old = e; + break; + } + ++binCount; + } while ((e = e.next) != null); + } + } + if (old != null) { + V v = remappingFunction.apply(old.value, value); + if (v != null) { + old.value = v; + afterNodeAccess(old); + } + else + removeNode(hash, key, null, false, true); + return v; } + if (value != null) { + if (t != null) + t.putTreeVal(this, tab, hash, key, value); + else { + tab[i] = newNode(hash, key, value, first); + if (binCount >= TREEIFY_THRESHOLD - 1) + treeifyBin(tab, hash); + } + ++modCount; + ++size; + afterNodeInsertion(true); + } + return value; + } + + public void forEach(BiConsumer action) { + Node[] tab; + if (action == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) + action.accept(e.key, e.value); + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + public void 
replaceAll(BiFunction function) { + Node[] tab; + if (function == null) + throw new NullPointerException(); + if (size > 0 && (tab = table) != null) { + int mc = modCount; + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) { + e.value = function.apply(e.key, e.value); + } + } + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + /* ------------------------------------------------------------ */ + // Cloning and serialization + + /** + * Returns a shallow copy of this HashMap instance: the keys and + * values themselves are not cloned. + * + * @return a shallow copy of this map + */ + @SuppressWarnings("unchecked") + public Object clone() { + HashMap result; + try { + result = (HashMap)super.clone(); + } catch (CloneNotSupportedException e) { + // this shouldn't happen, since we are Cloneable + throw new InternalError(e); + } + result.reinitialize(); + result.putMapEntries(this, false); + return result; + } + + // These methods are also used when serializing HashSets + final float loadFactor() { return loadFactor; } + final int capacity() { + return (table != null) ? table.length : + (threshold > 0) ? threshold : + DEFAULT_INITIAL_CAPACITY; } /** @@ -2631,118 +1326,143 @@ * emitted in no particular order. */ private void writeObject(java.io.ObjectOutputStream s) - throws IOException - { + throws IOException { + int buckets = capacity(); // Write out the threshold, loadfactor, and any hidden stuff s.defaultWriteObject(); - - // Write out number of buckets - if (table==EMPTY_TABLE) { - s.writeInt(roundUpToPowerOf2(threshold)); - } else { - s.writeInt(table.length); - } - - // Write out size (number of Mappings) + s.writeInt(buckets); s.writeInt(size); - - // Write out keys and values (alternating) - if (size > 0) { - for(Map.Entry e : entrySet0()) { - s.writeObject(e.getKey()); - s.writeObject(e.getValue()); - } - } + internalWriteEntries(s); } - private static final long serialVersionUID = 362498820763181265L; - /** * Reconstitute the {@code HashMap} instance from a stream (i.e., * deserialize it). */ private void readObject(java.io.ObjectInputStream s) - throws IOException, ClassNotFoundException - { + throws IOException, ClassNotFoundException { // Read in the threshold (ignored), loadfactor, and any hidden stuff s.defaultReadObject(); - if (loadFactor <= 0 || Float.isNaN(loadFactor)) { + reinitialize(); + if (loadFactor <= 0 || Float.isNaN(loadFactor)) throw new InvalidObjectException("Illegal load factor: " + - loadFactor); - } - - // set other fields that need values - if (Holder.USE_HASHSEED) { - int seed = ThreadLocalRandom.current().nextInt(); - Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, - (seed != 0) ? seed : 1); - } - table = EMPTY_TABLE; - - // Read in number of buckets - s.readInt(); // ignored. - - // Read number of mappings - int mappings = s.readInt(); + loadFactor); + s.readInt(); // Read and ignore number of buckets + int mappings = s.readInt(); // Read number of mappings (size) if (mappings < 0) throw new InvalidObjectException("Illegal mappings count: " + - mappings); - - // capacity chosen by number of mappings and desired load (if >= 0.25) - int capacity = (int) Math.min( - mappings * Math.min(1 / loadFactor, 4.0f), - // we have limits... 
- HashMap.MAXIMUM_CAPACITY); + mappings); + else if (mappings > 0) { // (if zero, use defaults) + // Size the table using given load factor only if within + // range of 0.25...4.0 + float lf = Math.min(Math.max(0.25f, loadFactor), 4.0f); + float fc = (float)mappings / lf + 1.0f; + int cap = ((fc < DEFAULT_INITIAL_CAPACITY) ? + DEFAULT_INITIAL_CAPACITY : + (fc >= MAXIMUM_CAPACITY) ? + MAXIMUM_CAPACITY : + tableSizeFor((int)fc)); + float ft = (float)cap * lf; + threshold = ((cap < MAXIMUM_CAPACITY && ft < MAXIMUM_CAPACITY) ? + (int)ft : Integer.MAX_VALUE); + @SuppressWarnings({"rawtypes","unchecked"}) + Node[] tab = (Node[])new Node[cap]; + table = tab; - // allocate the bucket array; - if (mappings > 0) { - inflateTable(capacity); - } else { - threshold = capacity; - } - - init(); // Give subclass a chance to do its thing. - - // Read the keys and values, and put the mappings in the HashMap - for (int i=0; i next; // next entry to return + Node current; // current entry + int expectedModCount; // for fast-fail + int index; // current slot + + HashIterator() { + expectedModCount = modCount; + Node[] t = table; + current = next = null; + index = 0; + if (t != null && size > 0) { // advance to first entry + do {} while (index < t.length && (next = t[index++]) == null); + } + } + + public final boolean hasNext() { + return next != null; + } - /** - * Standin until HM overhaul; based loosely on Weak and Identity HM. - */ + final Node nextNode() { + Node[] t; + Node e = next; + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + if (e == null) + throw new NoSuchElementException(); + if ((next = (current = e).next) == null && (t = table) != null) { + do {} while (index < t.length && (next = t[index++]) == null); + } + return e; + } + + public final void remove() { + Node p = current; + if (p == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + current = null; + K key = p.key; + removeNode(hash(key), key, null, false, false); + expectedModCount = modCount; + } + } + + final class KeyIterator extends HashIterator + implements Iterator { + public final K next() { return nextNode().key; } + } + + final class ValueIterator extends HashIterator + implements Iterator { + public final V next() { return nextNode().value; } + } + + final class EntryIterator extends HashIterator + implements Iterator> { + public final Map.Entry next() { return nextNode(); } + } + + /* ------------------------------------------------------------ */ + // spliterators + static class HashMapSpliterator { final HashMap map; - Object current; // current node, can be Entry or TreeNode + Node current; // current node int index; // current index, modified on advance/split int fence; // one past last index int est; // size estimate int expectedModCount; // for comodification checks - boolean acceptedNull; // Have we accepted the null key? - // Without this, we can't distinguish - // between being at the very beginning (and - // needing to accept null), or being at the - // end of the list in bin 0. In both cases, - // current == null && index == 0. 
HashMapSpliterator(HashMap m, int origin, - int fence, int est, - int expectedModCount) { + int fence, int est, + int expectedModCount) { this.map = m; this.index = origin; this.fence = fence; this.est = est; this.expectedModCount = expectedModCount; - this.acceptedNull = false; } final int getFence() { // initialize fence and size on first use @@ -2751,7 +1471,8 @@ HashMap m = map; est = m.size; expectedModCount = m.modCount; - hi = fence = m.table.length; + Node[] tab = m.table; + hi = fence = (tab == null) ? 0 : tab.length; } return hi; } @@ -2772,56 +1493,33 @@ public KeySpliterator trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - if (lo >= mid || current != null) { - return null; - } else { - KeySpliterator retVal = new KeySpliterator(map, lo, - index = mid, est >>>= 1, expectedModCount); - // Only 'this' Spliterator chould check for null. - retVal.acceptedNull = true; - return retVal; - } + return (lo >= mid || current != null) ? null : + new KeySpliterator(map, lo, index = mid, est >>>= 1, + expectedModCount); } - @SuppressWarnings("unchecked") public void forEachRemaining(Consumer action) { int i, hi, mc; if (action == null) throw new NullPointerException(); HashMap m = map; - Object[] tab = m.table; + Node[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; - hi = fence = tab.length; + hi = fence = (tab == null) ? 0 : tab.length; } else mc = expectedModCount; - - if (!acceptedNull) { - acceptedNull = true; - if (m.nullKeyEntry != null) { - action.accept(m.nullKeyEntry.key); - } - } - if (tab.length >= hi && (i = index) >= 0 && - (i < (index = hi) || current != null)) { - Object p = current; + if (tab != null && tab.length >= hi && + (i = index) >= 0 && (i < (index = hi) || current != null)) { + Node p = current; current = null; do { - if (p == null) { + if (p == null) p = tab[i++]; - if (p instanceof HashMap.TreeBin) { - p = ((HashMap.TreeBin)p).first; - } - } else { - HashMap.Entry entry; - if (p instanceof HashMap.Entry) { - entry = (HashMap.Entry)p; - } else { - entry = (HashMap.Entry)((TreeNode)p).entry; - } - action.accept(entry.key); - p = entry.next; + else { + action.accept(p.key); + p = p.next; } } while (p != null || i < hi); if (m.modCount != mc) @@ -2829,39 +1527,18 @@ } } - @SuppressWarnings("unchecked") public boolean tryAdvance(Consumer action) { int hi; if (action == null) throw new NullPointerException(); - Object[] tab = map.table; - hi = getFence(); - - if (!acceptedNull) { - acceptedNull = true; - if (map.nullKeyEntry != null) { - action.accept(map.nullKeyEntry.key); - if (map.modCount != expectedModCount) - throw new ConcurrentModificationException(); - return true; - } - } - if (tab.length >= hi && index >= 0) { + Node[] tab = map.table; + if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { while (current != null || index < hi) { - if (current == null) { + if (current == null) current = tab[index++]; - if (current instanceof HashMap.TreeBin) { - current = ((HashMap.TreeBin)current).first; - } - } else { - HashMap.Entry entry; - if (current instanceof HashMap.Entry) { - entry = (HashMap.Entry)current; - } else { - entry = (HashMap.Entry)((TreeNode)current).entry; - } - K k = entry.key; - current = entry.next; + else { + K k = current.key; + current = current.next; action.accept(k); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -2888,56 +1565,33 @@ public ValueSpliterator trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - if (lo >= mid || 
current != null) { - return null; - } else { - ValueSpliterator retVal = new ValueSpliterator(map, - lo, index = mid, est >>>= 1, expectedModCount); - // Only 'this' Spliterator chould check for null. - retVal.acceptedNull = true; - return retVal; - } + return (lo >= mid || current != null) ? null : + new ValueSpliterator(map, lo, index = mid, est >>>= 1, + expectedModCount); } - @SuppressWarnings("unchecked") public void forEachRemaining(Consumer action) { int i, hi, mc; if (action == null) throw new NullPointerException(); HashMap m = map; - Object[] tab = m.table; + Node[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; - hi = fence = tab.length; + hi = fence = (tab == null) ? 0 : tab.length; } else mc = expectedModCount; - - if (!acceptedNull) { - acceptedNull = true; - if (m.nullKeyEntry != null) { - action.accept(m.nullKeyEntry.value); - } - } - if (tab.length >= hi && (i = index) >= 0 && - (i < (index = hi) || current != null)) { - Object p = current; + if (tab != null && tab.length >= hi && + (i = index) >= 0 && (i < (index = hi) || current != null)) { + Node p = current; current = null; do { - if (p == null) { + if (p == null) p = tab[i++]; - if (p instanceof HashMap.TreeBin) { - p = ((HashMap.TreeBin)p).first; - } - } else { - HashMap.Entry entry; - if (p instanceof HashMap.Entry) { - entry = (HashMap.Entry)p; - } else { - entry = (HashMap.Entry)((TreeNode)p).entry; - } - action.accept(entry.value); - p = entry.next; + else { + action.accept(p.value); + p = p.next; } } while (p != null || i < hi); if (m.modCount != mc) @@ -2945,39 +1599,18 @@ } } - @SuppressWarnings("unchecked") public boolean tryAdvance(Consumer action) { int hi; if (action == null) throw new NullPointerException(); - Object[] tab = map.table; - hi = getFence(); - - if (!acceptedNull) { - acceptedNull = true; - if (map.nullKeyEntry != null) { - action.accept(map.nullKeyEntry.value); - if (map.modCount != expectedModCount) - throw new ConcurrentModificationException(); - return true; - } - } - if (tab.length >= hi && index >= 0) { + Node[] tab = map.table; + if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { while (current != null || index < hi) { - if (current == null) { + if (current == null) current = tab[index++]; - if (current instanceof HashMap.TreeBin) { - current = ((HashMap.TreeBin)current).first; - } - } else { - HashMap.Entry entry; - if (current instanceof HashMap.Entry) { - entry = (Entry)current; - } else { - entry = (Entry)((TreeNode)current).entry; - } - V v = entry.value; - current = entry.next; + else { + V v = current.value; + current = current.next; action.accept(v); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -3003,57 +1636,33 @@ public EntrySpliterator trySplit() { int hi = getFence(), lo = index, mid = (lo + hi) >>> 1; - if (lo >= mid || current != null) { - return null; - } else { - EntrySpliterator retVal = new EntrySpliterator(map, - lo, index = mid, est >>>= 1, expectedModCount); - // Only 'this' Spliterator chould check for null. - retVal.acceptedNull = true; - return retVal; - } + return (lo >= mid || current != null) ? 
null : + new EntrySpliterator(map, lo, index = mid, est >>>= 1, + expectedModCount); } - @SuppressWarnings("unchecked") public void forEachRemaining(Consumer> action) { int i, hi, mc; if (action == null) throw new NullPointerException(); HashMap m = map; - Object[] tab = m.table; + Node[] tab = m.table; if ((hi = fence) < 0) { mc = expectedModCount = m.modCount; - hi = fence = tab.length; + hi = fence = (tab == null) ? 0 : tab.length; } else mc = expectedModCount; - - if (!acceptedNull) { - acceptedNull = true; - if (m.nullKeyEntry != null) { - action.accept(m.nullKeyEntry); - } - } - if (tab.length >= hi && (i = index) >= 0 && - (i < (index = hi) || current != null)) { - Object p = current; + if (tab != null && tab.length >= hi && + (i = index) >= 0 && (i < (index = hi) || current != null)) { + Node p = current; current = null; do { - if (p == null) { + if (p == null) p = tab[i++]; - if (p instanceof HashMap.TreeBin) { - p = ((HashMap.TreeBin)p).first; - } - } else { - HashMap.Entry entry; - if (p instanceof HashMap.Entry) { - entry = (HashMap.Entry)p; - } else { - entry = (HashMap.Entry)((TreeNode)p).entry; - } - action.accept(entry); - p = entry.next; - + else { + action.accept(p); + p = p.next; } } while (p != null || i < hi); if (m.modCount != mc) @@ -3061,38 +1670,18 @@ } } - @SuppressWarnings("unchecked") public boolean tryAdvance(Consumer> action) { int hi; if (action == null) throw new NullPointerException(); - Object[] tab = map.table; - hi = getFence(); - - if (!acceptedNull) { - acceptedNull = true; - if (map.nullKeyEntry != null) { - action.accept(map.nullKeyEntry); - if (map.modCount != expectedModCount) - throw new ConcurrentModificationException(); - return true; - } - } - if (tab.length >= hi && index >= 0) { + Node[] tab = map.table; + if (tab != null && tab.length >= (hi = getFence()) && index >= 0) { while (current != null || index < hi) { - if (current == null) { + if (current == null) current = tab[index++]; - if (current instanceof HashMap.TreeBin) { - current = ((HashMap.TreeBin)current).first; - } - } else { - HashMap.Entry e; - if (current instanceof HashMap.Entry) { - e = (Entry)current; - } else { - e = (Entry)((TreeNode)current).entry; - } - current = e.next; + else { + Node e = current; + current = current.next; action.accept(e); if (map.modCount != expectedModCount) throw new ConcurrentModificationException(); @@ -3108,4 +1697,664 @@ Spliterator.DISTINCT; } } + + /* ------------------------------------------------------------ */ + // LinkedHashMap support + + + /* + * The following package-protected methods are designed to be + * overridden by LinkedHashMap, but not by any other subclass. + * Nearly all other internal methods are also package-protected + * but are declared final, so can be used by LinkedHashMap, view + * classes, and HashSet. + */ + + // Create a regular (non-tree) node + Node newNode(int hash, K key, V value, Node next) { + return new Node(hash, key, value, next); + } + + // For conversion from TreeNodes to plain nodes + Node replacementNode(Node p, Node next) { + return new Node(p.hash, p.key, p.value, next); + } + + // Create a tree bin node + TreeNode newTreeNode(int hash, K key, V value, Node next) { + return new TreeNode(hash, key, value, next); + } + + // For treeifyBin + TreeNode replacementTreeNode(Node p, Node next) { + return new TreeNode(p.hash, p.key, p.value, next); + } + + /** + * Reset to initial default state. Called by clone and readObject. 
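+     * Note that the loadFactor field is final and so survives
+     * unchanged; only the table, the cached views, and the counters
+     * are reset.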
+ */ + void reinitialize() { + table = null; + entrySet = null; + keySet = null; + values = null; + modCount = 0; + threshold = 0; + size = 0; + } + + // Callbacks to allow LinkedHashMap post-actions + void afterNodeAccess(Node p) { } + void afterNodeInsertion(boolean evict) { } + void afterNodeRemoval(Node p) { } + + // Called only from writeObject, to ensure compatible ordering. + void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { + Node[] tab; + if (size > 0 && (tab = table) != null) { + for (int i = 0; i < tab.length; ++i) { + for (Node e = tab[i]; e != null; e = e.next) { + s.writeObject(e.key); + s.writeObject(e.value); + } + } + } + } + + /* ------------------------------------------------------------ */ + // Tree bins + + /** + * Entry for Tree bins. Extends LinkedHashMap.Entry (which in turn + * extends Node) so can be used as extension of either regular or + * linked node. + */ + static final class TreeNode extends LinkedHashMap.Entry { + TreeNode parent; // red-black tree links + TreeNode left; + TreeNode right; + TreeNode prev; // needed to unlink next upon deletion + boolean red; + TreeNode(int hash, K key, V val, Node next) { + super(hash, key, val, next); + } + + /** + * Returns root of tree containing this node. + */ + final TreeNode root() { + for (TreeNode r = this, p;;) { + if ((p = r.parent) == null) + return r; + r = p; + } + } + + /** + * Ensures that the given root is the first node of its bin. + */ + static void moveRootToFront(Node[] tab, TreeNode root) { + int n; + if (root != null && tab != null && (n = tab.length) > 0) { + int index = (n - 1) & root.hash; + TreeNode first = (TreeNode)tab[index]; + if (root != first) { + Node rn; + tab[index] = root; + TreeNode rp = root.prev; + if ((rn = root.next) != null) + ((TreeNode)rn).prev = rp; + if (rp != null) + rp.next = rn; + if (first != null) + first.prev = root; + root.next = first; + root.prev = null; + } + assert checkInvariants(root); + } + } + + /** + * Finds the node starting at root p with the given hash and key. + * The kc argument caches comparableClassFor(key) upon first use + * comparing keys. + */ + final TreeNode find(int h, Object k, Class kc) { + TreeNode p = this; + do { + int ph, dir; K pk; + TreeNode pl = p.left, pr = p.right, q; + if ((ph = p.hash) > h) + p = pl; + else if (ph < h) + p = pr; + else if ((pk = p.key) == k || (k != null && k.equals(pk))) + return p; + else if (pl == null) + p = pr; + else if (pr == null) + p = pl; + else if ((kc != null || + (kc = comparableClassFor(k)) != null) && + (dir = compareComparables(kc, k, pk)) != 0) + p = (dir < 0) ? pl : pr; + else if ((q = pr.find(h, k, kc)) != null) + return q; + else + p = pl; + } while (p != null); + return null; + } + + /** + * Calls find for root node. + */ + final TreeNode getTreeNode(int h, Object k) { + return ((parent != null) ? root() : this).find(h, k, null); + } + + /** + * Tie-breaking utility for ordering insertions when equal + * hashCodes and non-comparable. We don't require a total + * order, just a consistent insertion rule to maintain + * equivalence across rebalancings. Tie-breaking further than + * necessary simplifies testing a bit. + */ + static int tieBreakOrder(Object a, Object b) { + int d; + if (a == null || b == null || + (d = a.getClass().getName(). + compareTo(b.getClass().getName())) == 0) + d = (System.identityHashCode(a) <= System.identityHashCode(b) ? + -1 : 1); + return d; + } + + /** + * Forms tree of the nodes linked from this node. 
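+         * Nodes are ordered primarily by hash; ties among mutually
+         * comparable keys fall back to compareTo, and any remaining
+         * ties are broken by tieBreakOrder, so the resulting tree
+         * shape is deterministic for a given chain of nodes.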
+         */
+        final void treeify(Node<K,V>[] tab) {
+            TreeNode<K,V> root = null;
+            for (TreeNode<K,V> x = this, next; x != null; x = next) {
+                next = (TreeNode<K,V>)x.next;
+                x.left = x.right = null;
+                if (root == null) {
+                    x.parent = null;
+                    x.red = false;
+                    root = x;
+                }
+                else {
+                    K k = x.key;
+                    int h = x.hash;
+                    Class<?> kc = null;
+                    for (TreeNode<K,V> p = root;;) {
+                        int dir, ph;
+                        K pk = p.key;
+                        if ((ph = p.hash) > h)
+                            dir = -1;
+                        else if (ph < h)
+                            dir = 1;
+                        else if ((kc == null &&
+                                  (kc = comparableClassFor(k)) == null) ||
+                                 (dir = compareComparables(kc, k, pk)) == 0)
+                            dir = tieBreakOrder(k, pk);
+
+                        TreeNode<K,V> xp = p;
+                        if ((p = (dir <= 0) ? p.left : p.right) == null) {
+                            x.parent = xp;
+                            if (dir <= 0)
+                                xp.left = x;
+                            else
+                                xp.right = x;
+                            root = balanceInsertion(root, x);
+                            break;
+                        }
+                    }
+                }
+            }
+            moveRootToFront(tab, root);
+        }
+
+        /**
+         * Returns a list of non-TreeNodes replacing those linked from
+         * this node.
+         */
+        final Node<K,V> untreeify(HashMap<K,V> map) {
+            Node<K,V> hd = null, tl = null;
+            for (Node<K,V> q = this; q != null; q = q.next) {
+                Node<K,V> p = map.replacementNode(q, null);
+                if (tl == null)
+                    hd = p;
+                else
+                    tl.next = p;
+                tl = p;
+            }
+            return hd;
+        }
+
+        /**
+         * Tree version of putVal.
+         */
+        final TreeNode<K,V> putTreeVal(HashMap<K,V> map, Node<K,V>[] tab,
+                                       int h, K k, V v) {
+            Class<?> kc = null;
+            boolean searched = false;
+            TreeNode<K,V> root = (parent != null) ? root() : this;
+            for (TreeNode<K,V> p = root;;) {
+                int dir, ph; K pk;
+                if ((ph = p.hash) > h)
+                    dir = -1;
+                else if (ph < h)
+                    dir = 1;
+                else if ((pk = p.key) == k || (pk != null && k.equals(pk)))
+                    return p;
+                else if ((kc == null &&
+                          (kc = comparableClassFor(k)) == null) ||
+                         (dir = compareComparables(kc, k, pk)) == 0) {
+                    if (!searched) {
+                        TreeNode<K,V> q, ch;
+                        searched = true;
+                        if (((ch = p.left) != null &&
+                             (q = ch.find(h, k, kc)) != null) ||
+                            ((ch = p.right) != null &&
+                             (q = ch.find(h, k, kc)) != null))
+                            return q;
+                    }
+                    dir = tieBreakOrder(k, pk);
+                }
+
+                TreeNode<K,V> xp = p;
+                if ((p = (dir <= 0) ? p.left : p.right) == null) {
+                    Node<K,V> xpn = xp.next;
+                    TreeNode<K,V> x = map.newTreeNode(h, k, v, xpn);
+                    if (dir <= 0)
+                        xp.left = x;
+                    else
+                        xp.right = x;
+                    xp.next = x;
+                    x.parent = x.prev = xp;
+                    if (xpn != null)
+                        ((TreeNode<K,V>)xpn).prev = x;
+                    moveRootToFront(tab, balanceInsertion(root, x));
+                    return null;
+                }
+            }
+        }
+
+        /**
+         * Removes the given node, that must be present before this call.
+         * This is messier than typical red-black deletion code because we
+         * cannot swap the contents of an interior node with a leaf
+         * successor that is pinned by "next" pointers that are accessible
+         * independently during traversal. So instead we swap the tree
+         * linkages. If the current tree appears to have too few nodes,
+         * the bin is converted back to a plain bin. (The test triggers
+         * somewhere between 2 and 6 nodes, depending on tree structure).
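+         * Concretely, the bin is untreeified when the root, its right
+         * child, its left child, or its left child's left child is
+         * null; this cheap structural test stands in for an exact
+         * node count.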
+         */
+        final void removeTreeNode(HashMap<K,V> map, Node<K,V>[] tab,
+                                  boolean movable) {
+            int n;
+            if (tab == null || (n = tab.length) == 0)
+                return;
+            int index = (n - 1) & hash;
+            TreeNode<K,V> first = (TreeNode<K,V>)tab[index], root = first, rl;
+            TreeNode<K,V> succ = (TreeNode<K,V>)next, pred = prev;
+            if (pred == null)
+                tab[index] = first = succ;
+            else
+                pred.next = succ;
+            if (succ != null)
+                succ.prev = pred;
+            if (first == null)
+                return;
+            if (root.parent != null)
+                root = root.root();
+            if (root == null || root.right == null ||
+                (rl = root.left) == null || rl.left == null) {
+                tab[index] = first.untreeify(map);  // too small
+                return;
+            }
+            TreeNode<K,V> p = this, pl = left, pr = right, replacement;
+            if (pl != null && pr != null) {
+                TreeNode<K,V> s = pr, sl;
+                while ((sl = s.left) != null) // find successor
+                    s = sl;
+                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
+                TreeNode<K,V> sr = s.right;
+                TreeNode<K,V> pp = p.parent;
+                if (s == pr) { // p was s's direct parent
+                    p.parent = s;
+                    s.right = p;
+                }
+                else {
+                    TreeNode<K,V> sp = s.parent;
+                    if ((p.parent = sp) != null) {
+                        if (s == sp.left)
+                            sp.left = p;
+                        else
+                            sp.right = p;
+                    }
+                    if ((s.right = pr) != null)
+                        pr.parent = s;
+                }
+                p.left = null;
+                if ((p.right = sr) != null)
+                    sr.parent = p;
+                if ((s.left = pl) != null)
+                    pl.parent = s;
+                if ((s.parent = pp) == null)
+                    root = s;
+                else if (p == pp.left)
+                    pp.left = s;
+                else
+                    pp.right = s;
+                if (sr != null)
+                    replacement = sr;
+                else
+                    replacement = p;
+            }
+            else if (pl != null)
+                replacement = pl;
+            else if (pr != null)
+                replacement = pr;
+            else
+                replacement = p;
+            if (replacement != p) {
+                TreeNode<K,V> pp = replacement.parent = p.parent;
+                if (pp == null)
+                    root = replacement;
+                else if (p == pp.left)
+                    pp.left = replacement;
+                else
+                    pp.right = replacement;
+                p.left = p.right = p.parent = null;
+            }
+
+            TreeNode<K,V> r = p.red ? root : balanceDeletion(root, replacement);
+
+            if (replacement == p) {  // detach
+                TreeNode<K,V> pp = p.parent;
+                p.parent = null;
+                if (pp != null) {
+                    if (p == pp.left)
+                        pp.left = null;
+                    else if (p == pp.right)
+                        pp.right = null;
+                }
+            }
+            if (movable)
+                moveRootToFront(tab, r);
+        }
+
+        /**
+         * Splits nodes in a tree bin into lower and upper tree bins,
+         * or untreeifies if now too small. Called only from resize;
+         * see above discussion about split bits and indices.
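+         * For example, when a 16-bin table doubles to 32 bins (so bit
+         * is 16), a node in bin 5 stays in bin 5 if (hash & 16) == 0
+         * and otherwise moves to bin 5 + 16 = 21; relative order is
+         * preserved within both halves.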
+ * + * @param map the map + * @param tab the table for recording bin heads + * @param index the index of the table being split + * @param bit the bit of hash to split on + */ + final void split(HashMap map, Node[] tab, int index, int bit) { + TreeNode b = this; + // Relink into lo and hi lists, preserving order + TreeNode loHead = null, loTail = null; + TreeNode hiHead = null, hiTail = null; + int lc = 0, hc = 0; + for (TreeNode e = b, next; e != null; e = next) { + next = (TreeNode)e.next; + e.next = null; + if ((e.hash & bit) == 0) { + if ((e.prev = loTail) == null) + loHead = e; + else + loTail.next = e; + loTail = e; + ++lc; + } + else { + if ((e.prev = hiTail) == null) + hiHead = e; + else + hiTail.next = e; + hiTail = e; + ++hc; + } + } + + if (loHead != null) { + if (lc <= UNTREEIFY_THRESHOLD) + tab[index] = loHead.untreeify(map); + else { + tab[index] = loHead; + if (hiHead != null) // (else is already treeified) + loHead.treeify(tab); + } + } + if (hiHead != null) { + if (hc <= UNTREEIFY_THRESHOLD) + tab[index + bit] = hiHead.untreeify(map); + else { + tab[index + bit] = hiHead; + if (loHead != null) + hiHead.treeify(tab); + } + } + } + + /* ------------------------------------------------------------ */ + // Red-black tree methods, all adapted from CLR + + static TreeNode rotateLeft(TreeNode root, + TreeNode p) { + TreeNode r, pp, rl; + if (p != null && (r = p.right) != null) { + if ((rl = p.right = r.left) != null) + rl.parent = p; + if ((pp = r.parent = p.parent) == null) + (root = r).red = false; + else if (pp.left == p) + pp.left = r; + else + pp.right = r; + r.left = p; + p.parent = r; + } + return root; + } + + static TreeNode rotateRight(TreeNode root, + TreeNode p) { + TreeNode l, pp, lr; + if (p != null && (l = p.left) != null) { + if ((lr = p.left = l.right) != null) + lr.parent = p; + if ((pp = l.parent = p.parent) == null) + (root = l).red = false; + else if (pp.right == p) + pp.right = l; + else + pp.left = l; + l.right = p; + p.parent = l; + } + return root; + } + + static TreeNode balanceInsertion(TreeNode root, + TreeNode x) { + x.red = true; + for (TreeNode xp, xpp, xppl, xppr;;) { + if ((xp = x.parent) == null) { + x.red = false; + return x; + } + else if (!xp.red || (xpp = xp.parent) == null) + return root; + if (xp == (xppl = xpp.left)) { + if ((xppr = xpp.right) != null && xppr.red) { + xppr.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.right) { + root = rotateLeft(root, x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + root = rotateRight(root, xpp); + } + } + } + } + else { + if (xppl != null && xppl.red) { + xppl.red = false; + xp.red = false; + xpp.red = true; + x = xpp; + } + else { + if (x == xp.left) { + root = rotateRight(root, x = xp); + xpp = (xp = x.parent) == null ? null : xp.parent; + } + if (xp != null) { + xp.red = false; + if (xpp != null) { + xpp.red = true; + root = rotateLeft(root, xpp); + } + } + } + } + } + } + + static TreeNode balanceDeletion(TreeNode root, + TreeNode x) { + for (TreeNode xp, xpl, xpr;;) { + if (x == null || x == root) + return root; + else if ((xp = x.parent) == null) { + x.red = false; + return x; + } + else if (x.red) { + x.red = false; + return root; + } + else if ((xpl = xp.left) == x) { + if ((xpr = xp.right) != null && xpr.red) { + xpr.red = false; + xp.red = true; + root = rotateLeft(root, xp); + xpr = (xp = x.parent) == null ? 
null : xp.right; + } + if (xpr == null) + x = xp; + else { + TreeNode sl = xpr.left, sr = xpr.right; + if ((sr == null || !sr.red) && + (sl == null || !sl.red)) { + xpr.red = true; + x = xp; + } + else { + if (sr == null || !sr.red) { + if (sl != null) + sl.red = false; + xpr.red = true; + root = rotateRight(root, xpr); + xpr = (xp = x.parent) == null ? + null : xp.right; + } + if (xpr != null) { + xpr.red = (xp == null) ? false : xp.red; + if ((sr = xpr.right) != null) + sr.red = false; + } + if (xp != null) { + xp.red = false; + root = rotateLeft(root, xp); + } + x = root; + } + } + } + else { // symmetric + if (xpl != null && xpl.red) { + xpl.red = false; + xp.red = true; + root = rotateRight(root, xp); + xpl = (xp = x.parent) == null ? null : xp.left; + } + if (xpl == null) + x = xp; + else { + TreeNode sl = xpl.left, sr = xpl.right; + if ((sl == null || !sl.red) && + (sr == null || !sr.red)) { + xpl.red = true; + x = xp; + } + else { + if (sl == null || !sl.red) { + if (sr != null) + sr.red = false; + xpl.red = true; + root = rotateLeft(root, xpl); + xpl = (xp = x.parent) == null ? + null : xp.left; + } + if (xpl != null) { + xpl.red = (xp == null) ? false : xp.red; + if ((sl = xpl.left) != null) + sl.red = false; + } + if (xp != null) { + xp.red = false; + root = rotateRight(root, xp); + } + x = root; + } + } + } + } + } + + /** + * Recursive invariant check + */ + static boolean checkInvariants(TreeNode t) { + TreeNode tp = t.parent, tl = t.left, tr = t.right, + tb = t.prev, tn = (TreeNode)t.next; + if (tb != null && tb.next != t) + return false; + if (tn != null && tn.prev != t) + return false; + if (tp != null && t != tp.left && t != tp.right) + return false; + if (tl != null && (tl.parent != t || tl.hash > t.hash)) + return false; + if (tr != null && (tr.parent != t || tr.hash < t.hash)) + return false; + if (t.red && tl != null && tl.red && tr != null && tr.red) + return false; + if (tl != null && !checkInvariants(tl)) + return false; + if (tr != null && !checkInvariants(tr)) + return false; + return true; + } + } + } diff -r 39ccb0972a2f -r dda89341ee2d jdk/src/share/classes/java/util/LinkedHashMap.java --- a/jdk/src/share/classes/java/util/LinkedHashMap.java Mon Aug 12 12:22:10 2013 +0200 +++ b/jdk/src/share/classes/java/util/LinkedHashMap.java Wed Sep 04 09:34:25 2013 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,9 +24,12 @@ */ package java.util; -import java.io.*; + +import java.util.function.Consumer; import java.util.function.BiConsumer; import java.util.function.BiFunction; +import java.io.Serializable; +import java.io.IOException; /** *

Hash table and linked list implementation of the Map interface, @@ -57,9 +60,9 @@ * order they were presented.) * *

A special {@link #LinkedHashMap(int,float,boolean) constructor} is - * provided to create a LinkedHashMap whose order of iteration is the - * order in which its entries were last accessed, from least-recently accessed - * to most-recently (access-order). This kind of map is well-suited to + * provided to create a linked hash map whose order of iteration is the order + * in which its entries were last accessed, from least-recently accessed to + * most-recently (access-order). This kind of map is well-suited to * building LRU caches. Invoking the put or get method * results in an access to the corresponding entry (assuming it exists after * the invocation completes). The putAll method generates one entry @@ -155,18 +158,53 @@ * @see Hashtable * @since 1.4 */ - public class LinkedHashMap extends HashMap implements Map { + /* + * Implementation note. A previous version of this class was + * internally structured a little differently. Because superclass + * HashMap now uses trees for some of its nodes, class + * LinkedHashMap.Entry is now treated as intermediary node class + * that can also be converted to tree form. The name of this + * class, LinkedHashMap.Entry, is confusing in several ways in its + * current context, but cannot be changed. Otherwise, even though + * it is not exported outside this package, some existing source + * code is known to have relied on a symbol resolution corner case + * rule in calls to removeEldestEntry that suppressed compilation + * errors due to ambiguous usages. So, we keep the name to + * preserve unmodified compilability. + * + * The changes in node classes also require using two fields + * (head, tail) rather than a pointer to a header node to maintain + * the doubly-linked before/after list. This class also + * previously used a different style of callback methods upon + * access, insertion, and removal. + */ + + /** + * HashMap.Node subclass for normal LinkedHashMap entries. + */ + static class Entry extends HashMap.Node { + Entry before, after; + Entry(int hash, K key, V value, Node next) { + super(hash, key, value, next); + } + } + private static final long serialVersionUID = 3801124242820219131L; /** - * The head of the doubly linked list. + * The head (eldest) of the doubly linked list. */ - private transient Entry header; + transient LinkedHashMap.Entry head; + + /** + * The tail (youngest) of the doubly linked list. 
+ */ + transient LinkedHashMap.Entry tail; /** * The iteration ordering method for this linked hash map: true @@ -174,7 +212,125 @@ * * @serial */ - private final boolean accessOrder; + final boolean accessOrder; + + // internal utilities + + // link at the end of list + private void linkNodeLast(LinkedHashMap.Entry p) { + LinkedHashMap.Entry last = tail; + tail = p; + if (last == null) + head = p; + else { + p.before = last; + last.after = p; + } + } + + // apply src's links to dst + private void transferLinks(LinkedHashMap.Entry src, + LinkedHashMap.Entry dst) { + LinkedHashMap.Entry b = dst.before = src.before; + LinkedHashMap.Entry a = dst.after = src.after; + if (b == null) + head = dst; + else + b.after = dst; + if (a == null) + tail = dst; + else + a.before = dst; + } + + // overrides of HashMap hook methods + + void reinitialize() { + super.reinitialize(); + head = tail = null; + } + + Node newNode(int hash, K key, V value, Node e) { + LinkedHashMap.Entry p = + new LinkedHashMap.Entry(hash, key, value, e); + linkNodeLast(p); + return p; + } + + Node replacementNode(Node p, Node next) { + LinkedHashMap.Entry q = (LinkedHashMap.Entry)p; + LinkedHashMap.Entry t = + new LinkedHashMap.Entry(q.hash, q.key, q.value, next); + transferLinks(q, t); + return t; + } + + TreeNode newTreeNode(int hash, K key, V value, Node next) { + TreeNode p = new TreeNode(hash, key, value, next); + linkNodeLast(p); + return p; + } + + TreeNode replacementTreeNode(Node p, Node next) { + LinkedHashMap.Entry q = (LinkedHashMap.Entry)p; + TreeNode t = new TreeNode(q.hash, q.key, q.value, next); + transferLinks(q, t); + return t; + } + + void afterNodeRemoval(Node e) { // unlink + LinkedHashMap.Entry p = + (LinkedHashMap.Entry)e, b = p.before, a = p.after; + p.before = p.after = null; + if (b == null) + head = a; + else + b.after = a; + if (a == null) + tail = b; + else + a.before = b; + } + + void afterNodeInsertion(boolean evict) { // possibly remove eldest + LinkedHashMap.Entry first; + if (evict && (first = head) != null && removeEldestEntry(first)) { + K key = first.key; + removeNode(hash(key), key, null, false, true); + } + } + + void afterNodeAccess(Node e) { // move node to last + LinkedHashMap.Entry last; + if (accessOrder && (last = tail) != e) { + LinkedHashMap.Entry p = + (LinkedHashMap.Entry)e, b = p.before, a = p.after; + p.after = null; + if (b == null) + head = a; + else + b.after = a; + if (a != null) + a.before = b; + else + last = b; + if (last == null) + head = p; + else { + p.before = last; + last.after = p; + } + tail = p; + ++modCount; + } + } + + void internalWriteEntries(java.io.ObjectOutputStream s) throws IOException { + for (LinkedHashMap.Entry e = head; e != null; e = e.after) { + s.writeObject(e.key); + s.writeObject(e.value); + } + } /** * Constructs an empty insertion-ordered LinkedHashMap instance @@ -221,8 +377,9 @@ * @throws NullPointerException if the specified map is null */ public LinkedHashMap(Map m) { - super(m); + super(); accessOrder = false; + putMapEntries(m, false); } /** @@ -243,16 +400,6 @@ this.accessOrder = accessOrder; } - /** - * Called by superclass constructors and pseudoconstructors (clone, - * readObject) before any entries are inserted into the map. Initializes - * the chain. 
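(Editor's aside, not part of the patch: afterNodeAccess above is the heart of access ordering; it unlinks the touched node from the before/after chain and relinks it at the tail. The toy class below mirrors just that list manipulation under hypothetical names, with none of HashMap's node machinery.)

    // Illustrative only: a doubly-linked list with a move-to-tail step
    // shaped like LinkedHashMap.afterNodeAccess.
    class MoveToTailSketch {
        static final class Node {
            final String key;
            Node before, after;
            Node(String key) { this.key = key; }
        }

        Node head, tail;

        Node addLast(String key) {           // like linkNodeLast
            Node p = new Node(key);
            Node last = tail;
            tail = p;
            if (last == null)
                head = p;
            else {
                p.before = last;
                last.after = p;
            }
            return p;
        }

        void moveToTail(Node p) {            // like afterNodeAccess
            if (p == tail)
                return;                      // already the youngest entry
            Node b = p.before, a = p.after;
            if (b == null) head = a; else b.after = a;   // unlink p
            a.before = b;                    // a != null because p != tail
            p.before = tail;                 // relink p at the tail
            p.after = null;
            tail.after = p;
            tail = p;
        }

        public static void main(String[] args) {
            MoveToTailSketch list = new MoveToTailSketch();
            Node a = list.addLast("a");
            list.addLast("b");
            list.addLast("c");
            list.moveToTail(a);              // "access" a
            for (Node n = list.head; n != null; n = n.after)
                System.out.print(n.key + " ");   // prints: b c a
        }
    }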
- */ - @Override - void init() { - header = new Entry<>(-1, null, null, null); - header.before = header.after = header; - } /** * Returns true if this map maps one or more keys to the @@ -263,15 +410,10 @@ * specified value */ public boolean containsValue(Object value) { - // Overridden to take advantage of faster iterator - if (value==null) { - for (Entry e = header.after; e != header; e = e.after) - if (e.value==null) - return true; - } else { - for (Entry e = header.after; e != header; e = e.after) - if (value.equals(e.value)) - return true; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) { + V v = e.value; + if (v == value || (value != null && value.equals(v))) + return true; } return false; } @@ -292,10 +434,11 @@ * distinguish these two cases. */ public V get(Object key) { - Entry e = (Entry)getEntry(key); - if (e == null) + Node e; + if ((e = getNode(hash(key), key)) == null) return null; - e.recordAccess(this); + if (accessOrder) + afterNodeAccess(e); return e.value; } @@ -305,163 +448,7 @@ */ public void clear() { super.clear(); - header.before = header.after = header; - } - - @Override - public void forEach(BiConsumer action) { - Objects.requireNonNull(action); - int expectedModCount = modCount; - for (Entry entry = header.after; entry != header; entry = entry.after) { - action.accept(entry.key, entry.value); - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } - } - } - - @Override - public void replaceAll(BiFunction function) { - Objects.requireNonNull(function); - int expectedModCount = modCount; - for (Entry entry = header.after; entry != header; entry = entry.after) { - entry.value = function.apply(entry.key, entry.value); - - if (expectedModCount != modCount) { - throw new ConcurrentModificationException(); - } - } - } - - /** - * LinkedHashMap entry. - */ - private static class Entry extends HashMap.Entry { - // These fields comprise the doubly linked list used for iteration. - Entry before, after; - - Entry(int hash, K key, V value, Object next) { - super(hash, key, value, next); - } - - /** - * Removes this entry from the linked list. - */ - private void remove() { - before.after = after; - after.before = before; - } - - /** - * Inserts this entry before the specified existing entry in the list. - */ - private void addBefore(Entry existingEntry) { - after = existingEntry; - before = existingEntry.before; - before.after = this; - after.before = this; - } - - /** - * This method is invoked by the superclass whenever the value - * of a pre-existing entry is read by Map.get or modified by Map.put. - * If the enclosing Map is access-ordered, it moves the entry - * to the end of the list; otherwise, it does nothing. - */ - void recordAccess(HashMap m) { - LinkedHashMap lm = (LinkedHashMap)m; - if (lm.accessOrder) { - lm.modCount++; - remove(); - addBefore(lm.header); - } - } - - void recordRemoval(HashMap m) { - remove(); - } - } - - private abstract class LinkedHashIterator implements Iterator { - Entry nextEntry = header.after; - Entry lastReturned = null; - - /** - * The modCount value that the iterator believes that the backing - * List should have. If this expectation is violated, the iterator - * has detected concurrent modification. 
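(Editor's aside, not part of the patch: the rewritten get() above is what makes access order observable through the public API; a successful lookup moves the entry to the end of the iteration order. A minimal demo using only the documented three-argument constructor:)

    import java.util.LinkedHashMap;
    import java.util.Map;

    class AccessOrderDemo {
        public static void main(String[] args) {
            // true => iteration in access order rather than insertion order
            Map<String, Integer> m = new LinkedHashMap<>(16, 0.75f, true);
            m.put("a", 1);
            m.put("b", 2);
            m.put("c", 3);
            m.get("a");                      // "a" becomes the youngest entry
            System.out.println(m.keySet()); // prints [b, c, a]
        }
    }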
- */ - int expectedModCount = modCount; - - public boolean hasNext() { - return nextEntry != header; - } - - public void remove() { - if (lastReturned == null) - throw new IllegalStateException(); - if (modCount != expectedModCount) - throw new ConcurrentModificationException(); - - LinkedHashMap.this.remove(lastReturned.key); - lastReturned = null; - expectedModCount = modCount; - } - - Entry nextEntry() { - if (modCount != expectedModCount) - throw new ConcurrentModificationException(); - if (nextEntry == header) - throw new NoSuchElementException(); - - Entry e = lastReturned = nextEntry; - nextEntry = e.after; - return e; - } - } - - private class KeyIterator extends LinkedHashIterator { - public K next() { return nextEntry().getKey(); } - } - - private class ValueIterator extends LinkedHashIterator { - public V next() { return nextEntry().value; } - } - - private class EntryIterator extends LinkedHashIterator> { - public Map.Entry next() { return nextEntry(); } - } - - // These Overrides alter the behavior of superclass view iterator() methods - Iterator newKeyIterator() { return new KeyIterator(); } - Iterator newValueIterator() { return new ValueIterator(); } - Iterator> newEntryIterator() { return new EntryIterator(); } - - /** - * This override alters behavior of superclass put method. It causes newly - * allocated entry to get inserted at the end of the linked list and - * removes the eldest entry if appropriate. - */ - @Override - void addEntry(int hash, K key, V value, int bucketIndex, boolean checkIfNeedTree) { - super.addEntry(hash, key, value, bucketIndex, checkIfNeedTree); - - // Remove eldest entry if instructed - Entry eldest = header.after; - if (removeEldestEntry(eldest)) { - removeEntryForKey(eldest.key); - } - } - - /* - * Create a new LinkedHashMap.Entry and setup the before/after pointers - */ - @Override - HashMap.Entry newEntry(int hash, K key, V value, Object next) { - Entry newEntry = new Entry<>(hash, key, value, next); - newEntry.addBefore(header); - return newEntry; + head = tail = null; } /** @@ -475,13 +462,13 @@ *

Sample use: this override will allow the map to grow up to 100 * entries and then delete the eldest entry each time a new entry is * added, maintaining a steady state of 100 entries. - *

{@code
+     * 
      *     private static final int MAX_ENTRIES = 100;
      *
      *     protected boolean removeEldestEntry(Map.Entry eldest) {
-     *        return size() > MAX_ENTRIES;
+     *        return size() &gt; MAX_ENTRIES;
      *     }
-     * }</pre>
+     * </pre>
      *
This method typically does not modify the map in any way, * instead allowing the map to modify itself as directed by its @@ -508,4 +495,241 @@ protected boolean removeEldestEntry(Map.Entry eldest) { return false; } + + /** + * Returns a {@link Set} view of the keys contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation), the results of + * the iteration are undefined. The set supports element removal, + * which removes the corresponding mapping from the map, via the + * Iterator.remove, Set.remove, + * removeAll, retainAll, and clear + * operations. It does not support the add or addAll + * operations. + * Its {@link Spliterator} typically provides faster sequential + * performance but much poorer parallel performance than that of + * {@code HashMap}. + * + * @return a set view of the keys contained in this map + */ + public Set keySet() { + Set ks; + return (ks = keySet) == null ? (keySet = new LinkedKeySet()) : ks; + } + + final class LinkedKeySet extends AbstractSet { + public final int size() { return size; } + public final void clear() { LinkedHashMap.this.clear(); } + public final Iterator iterator() { + return new LinkedKeyIterator(); + } + public final boolean contains(Object o) { return containsKey(o); } + public final boolean remove(Object key) { + return removeNode(hash(key), key, null, false, true) != null; + } + public final Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.SIZED | + Spliterator.ORDERED | + Spliterator.DISTINCT); + } + public final void forEach(Consumer action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e.key); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + /** + * Returns a {@link Collection} view of the values contained in this map. + * The collection is backed by the map, so changes to the map are + * reflected in the collection, and vice-versa. If the map is + * modified while an iteration over the collection is in progress + * (except through the iterator's own remove operation), + * the results of the iteration are undefined. The collection + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Collection.remove, removeAll, + * retainAll and clear operations. It does not + * support the add or addAll operations. + * Its {@link Spliterator} typically provides faster sequential + * performance but much poorer parallel performance than that of + * {@code HashMap}. + * + * @return a view of the values contained in this map + */ + public Collection values() { + Collection vs; + return (vs = values) == null ? 
(values = new LinkedValues()) : vs; + } + + final class LinkedValues extends AbstractCollection { + public final int size() { return size; } + public final void clear() { LinkedHashMap.this.clear(); } + public final Iterator iterator() { + return new LinkedValueIterator(); + } + public final boolean contains(Object o) { return containsValue(o); } + public final Spliterator spliterator() { + return Spliterators.spliterator(this, Spliterator.SIZED | + Spliterator.ORDERED); + } + public final void forEach(Consumer action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e.value); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + /** + * Returns a {@link Set} view of the mappings contained in this map. + * The set is backed by the map, so changes to the map are + * reflected in the set, and vice-versa. If the map is modified + * while an iteration over the set is in progress (except through + * the iterator's own remove operation, or through the + * setValue operation on a map entry returned by the + * iterator) the results of the iteration are undefined. The set + * supports element removal, which removes the corresponding + * mapping from the map, via the Iterator.remove, + * Set.remove, removeAll, retainAll and + * clear operations. It does not support the + * add or addAll operations. + * Its {@link Spliterator} typically provides faster sequential + * performance but much poorer parallel performance than that of + * {@code HashMap}. + * + * @return a set view of the mappings contained in this map + */ + public Set> entrySet() { + Set> es; + return (es = entrySet) == null ? (entrySet = new LinkedEntrySet()) : es; + } + + final class LinkedEntrySet extends AbstractSet> { + public final int size() { return size; } + public final void clear() { LinkedHashMap.this.clear(); } + public final Iterator> iterator() { + return new LinkedEntryIterator(); + } + public final boolean contains(Object o) { + if (!(o instanceof Map.Entry)) + return false; + Map.Entry e = (Map.Entry) o; + Object key = e.getKey(); + Node candidate = getNode(hash(key), key); + return candidate != null && candidate.equals(e); + } + public final boolean remove(Object o) { + if (o instanceof Map.Entry) { + Map.Entry e = (Map.Entry) o; + Object key = e.getKey(); + Object value = e.getValue(); + return removeNode(hash(key), key, value, true, true) != null; + } + return false; + } + public final Spliterator> spliterator() { + return Spliterators.spliterator(this, Spliterator.SIZED | + Spliterator.ORDERED | + Spliterator.DISTINCT); + } + public final void forEach(Consumer> action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + } + + // Map overrides + + public void forEach(BiConsumer action) { + if (action == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + action.accept(e.key, e.value); + if (modCount != mc) + throw new ConcurrentModificationException(); + } + + public void replaceAll(BiFunction function) { + if (function == null) + throw new NullPointerException(); + int mc = modCount; + for (LinkedHashMap.Entry e = head; e != null; e = e.after) + e.value = function.apply(e.key, e.value); + if (modCount != mc) + throw new 
ConcurrentModificationException(); + } + + // Iterators + + abstract class LinkedHashIterator { + LinkedHashMap.Entry next; + LinkedHashMap.Entry current; + int expectedModCount; + + LinkedHashIterator() { + next = head; + expectedModCount = modCount; + current = null; + } + + public final boolean hasNext() { + return next != null; + } + + final LinkedHashMap.Entry nextNode() { + LinkedHashMap.Entry e = next; + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + if (e == null) + throw new NoSuchElementException(); + current = e; + next = e.after; + return e; + } + + public final void remove() { + Node p = current; + if (p == null) + throw new IllegalStateException(); + if (modCount != expectedModCount) + throw new ConcurrentModificationException(); + current = null; + K key = p.key; + removeNode(hash(key), key, null, false, false); + expectedModCount = modCount; + } + } + + final class LinkedKeyIterator extends LinkedHashIterator + implements Iterator { + public final K next() { return nextNode().getKey(); } + } + + final class LinkedValueIterator extends LinkedHashIterator + implements Iterator { + public final V next() { return nextNode().value; } + } + + final class LinkedEntryIterator extends LinkedHashIterator + implements Iterator> { + public final Map.Entry next() { return nextNode(); } + } + + } diff -r 39ccb0972a2f -r dda89341ee2d jdk/test/java/lang/reflect/Generics/Probe.java --- a/jdk/test/java/lang/reflect/Generics/Probe.java Mon Aug 12 12:22:10 2013 +0200 +++ b/jdk/test/java/lang/reflect/Generics/Probe.java Wed Sep 04 09:34:25 2013 +0200 @@ -50,9 +50,9 @@ "java.util.HashMap$EntryIterator", "java.util.HashMap$KeyIterator", "java.util.HashMap$ValueIterator", - "java.util.LinkedHashMap$EntryIterator", - "java.util.LinkedHashMap$KeyIterator", - "java.util.LinkedHashMap$ValueIterator"}) + "java.util.LinkedHashMap$LinkedEntryIterator", + "java.util.LinkedHashMap$LinkedKeyIterator", + "java.util.LinkedHashMap$LinkedValueIterator"}) public class Probe { public static void main (String... args) throws Throwable { Classes classesAnnotation = (Probe.class).getAnnotation(Classes.class); diff -r 39ccb0972a2f -r dda89341ee2d jdk/test/java/util/Map/CheckRandomHashSeed.java --- a/jdk/test/java/util/Map/CheckRandomHashSeed.java Mon Aug 12 12:22:10 2013 +0200 +++ b/jdk/test/java/util/Map/CheckRandomHashSeed.java Wed Sep 04 09:34:25 2013 +0200 @@ -53,8 +53,6 @@ throw new Error("Error in test setup: " + (expectRandom ? "" : "not " ) + "expecting random hashSeed, but " + PROP_NAME + " is " + (propSet ? "" : "not ") + "enabled"); } - testMap(new HashMap()); - testMap(new LinkedHashMap()); testMap(new WeakHashMap()); testMap(new Hashtable()); } diff -r 39ccb0972a2f -r dda89341ee2d jdk/test/java/util/Map/InPlaceOpsCollisions.java --- a/jdk/test/java/util/Map/InPlaceOpsCollisions.java Mon Aug 12 12:22:10 2013 +0200 +++ b/jdk/test/java/util/Map/InPlaceOpsCollisions.java Wed Sep 04 09:34:25 2013 +0200 @@ -25,7 +25,6 @@ * @test * @bug 8005698 * @run main InPlaceOpsCollisions -shortrun - * @run main/othervm -Djdk.map.randomseed=true InPlaceOpsCollisions -shortrun * @summary Ensure overrides of in-place operations in Maps behave well with lots of collisions. * @author Brent Christian */ diff -r 39ccb0972a2f -r dda89341ee2d jdk/test/java/util/Map/MapBinToFromTreeTest.java --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/jdk/test/java/util/Map/MapBinToFromTreeTest.java Wed Sep 04 09:34:25 2013 +0200 @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2013, Oracle and/or its affiliates. 
All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +import org.testng.annotations.DataProvider; +import org.testng.annotations.Test; + +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiConsumer; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.testng.Assert.assertEquals; + +/* + * @test + * @bug 8023463 + * @summary Test the case where a bin is treeified and vice verser + * @run testng MapBinToFromTreeTest + */ + +@Test +public class MapBinToFromTreeTest { + + // Initial capacity of map + // Should be >= the map capacity for treeifiying, see HashMap/ConcurrentMap.MIN_TREEIFY_CAPACITY + static final int INITIAL_CAPACITY = 64; + + // Maximum size of map + // Should be > the treeify threshold, see HashMap/ConcurrentMap.TREEIFY_THRESHOLD + // Should be > INITIAL_CAPACITY to ensure resize occurs + static final int SIZE = 256; + + // Load factor of map + // A value 1.0 will ensure that a new threshold == capacity + static final float LOAD_FACTOR = 1.0f; + + @DataProvider(name = "maps") + static Object[][] mapProvider() { + return new Object[][] { + // Pass in the class name as a description for test reporting + // purposes + { HashMap.class.getName(), new HashMap(INITIAL_CAPACITY, LOAD_FACTOR) }, + { LinkedHashMap.class.getName(), new LinkedHashMap(INITIAL_CAPACITY, LOAD_FACTOR) }, + { ConcurrentHashMap.class.getName(), new ConcurrentHashMap(INITIAL_CAPACITY, LOAD_FACTOR) }, + }; + } + + @Test(dataProvider = "maps") + public void testPutThenGet(String d, Map m) { + put(SIZE, m, (i, s) -> { + for (int j = 0; j < s; j++) { + assertEquals(m.get(new HashCodeInteger(j)).intValue(), j, + String.format("Map.get(%d)", j)); + } + }); + } + + @Test(dataProvider = "maps") + public void testPutThenTraverse(String d, Map m) { + Collector> c = getCollector(m); + + put(SIZE, m, (i, s) -> { + // Note that it is OK to collect to a Set (HashSet) as long as + // integer values are used since these tests only check for + // collisions and other tests will verify more general functionality + Collection actual = m.keySet().stream().map(e -> e.value).collect(c); + Collection expected = IntStream.range(0, s).boxed().collect(c); + assertEquals(actual, expected, "Map.keySet()"); + }); + } + + @Test(dataProvider = "maps") + public void testRemoveThenGet(String d, Map m) { + put(SIZE, m, (i, s) -> { }); + + remove(m, (i, s) -> { + for (int j = i + 1; j < SIZE; 
j++) { + assertEquals(m.get(new HashCodeInteger(j)).intValue(), j, + String.format("Map.get(%d)", j)); + } + }); + } + + @Test(dataProvider = "maps") + public void testRemoveThenTraverse(String d, Map m) { + put(SIZE, m, (i, s) -> { }); + + Collector> c = getCollector(m); + + remove(m, (i, s) -> { + Collection actual = m.keySet().stream().map(e -> e.value).collect(c); + Collection expected = IntStream.range(i + 1, SIZE).boxed().collect(c); + assertEquals(actual, expected, "Map.keySet()"); + }); + } + + @Test(dataProvider = "maps") + public void testUntreeifyOnResizeWithGet(String d, Map m) { + // Fill the map with 64 entries grouped into 4 buckets + put(INITIAL_CAPACITY, m, (i, s) -> { }); + + for (int i = INITIAL_CAPACITY; i < SIZE; i++) { + // Add further entries in the 0'th bucket so as not to disturb + // other buckets, entries of which may be distributed and/or + // the bucket untreeified on resize + m.put(new HashCodeInteger(i, 0), i); + + for (int j = 0; j < INITIAL_CAPACITY; j++) { + assertEquals(m.get(new HashCodeInteger(j)).intValue(), j, + String.format("Map.get(%d) < INITIAL_CAPACITY", j)); + } + for (int j = INITIAL_CAPACITY; j <= i; j++) { + assertEquals(m.get(new HashCodeInteger(j, 0)).intValue(), j, + String.format("Map.get(%d) >= INITIAL_CAPACITY", j)); + } + } + } + + @Test(dataProvider = "maps") + public void testUntreeifyOnResizeWithTraverse(String d, Map m) { + // Fill the map with 64 entries grouped into 4 buckets + put(INITIAL_CAPACITY, m, (i, s) -> { }); + + Collector> c = getCollector(m); + + for (int i = INITIAL_CAPACITY; i < SIZE; i++) { + // Add further entries in the 0'th bucket so as not to disturb + // other buckets, entries of which may be distributed and/or + // the bucket untreeified on resize + m.put(new HashCodeInteger(i, 0), i); + + Collection actual = m.keySet().stream().map(e -> e.value).collect(c); + Collection expected = IntStream.rangeClosed(0, i).boxed().collect(c); + assertEquals(actual, expected, "Key set"); + } + } + + Collector> getCollector(Map m) { + Collector> collector = m instanceof LinkedHashMap + ? Collectors.toList() + : Collectors.toSet(); + return collector; + } + + void put(int size, Map m, BiConsumer c) { + for (int i = 0; i < size; i++) { + m.put(new HashCodeInteger(i), i); + + c.accept(i, m.size()); + } + } + + void remove(Map m, BiConsumer c) { + int size = m.size(); + // Remove all elements thus ensuring at some point trees will be + // converting back to bins + for (int i = 0; i < size; i++) { + m.remove(new HashCodeInteger(i)); + + c.accept(i, m.size()); + } + } + + final static class HashCodeInteger implements Comparable { + final int value; + + final int hashcode; + + HashCodeInteger(int value) { + this(value, hash(value)); + } + + HashCodeInteger(int value, int hashcode) { + this.value = value; + this.hashcode = hashcode; + } + + static int hash(int i) { + // Assuming 64 entries with keys from 0 to 63 then a map: + // - of capacity 64 will have 4 buckets with 16 entries per-bucket + // - of capacity 128 will have 8 buckets with 8 entries per-bucket + // - of capacity 256 will have 16 buckets with 4 entries per-bucket + // + // Re-sizing will result in re-distribution, doubling the buckets + // and reducing the entries by half. 
This will result in + // untreeifying when the number of entries is less than untreeify + // threshold (see HashMap/ConcurrentMap.UNTREEIFY_THRESHOLD) + return (i % 4) + (i / 4) * INITIAL_CAPACITY; + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof HashCodeInteger) { + HashCodeInteger other = (HashCodeInteger) obj; + return other.value == value; + } + return false; + } + + @Override + public int hashCode() { + return hashcode; + } + + @Override + public int compareTo(HashCodeInteger o) { + return value - o.value; + } + + @Override + public String toString() { + return Integer.toString(value); + } + } +} diff -r 39ccb0972a2f -r dda89341ee2d jdk/test/java/util/Map/TreeBinSplitBackToEntries.java --- a/jdk/test/java/util/Map/TreeBinSplitBackToEntries.java Mon Aug 12 12:22:10 2013 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,255 +0,0 @@ -/* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -import java.util.*; -import java.lang.reflect.Field; - -/* - * @test - * @bug 8005698 - * @summary Test the case where TreeBin.splitTreeBin() converts a bin back to an Entry list - * @run main TreeBinSplitBackToEntries unused - * @author Brent Christian - */ - -public class TreeBinSplitBackToEntries { - private static int EXPECTED_TREE_THRESHOLD = 16; - - // Easiest if this covers one bit higher then 'bit' in splitTreeBin() on the - // call where the TreeBin is converted back to an Entry list - private static int HASHMASK = 0x7F; - private static boolean verbose = false; - private static boolean fastFail = false; - private static boolean failed = false; - - static void printlnIfVerbose(String msg) { - if (verbose) {System.out.println(msg); } - } - - public static void main(String[] args) { - for (String arg : args) { - switch(arg) { - case "-verbose": - verbose = true; - break; - case "-fastfail": - fastFail = true; - break; - } - } - checkTreeThreshold(); - testMapHiTree(); - testMapLoTree(); - if (failed) { - System.out.println("Test Failed"); - System.exit(1); - } else { - System.out.println("Test Passed"); - } - } - - public static void checkTreeThreshold() { - int threshold = -1; - try { - Class treeBinClass = Class.forName("java.util.HashMap$TreeBin"); - Field treeThreshold = treeBinClass.getDeclaredField("TREE_THRESHOLD"); - treeThreshold.setAccessible(true); - threshold = treeThreshold.getInt(treeBinClass); - } catch (ClassNotFoundException|NoSuchFieldException|IllegalAccessException e) { - e.printStackTrace(); - throw new Error("Problem accessing TreeBin.TREE_THRESHOLD", e); - } - check("Expected TREE_THRESHOLD: " + EXPECTED_TREE_THRESHOLD +", found: " + threshold, - threshold == EXPECTED_TREE_THRESHOLD); - printlnIfVerbose("TREE_THRESHOLD: " + threshold); - } - - public static void testMapHiTree() { - Object[][] mapKeys = makeHiTreeTestData(); - testMapsForKeys(mapKeys, "hiTree"); - } - - public static void testMapLoTree() { - Object[][] mapKeys = makeLoTreeTestData(); - - testMapsForKeys(mapKeys, "loTree"); - } - - public static void testMapsForKeys(Object[][] mapKeys, String desc) { - // loop through data sets - for (Object[] keys_desc : mapKeys) { - Map[] maps = (Map[]) new Map[]{ - new HashMap<>(4, 0.8f), - new LinkedHashMap<>(4, 0.8f), - }; - // for each map type. 
- for (Map map : maps) { - Object[] keys = (Object[]) keys_desc[1]; - System.out.println(desc + ": testPutThenGet() for " + map.getClass()); - testPutThenGet(map, keys); - } - } - } - - private static void testPutThenGet(Map map, T[] keys) { - for (T key : keys) { - printlnIfVerbose("put()ing 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + ", hashCode=" + Integer.toHexString(key.hashCode())); - map.put(key, key); - } - for (T key : keys) { - check("key: 0x" + Integer.toHexString(Integer.parseInt(key.toString())) + " not found in resulting " + map.getClass().getSimpleName(), map.get(key) != null); - } - } - - /* Data to force a non-empty loTree in TreeBin.splitTreeBin() to be converted back - * into an Entry list - */ - private static Object[][] makeLoTreeTestData() { - HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] { - new HashableInteger( 0x23, HASHMASK), - new HashableInteger( 0x123, HASHMASK), - new HashableInteger( 0x323, HASHMASK), - new HashableInteger( 0x523, HASHMASK), - - new HashableInteger( 0x723, HASHMASK), - new HashableInteger( 0x923, HASHMASK), - new HashableInteger( 0xB23, HASHMASK), - new HashableInteger( 0xD23, HASHMASK), - - new HashableInteger( 0xF23, HASHMASK), - new HashableInteger( 0xF123, HASHMASK), - new HashableInteger( 0x1023, HASHMASK), - new HashableInteger( 0x1123, HASHMASK), - - new HashableInteger( 0x1323, HASHMASK), - new HashableInteger( 0x1523, HASHMASK), - new HashableInteger( 0x1723, HASHMASK), - new HashableInteger( 0x1923, HASHMASK), - - new HashableInteger( 0x1B23, HASHMASK), - new HashableInteger( 0x1D23, HASHMASK), - new HashableInteger( 0x3123, HASHMASK), - new HashableInteger( 0x3323, HASHMASK), - new HashableInteger( 0x3523, HASHMASK), - - new HashableInteger( 0x3723, HASHMASK), - new HashableInteger( 0x1001, HASHMASK), - new HashableInteger( 0x4001, HASHMASK), - new HashableInteger( 0x1, HASHMASK), - }; - return new Object[][] { - new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, - }; - } - - /* Data to force the hiTree in TreeBin.splitTreeBin() to be converted back - * into an Entry list - */ - private static Object[][] makeHiTreeTestData() { - HashableInteger COLLIDING_OBJECTS[] = new HashableInteger[] { - new HashableInteger( 0x1, HASHMASK), - new HashableInteger( 0x101, HASHMASK), - new HashableInteger( 0x301, HASHMASK), - new HashableInteger( 0x501, HASHMASK), - new HashableInteger( 0x701, HASHMASK), - - new HashableInteger( 0x1001, HASHMASK), - new HashableInteger( 0x1101, HASHMASK), - new HashableInteger( 0x1301, HASHMASK), - - new HashableInteger( 0x1501, HASHMASK), - new HashableInteger( 0x1701, HASHMASK), - new HashableInteger( 0x4001, HASHMASK), - new HashableInteger( 0x4101, HASHMASK), - new HashableInteger( 0x4301, HASHMASK), - - new HashableInteger( 0x4501, HASHMASK), - new HashableInteger( 0x4701, HASHMASK), - new HashableInteger( 0x8001, HASHMASK), - new HashableInteger( 0x8101, HASHMASK), - - - new HashableInteger( 0x8301, HASHMASK), - new HashableInteger( 0x8501, HASHMASK), - new HashableInteger( 0x8701, HASHMASK), - new HashableInteger( 0x9001, HASHMASK), - - new HashableInteger( 0x23, HASHMASK), - new HashableInteger( 0x123, HASHMASK), - new HashableInteger( 0x323, HASHMASK), - new HashableInteger( 0x523, HASHMASK), - }; - return new Object[][] { - new Object[]{"Colliding Objects", COLLIDING_OBJECTS}, - }; - } - - static void check(String desc, boolean cond) { - if (!cond) { - fail(desc); - } - } - - static void fail(String msg) { - failed = true; - (new Error("Failure: " + 
msg)).printStackTrace(System.err); - if (fastFail) { - System.exit(1); - } - } - - final static class HashableInteger implements Comparable { - final int value; - final int hashmask; //yes duplication - - HashableInteger(int value, int hashmask) { - this.value = value; - this.hashmask = hashmask; - } - - @Override - public boolean equals(Object obj) { - if (obj instanceof HashableInteger) { - HashableInteger other = (HashableInteger) obj; - return other.value == value; - } - return false; - } - - @Override - public int hashCode() { - // This version ANDs the mask - return value & hashmask; - } - - @Override - public int compareTo(HashableInteger o) { - return value - o.value; - } - - @Override - public String toString() { - return Integer.toString(value); - } - } -} diff -r 39ccb0972a2f -r dda89341ee2d jdk/test/java/util/Spliterator/SpliteratorCharacteristics.java --- a/jdk/test/java/util/Spliterator/SpliteratorCharacteristics.java Mon Aug 12 12:22:10 2013 +0200 +++ b/jdk/test/java/util/Spliterator/SpliteratorCharacteristics.java Wed Sep 04 09:34:25 2013 +0200 @@ -23,7 +23,7 @@ /** * @test - * @bug 8020156 8020009 8022326 + * @bug 8020156 8020009 8022326 8012913 * @run testng SpliteratorCharacteristics */ @@ -32,6 +32,10 @@ import java.util.Arrays; import java.util.Collection; import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; import java.util.Map; import java.util.Set; import java.util.SortedMap; @@ -47,7 +51,27 @@ @Test public class SpliteratorCharacteristics { - // TreeMap + public void testHashMap() { + assertMapCharacteristics(new HashMap<>(), + Spliterator.SIZED | Spliterator.DISTINCT); + } + + public void testHashSet() { + assertSetCharacteristics(new HashSet<>(), + Spliterator.SIZED | Spliterator.DISTINCT); + } + + public void testLinkedHashMap() { + assertMapCharacteristics(new LinkedHashMap<>(), + Spliterator.SIZED | Spliterator.DISTINCT | + Spliterator.ORDERED); + } + + public void testLinkedHashSet() { + assertSetCharacteristics(new LinkedHashSet<>(), + Spliterator.SIZED | Spliterator.DISTINCT | + Spliterator.ORDERED); + } public void testTreeMap() { assertSortedMapCharacteristics(new TreeMap<>(), @@ -61,9 +85,6 @@ Spliterator.SORTED | Spliterator.ORDERED); } - - // TreeSet - public void testTreeSet() { assertSortedSetCharacteristics(new TreeSet<>(), Spliterator.SIZED | Spliterator.DISTINCT | @@ -76,9 +97,6 @@ Spliterator.SORTED | Spliterator.ORDERED); } - - // ConcurrentSkipListMap - public void testConcurrentSkipListMap() { assertSortedMapCharacteristics(new ConcurrentSkipListMap<>(), Spliterator.CONCURRENT | Spliterator.NONNULL | @@ -93,9 +111,6 @@ Spliterator.ORDERED); } - - // ConcurrentSkipListSet - public void testConcurrentSkipListSet() { assertSortedSetCharacteristics(new ConcurrentSkipListSet<>(), Spliterator.CONCURRENT | Spliterator.NONNULL | @@ -113,35 +128,58 @@ // - void assertSortedMapCharacteristics(SortedMap m, int keyCharacteristics) { + + void assertMapCharacteristics(Map m, int keyCharacteristics) { + assertMapCharacteristics(m, keyCharacteristics, 0); + } + + void assertMapCharacteristics(Map m, int keyCharacteristics, int notValueCharacteristics) { initMap(m); - boolean hasComparator = m.comparator() != null; + assertCharacteristics(m.keySet(), keyCharacteristics); + + assertCharacteristics(m.values(), + keyCharacteristics & ~(Spliterator.DISTINCT | notValueCharacteristics)); + + assertCharacteristics(m.entrySet(), keyCharacteristics); + + if 
((keyCharacteristics & Spliterator.SORTED) == 0) { + assertISEComparator(m.keySet()); + assertISEComparator(m.values()); + assertISEComparator(m.entrySet()); + } + } + + void assertSetCharacteristics(Set s, int keyCharacteristics) { + initSet(s); + + assertCharacteristics(s, keyCharacteristics); + + if ((keyCharacteristics & Spliterator.SORTED) == 0) { + assertISEComparator(s); + } + } + + void assertSortedMapCharacteristics(SortedMap m, int keyCharacteristics) { + assertMapCharacteristics(m, keyCharacteristics, Spliterator.SORTED); Set keys = m.keySet(); - assertCharacteristics(keys, keyCharacteristics); - if (hasComparator) { + if (m.comparator() != null) { assertNotNullComparator(keys); } else { assertNullComparator(keys); } - assertCharacteristics(m.values(), - keyCharacteristics & ~(Spliterator.DISTINCT | Spliterator.SORTED)); assertISEComparator(m.values()); - assertCharacteristics(m.entrySet(), keyCharacteristics); assertNotNullComparator(m.entrySet()); } void assertSortedSetCharacteristics(SortedSet s, int keyCharacteristics) { - initSet(s); + assertSetCharacteristics(s, keyCharacteristics); - boolean hasComparator = s.comparator() != null; - - assertCharacteristics(s, keyCharacteristics); - if (hasComparator) { + if (s.comparator() != null) { assertNotNullComparator(s); } else { @@ -161,27 +199,18 @@ } void assertCharacteristics(Collection c, int expectedCharacteristics) { - assertCharacteristics(c.spliterator(), expectedCharacteristics); - } - - void assertCharacteristics(Spliterator s, int expectedCharacteristics) { - assertTrue(s.hasCharacteristics(expectedCharacteristics)); + assertTrue(c.spliterator().hasCharacteristics(expectedCharacteristics), + "Spliterator characteristics"); } void assertNullComparator(Collection c) { - assertNullComparator(c.spliterator()); - } - - void assertNullComparator(Spliterator s) { - assertNull(s.getComparator()); + assertNull(c.spliterator().getComparator(), + "Comparator of Spliterator of Collection"); } void assertNotNullComparator(Collection c) { - assertNotNullComparator(c.spliterator()); - } - - void assertNotNullComparator(Spliterator s) { - assertNotNull(s.getComparator()); + assertNotNull(c.spliterator().getComparator(), + "Comparator of Spliterator of Collection"); } void assertISEComparator(Collection c) { @@ -196,6 +225,6 @@ catch (IllegalStateException e) { caught = true; } - assertTrue(caught); + assertTrue(caught, "Throwing IllegalStateException"); } }
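(Editor's aside, not part of the patch: the ORDERED guarantee that 8012913 adds, and that the updated test above asserts, is easy to check directly; the class name below is illustrative.)

    import java.util.HashMap;
    import java.util.LinkedHashMap;
    import java.util.Spliterator;

    class OrderedCharacteristicDemo {
        public static void main(String[] args) {
            Spliterator<String> hashKeys =
                new HashMap<String, Integer>().keySet().spliterator();
            Spliterator<String> linkedKeys =
                new LinkedHashMap<String, Integer>().keySet().spliterator();

            System.out.println(
                hashKeys.hasCharacteristics(Spliterator.ORDERED));   // false
            System.out.println(
                linkedKeys.hasCharacteristics(Spliterator.ORDERED)); // true
            System.out.println(
                linkedKeys.hasCharacteristics(
                    Spliterator.SIZED | Spliterator.DISTINCT));      // true
        }
    }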