--- a/jdk/src/share/classes/java/util/concurrent/ConcurrentHashMap.java Tue Jun 04 10:33:13 2013 -0700
+++ b/jdk/src/share/classes/java/util/concurrent/ConcurrentHashMap.java Tue Jun 04 21:59:23 2013 +0100
@@ -34,14 +34,47 @@
*/
package java.util.concurrent;
-import java.io.ObjectInputStream;
-import java.util.concurrent.locks.*;
-import java.util.*;
import java.io.Serializable;
+import java.io.ObjectStreamField;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.Type;
+import java.util.AbstractMap;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.ConcurrentModificationException;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Hashtable;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.Spliterator;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.StampedLock;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.BinaryOperator;
+import java.util.function.Consumer;
+import java.util.function.DoubleBinaryOperator;
+import java.util.function.Function;
+import java.util.function.IntBinaryOperator;
+import java.util.function.LongBinaryOperator;
+import java.util.function.ToDoubleBiFunction;
+import java.util.function.ToDoubleFunction;
+import java.util.function.ToIntBiFunction;
+import java.util.function.ToIntFunction;
+import java.util.function.ToLongBiFunction;
+import java.util.function.ToLongFunction;
+import java.util.stream.Stream;
/**
* A hash table supporting full concurrency of retrievals and
- * adjustable expected concurrency for updates. This class obeys the
+ * high expected concurrency for updates. This class obeys the
* same functional specification as {@link java.util.Hashtable}, and
* includes versions of methods corresponding to each method of
* {@code Hashtable}. However, even though all operations are
@@ -51,35 +84,61 @@
* interoperable with {@code Hashtable} in programs that rely on its
* thread safety but not on its synchronization details.
*
- * <p> Retrieval operations (including {@code get}) generally do not
- * block, so may overlap with update operations (including
- * {@code put} and {@code remove}). Retrievals reflect the results
- * of the most recently <em>completed</em> update operations holding
- * upon their onset. For aggregate operations such as {@code putAll}
- * and {@code clear}, concurrent retrievals may reflect insertion or
- * removal of only some entries. Similarly, Iterators and
- * Enumerations return elements reflecting the state of the hash table
- * at some point at or since the creation of the iterator/enumeration.
- * They do <em>not</em> throw {@link ConcurrentModificationException}.
- * However, iterators are designed to be used by only one thread at a time.
+ * <p>Retrieval operations (including {@code get}) generally do not
+ * block, so may overlap with update operations (including {@code put}
+ * and {@code remove}). Retrievals reflect the results of the most
+ * recently <em>completed</em> update operations holding upon their
+ * onset. (More formally, an update operation for a given key bears a
+ * <em>happens-before</em> relation with any (non-null) retrieval for
+ * that key reporting the updated value.) For aggregate operations
+ * such as {@code putAll} and {@code clear}, concurrent retrievals may
+ * reflect insertion or removal of only some entries. Similarly,
+ * Iterators and Enumerations return elements reflecting the state of
+ * the hash table at some point at or since the creation of the
+ * iterator/enumeration. They do <em>not</em> throw {@link
+ * ConcurrentModificationException}. However, iterators are designed
+ * to be used by only one thread at a time. Bear in mind that the
+ * results of aggregate status methods including {@code size}, {@code
+ * isEmpty}, and {@code containsValue} are typically useful only when
+ * a map is not undergoing concurrent updates in other threads.
+ * Otherwise the results of these methods reflect transient states
+ * that may be adequate for monitoring or estimation purposes, but not
+ * for program control.
*
- * <p> The allowed concurrency among update operations is guided by
- * the optional {@code concurrencyLevel} constructor argument
- * (default {@code 16}), which is used as a hint for internal sizing. The
- * table is internally partitioned to try to permit the indicated
- * number of concurrent updates without contention. Because placement
- * in hash tables is essentially random, the actual concurrency will
- * vary. Ideally, you should choose a value to accommodate as many
- * threads as will ever concurrently modify the table. Using a
- * significantly higher value than you need can waste space and time,
- * and a significantly lower value can lead to thread contention. But
- * overestimates and underestimates within an order of magnitude do
- * not usually have much noticeable impact. A value of one is
- * appropriate when it is known that only one thread will modify and
- * all others will only read. Also, resizing this or any other kind of
- * hash table is a relatively slow operation, so, when possible, it is
- * a good idea to provide estimates of expected table sizes in
- * constructors.
+ * <p>The table is dynamically expanded when there are too many
+ * collisions (i.e., keys that have distinct hash codes but fall into
+ * the same slot modulo the table size), with the expected average
+ * effect of maintaining roughly two bins per mapping (corresponding
+ * to a 0.75 load factor threshold for resizing). There may be much
+ * variance around this average as mappings are added and removed, but
+ * overall, this maintains a commonly accepted time/space tradeoff for
+ * hash tables. However, resizing this or any other kind of hash
+ * table may be a relatively slow operation. When possible, it is a
+ * good idea to provide a size estimate as an optional {@code
+ * initialCapacity} constructor argument. An additional optional
+ * {@code loadFactor} constructor argument provides a further means of
+ * customizing initial table capacity by specifying the table density
+ * to be used in calculating the amount of space to allocate for the
+ * given number of elements. Also, for compatibility with previous
+ * versions of this class, constructors may optionally specify an
+ * expected {@code concurrencyLevel} as an additional hint for
+ * internal sizing. Note that using many keys with exactly the same
+ * {@code hashCode()} is a sure way to slow down performance of any
+ * hash table. To ameliorate impact, when keys are {@link Comparable},
+ * this class may use comparison order among keys to help break ties.
+ *
+ * <p>A {@link Set} projection of a ConcurrentHashMap may be created
+ * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
+ * (using {@link #keySet(Object)}) when only keys are of interest, and the
+ * mapped values are (perhaps transiently) not used or all take the
+ * same mapping value.
+ *
+ * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
+ * form of histogram or multiset) by using {@link
+ * java.util.concurrent.atomic.LongAdder} values and initializing via
+ * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
+ * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
+ * {@code freqs.computeIfAbsent(k, x -> new LongAdder()).increment();}
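+ *
+ * <p>A hedged sketch of that idiom (assuming hypothetical helpers
+ * {@code countWord} and {@code countFor}):
+ *
+ * <pre> {@code
+ * ConcurrentHashMap<String,LongAdder> freqs = new ConcurrentHashMap<>();
+ * void countWord(String word) {
+ *   freqs.computeIfAbsent(word, w -> new LongAdder()).increment();
+ * }
+ * long countFor(String word) {
+ *   LongAdder a = freqs.get(word);   // null means no count yet
+ *   return (a == null) ? 0L : a.sum();
+ * }}</pre>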
*
* <p>This class and its views and iterators implement all of the
* <em>optional</em> methods of the {@link Map} and {@link Iterator}
@@ -88,6 +147,114 @@
* <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
* does <em>not</em> allow {@code null} to be used as a key or value.
*
+ * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
+ * operations that, unlike most {@link Stream} methods, are designed
+ * to be safely, and often sensibly, applied even with maps that are
+ * being concurrently updated by other threads; for example, when
+ * computing a snapshot summary of the values in a shared registry.
+ * There are three kinds of operation, each with four forms, accepting
+ * functions with Keys, Values, Entries, and (Key, Value) arguments
+ * and/or return values. Because the elements of a ConcurrentHashMap
+ * are not ordered in any particular way, and may be processed in
+ * different orders in different parallel executions, the correctness
+ * of supplied functions should not depend on any ordering, or on any
+ * other objects or values that may transiently change while
+ * computation is in progress; and except for forEach actions, should
+ * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
+ * objects do not support method {@code setValue}.
+ *
+ * <ul>
+ * <li> forEach: Perform a given action on each element.
+ * A variant form applies a given transformation on each element
+ * before performing the action (see the sketch after this list).</li>
+ *
+ * <li> search: Return the first available non-null result of
+ * applying a given function on each element; skipping further
+ * search when a result is found.</li>
+ *
+ * <li> reduce: Accumulate each element. The supplied reduction
+ * function cannot rely on ordering (more formally, it should be
+ * both associative and commutative). There are five variants:
+ *
+ * <ul>
+ *
+ * <li> Plain reductions. (There is not a form of this method for
+ * (key, value) function arguments since there is no corresponding
+ * return type.)</li>
+ *
+ * <li> Mapped reductions that accumulate the results of a given
+ * function applied to each element.</li>
+ *
+ * <li> Reductions to scalar doubles, longs, and ints, using a
+ * given basis value.</li>
+ *
+ * </ul>
+ * </li>
+ * </ul>
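+ *
+ * <p>A hedged illustration of the forEach forms, assuming a
+ * hypothetical {@code ConcurrentHashMap<String,Long> map}:
+ *
+ * <pre> {@code
+ * // plain action on each (key, value) pair
+ * map.forEach(1L, (k, v) -> System.out.println(k + "=" + v));
+ * // transform each value, then act on each non-null result
+ * map.forEachValue(1L, v -> v > 10L ? v * 2 : null, System.out::println);
+ * }</pre>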
+ *
+ * <p>These bulk operations accept a {@code parallelismThreshold}
+ * argument. Methods proceed sequentially if the current map size is
+ * estimated to be less than the given threshold. Using a value of
+ * {@code Long.MAX_VALUE} suppresses all parallelism. Using a value
+ * of {@code 1} results in maximal parallelism by partitioning into
+ * enough subtasks to fully utilize the {@link
+ * ForkJoinPool#commonPool()} that is used for all parallel
+ * computations. Normally, you would initially choose one of these
+ * extreme values, and then measure performance of using in-between
+ * values that trade off overhead versus throughput.
+ *
+ * <p>The concurrency properties of bulk operations follow
+ * from those of ConcurrentHashMap: Any non-null result returned
+ * from {@code get(key)} and related access methods bears a
+ * happens-before relation with the associated insertion or
+ * update. The result of any bulk operation reflects the
+ * composition of these per-element relations (but is not
+ * necessarily atomic with respect to the map as a whole unless it
+ * is somehow known to be quiescent). Conversely, because keys
+ * and values in the map are never null, null serves as a reliable
+ * atomic indicator of the current lack of any result. To
+ * maintain this property, null serves as an implicit basis for
+ * all non-scalar reduction operations. For the double, long, and
+ * int versions, the basis should be one that, when combined with
+ * any other value, returns that other value (more formally, it
+ * should be the identity element for the reduction). Most common
+ * reductions have these properties; for example, computing a sum
+ * with basis 0 or a minimum with basis MAX_VALUE.
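+ *
+ * <p>For example, in a hedged sketch over the hypothetical {@code
+ * map} above, a minimum over values uses {@code Long.MAX_VALUE} as
+ * its identity basis:
+ *
+ * <pre> {@code
+ * long min = map.reduceValuesToLong(1L, Long::longValue,
+ *                                   Long.MAX_VALUE, Math::min);
+ * }</pre>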
+ *
+ * <p>Search and transformation functions provided as arguments
+ * should similarly return null to indicate the lack of any result
+ * (in which case it is not used). In the case of mapped
+ * reductions, this also enables transformations to serve as
+ * filters, returning null (or, in the case of primitive
+ * specializations, the identity basis) if the element should not
+ * be combined. You can create compound transformations and
+ * filterings by composing them yourself under this "null means
+ * there is nothing there now" rule before using them in search or
+ * reduce operations.
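+ *
+ * <p>A sketch (illustrative only) of a search doubling as a filter,
+ * returning the first available key whose value exceeds a bound, or
+ * null if none does:
+ *
+ * <pre> {@code
+ * String big = map.search(1L, (k, v) -> v > 100L ? k : null);
+ * }</pre>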
+ *
+ * <p>Methods accepting and/or returning Entry arguments maintain
+ * key-value associations. They may be useful for example when
+ * finding the key for the greatest value. Note that "plain" Entry
+ * arguments can be supplied using {@code new
+ * AbstractMap.SimpleEntry(k,v)}.
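+ *
+ * <p>For instance, a sketch (not exercised here) that finds an entry
+ * holding the greatest value, or null if the map is empty:
+ *
+ * <pre> {@code
+ * Map.Entry<String,Long> max = map.reduceEntries(1L, (e1, e2) ->
+ *     e1.getValue() >= e2.getValue() ? e1 : e2);
+ * }</pre>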
+ *
+ * <p>Bulk operations may complete abruptly, throwing an
+ * exception encountered in the application of a supplied
+ * function. Bear in mind when handling such exceptions that other
+ * concurrently executing functions could also have thrown
+ * exceptions, or would have done so if the first exception had
+ * not occurred.
+ *
+ * <p>Speedups for parallel compared to sequential forms are common
+ * but not guaranteed. Parallel operations involving brief functions
+ * on small maps may execute more slowly than sequential forms if the
+ * underlying work to parallelize the computation is more expensive
+ * than the computation itself. Similarly, parallelization may not
+ * lead to much actual parallelism if all processors are busy
+ * performing unrelated tasks.
+ *
+ * <p>All arguments to all task methods must be non-null.
+ *
* <p>This class is a member of the
* <a href="{@docRoot}/../technotes/guides/collections/index.html">
* Java Collections Framework</a>.
@@ -97,735 +264,2371 @@
* @param <K> the type of keys maintained by this map
* @param <V> the type of mapped values
*/
-public class ConcurrentHashMap<K, V> extends AbstractMap<K, V>
- implements ConcurrentMap<K, V>, Serializable {
+@SuppressWarnings({"unchecked", "rawtypes", "serial"})
+public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
+ implements ConcurrentMap<K,V>, Serializable {
+
private static final long serialVersionUID = 7249069246763182397L;
/*
- * The basic strategy is to subdivide the table among Segments,
- * each of which itself is a concurrently readable hash table. To
- * reduce footprint, all but one segments are constructed only
- * when first needed (see ensureSegment). To maintain visibility
- * in the presence of lazy construction, accesses to segments as
- * well as elements of segment's table must use volatile access,
- * which is done via Unsafe within methods segmentAt etc
- * below. These provide the functionality of AtomicReferenceArrays
- * but reduce the levels of indirection. Additionally,
- * volatile-writes of table elements and entry "next" fields
- * within locked operations use the cheaper "lazySet" forms of
- * writes (via putOrderedObject) because these writes are always
- * followed by lock releases that maintain sequential consistency
- * of table updates.
+ * Overview:
+ *
+ * The primary design goal of this hash table is to maintain
+ * concurrent readability (typically method get(), but also
+ * iterators and related methods) while minimizing update
+ * contention. Secondary goals are to keep space consumption about
+ * the same or better than java.util.HashMap, and to support high
+ * initial insertion rates on an empty table by many threads.
+ *
+ * Each key-value mapping is held in a Node. Because Node key
+ * fields can contain special values, they are defined using plain
+ * Object types (not type "K"). This leads to a lot of explicit
+ * casting (and the use of class-wide warning suppressions). It
+ * also allows some of the public methods to be factored into a
+ * smaller number of internal methods (although sadly not so for
+ * the five variants of put-related operations). The
+ * validation-based approach explained below leads to a lot of
+ * code sprawl because retry-control precludes factoring into
+ * smaller methods.
+ *
+ * The table is lazily initialized to a power-of-two size upon the
+ * first insertion. Each bin in the table normally contains a
+ * list of Nodes (most often, the list has only zero or one Node).
+ * Table accesses require volatile/atomic reads, writes, and
+ * CASes. Because there is no other way to arrange this without
+ * adding further indirections, we use intrinsic
+ * (sun.misc.Unsafe) operations.
+ *
+ * We use the top (sign) bit of Node hash fields for control
+ * purposes -- it is available anyway because of addressing
+ * constraints. Nodes with negative hash fields are forwarding
+ * nodes to either TreeBins or resized tables. The lower 31 bits
+ * of each normal Node's hash field contain a transformation of
+ * the key's hash code.
+ *
+ * Insertion (via put or its variants) of the first node in an
+ * empty bin is performed by just CASing it to the bin. This is
+ * by far the most common case for put operations under most
+ * key/hash distributions. Other update operations (insert,
+ * delete, and replace) require locks. We do not want to waste
+ * the space required to associate a distinct lock object with
+ * each bin, so instead use the first node of a bin list itself as
+ * a lock. Locking support for these locks relies on builtin
+ * "synchronized" monitors.
+ *
+ * Using the first node of a list as a lock does not by itself
+ * suffice though: When a node is locked, any update must first
+ * validate that it is still the first node after locking it, and
+ * retry if not. Because new nodes are always appended to lists,
+ * once a node is first in a bin, it remains first until deleted
+ * or the bin becomes invalidated (upon resizing).
+ *
+ * The main disadvantage of per-bin locks is that other update
+ * operations on other nodes in a bin list protected by the same
+ * lock can stall, for example when user equals() or mapping
+ * functions take a long time. However, statistically, under
+ * random hash codes, this is not a common problem. Ideally, the
+ * frequency of nodes in bins follows a Poisson distribution
+ * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
+ * parameter of about 0.5 on average, given the resizing threshold
+ * of 0.75, although with a large variance because of resizing
+ * granularity. Ignoring variance, the expected occurrences of
+ * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
+ * first values are:
+ *
+ * 0: 0.60653066
+ * 1: 0.30326533
+ * 2: 0.07581633
+ * 3: 0.01263606
+ * 4: 0.00157952
+ * 5: 0.00015795
+ * 6: 0.00001316
+ * 7: 0.00000094
+ * 8: 0.00000006
+ * more: less than 1 in ten million
+ *
+ * Lock contention probability for two threads accessing distinct
+ * elements is roughly 1 / (8 * #elements) under random hashes.
*
- * Historical note: The previous version of this class relied
- * heavily on "final" fields, which avoided some volatile reads at
- * the expense of a large initial footprint. Some remnants of
- * that design (including forced construction of segment 0) exist
- * to ensure serialization compatibility.
+ * Actual hash code distributions encountered in practice
+ * sometimes deviate significantly from uniform randomness. This
+ * includes the case when N > (1<<30), so some keys MUST collide.
+ * Similarly for dumb or hostile usages in which multiple keys are
+ * designed to have identical hash codes. Also, although we guard
+ * against the worst effects of this (see method spread), sets of
+ * hashes may differ only in bits that do not impact their bin
+ * index for a given power-of-two mask. So we use a secondary
+ * strategy that applies when the number of nodes in a bin exceeds
+ * a threshold, and at least one of the keys implements
+ * Comparable. These TreeBins use a balanced tree to hold nodes
+ * (a specialized form of red-black trees), bounding search time
+ * to O(log N). Each search step in a TreeBin is at least twice as
+ * slow as in a regular list, but given that N cannot exceed
+ * (1<<64) (before running out of addresses) this bounds search
+ * steps, lock hold times, etc, to reasonable constants (roughly
+ * 100 nodes inspected per operation worst case) so long as keys
+ * are Comparable (which is very common -- String, Long, etc).
+ * TreeBin nodes (TreeNodes) also maintain the same "next"
+ * traversal pointers as regular nodes, so can be traversed in
+ * iterators in the same way.
+ *
+ * The table is resized when occupancy exceeds a percentage
+ * threshold (nominally, 0.75, but see below). Any thread
+ * noticing an overfull bin may assist in resizing after the
+ * initiating thread allocates and sets up the replacement
+ * array. However, rather than stalling, these other threads may
+ * proceed with insertions etc. The use of TreeBins shields us
+ * from the worst case effects of overfilling while resizes are in
+ * progress. Resizing proceeds by transferring bins, one by one,
+ * from the table to the next table. To enable concurrency, the
+ * next table must be (incrementally) prefilled with place-holders
+ * serving as reverse forwarders to the old table. Because we are
+ * using power-of-two expansion, the elements from each bin must
+ * either stay at same index, or move with a power of two
+ * offset. We eliminate unnecessary node creation by catching
+ * cases where old nodes can be reused because their next fields
+ * won't change. On average, only about one-sixth of them need
+ * cloning when a table doubles. The nodes they replace will be
+ * garbage collectable as soon as they are no longer referenced by
+ * any reader thread that may be in the midst of concurrently
+ * traversing the table. Upon transfer, the old table bin contains
+ * only a special forwarding node (with hash field "MOVED") that
+ * contains the next table as its key. On encountering a
+ * forwarding node, access and update operations restart, using
+ * the new table.
+ *
+ * Each bin transfer requires its bin lock, which can stall
+ * waiting for locks while resizing. However, because other
+ * threads can join in and help resize rather than contend for
+ * locks, average aggregate waits become shorter as resizing
+ * progresses. The transfer operation must also ensure that all
+ * accessible bins in both the old and new table are usable by any
+ * traversal. This is arranged by proceeding from the last bin
+ * (table.length - 1) up towards the first. Upon seeing a
+ * forwarding node, traversals (see class Traverser) arrange to
+ * move to the new table without revisiting nodes. However, to
+ * ensure that no intervening nodes are skipped, bin splitting can
+ * only begin after the associated reverse-forwarders are in
+ * place.
+ *
+ * The traversal scheme also applies to partial traversals of
+ * ranges of bins (via an alternate Traverser constructor)
+ * to support partitioned aggregate operations. Also, read-only
+ * operations give up if ever forwarded to a null table, which
+ * provides support for shutdown-style clearing, which is also not
+ * currently implemented.
+ *
+ * Lazy table initialization minimizes footprint until first use,
+ * and also avoids resizings when the first operation is from a
+ * putAll, constructor with map argument, or deserialization.
+ * These cases attempt to override the initial capacity settings,
+ * but harmlessly fail to take effect in cases of races.
+ *
+ * The element count is maintained using a specialization of
+ * LongAdder. We need to incorporate a specialization rather than
+ * just use a LongAdder in order to access implicit
+ * contention-sensing that leads to creation of multiple
+ * Cells. The counter mechanics avoid contention on
+ * updates but can encounter cache thrashing if read too
+ * frequently during concurrent access. To avoid reading so often,
+ * resizing under contention is attempted only upon adding to a
+ * bin already holding two or more nodes. Under uniform hash
+ * distributions, the probability of this occurring at threshold
+ * is around 13%, meaning that only about 1 in 8 puts check
+ * threshold (and after resizing, many fewer do so). The bulk
+ * putAll operation further reduces contention by only committing
+ * count updates upon these size checks.
+ *
+ * Maintaining API and serialization compatibility with previous
+ * versions of this class introduces several oddities. Mainly: We
+ * leave untouched but unused constructor arguments referring to
+ * concurrencyLevel. We accept a loadFactor constructor argument,
+ * but apply it only to initial table capacity (which is the only
+ * time that we can guarantee to honor it). We also declare an
+ * unused "Segment" class that is instantiated in minimal form
+ * only when serializing.
*/
/* ---------------- Constants -------------- */
/**
- * The default initial capacity for this table,
- * used when not otherwise specified in a constructor.
+ * The largest possible table capacity. This value must be
+ * exactly 1<<30 to stay within Java array allocation and indexing
+ * bounds for power of two table sizes, and is further required
+ * because the top two bits of 32-bit hash fields are used for
+ * control purposes.
*/
- static final int DEFAULT_INITIAL_CAPACITY = 16;
+ private static final int MAXIMUM_CAPACITY = 1 << 30;
+
+ /**
+ * The default initial table capacity. Must be a power of 2
+ * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
+ */
+ private static final int DEFAULT_CAPACITY = 16;
/**
- * The default load factor for this table, used when not
- * otherwise specified in a constructor.
+ * The largest possible (non-power of two) array size.
+ * Needed by toArray and related methods.
*/
- static final float DEFAULT_LOAD_FACTOR = 0.75f;
+ static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
/**
- * The default concurrency level for this table, used when not
- * otherwise specified in a constructor.
+ * The default concurrency level for this table. Unused but
+ * defined for compatibility with previous versions of this class.
*/
- static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+ private static final int DEFAULT_CONCURRENCY_LEVEL = 16;
+
+ /**
+ * The load factor for this table. Overrides of this value in
+ * constructors affect only the initial table capacity. The
+ * actual floating point value isn't normally used -- it is
+ * simpler to use expressions such as {@code n - (n >>> 2)} for
+ * the associated resizing threshold.
+ */
+ private static final float LOAD_FACTOR = 0.75f;
/**
- * The maximum capacity, used if a higher value is implicitly
- * specified by either of the constructors with arguments. MUST
- * be a power of two <= 1<<30 to ensure that entries are indexable
- * using ints.
+ * The bin count threshold for using a tree rather than list for a
+ * bin. The value reflects the approximate break-even point for
+ * using tree-based operations.
*/
- static final int MAXIMUM_CAPACITY = 1 << 30;
+ private static final int TREE_THRESHOLD = 8;
/**
- * The minimum capacity for per-segment tables. Must be a power
- * of two, at least two to avoid immediate resizing on next use
- * after lazy construction.
+ * Minimum number of rebinnings per transfer step. Ranges are
+ * subdivided to allow multiple resizer threads. This value
+ * serves as a lower bound to avoid resizers encountering
+ * excessive memory contention. The value should be at least
+ * DEFAULT_CAPACITY.
+ */
+ private static final int MIN_TRANSFER_STRIDE = 16;
+
+ /*
+ * Encodings for Node hash fields. See above for explanation.
*/
- static final int MIN_SEGMENT_TABLE_CAPACITY = 2;
+ static final int MOVED = 0x80000000; // hash field for forwarding nodes
+ static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash
+
+ /** Number of CPUs, to place bounds on some sizings */
+ static final int NCPU = Runtime.getRuntime().availableProcessors();
+
+ /** For serialization compatibility. */
+ private static final ObjectStreamField[] serialPersistentFields = {
+ new ObjectStreamField("segments", Segment[].class),
+ new ObjectStreamField("segmentMask", Integer.TYPE),
+ new ObjectStreamField("segmentShift", Integer.TYPE)
+ };
/**
- * The maximum number of segments to allow; used to bound
- * constructor arguments. Must be power of two less than 1 << 24.
+ * A padded cell for distributing counts. Adapted from LongAdder
+ * and Striped64. See their internal docs for explanation.
*/
- static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
-
- /**
- * Number of unsynchronized retries in size and containsValue
- * methods before resorting to locking. This is used to avoid
- * unbounded retries if tables undergo continuous modification
- * which would make it impossible to obtain an accurate result.
- */
- static final int RETRIES_BEFORE_LOCK = 2;
+ @sun.misc.Contended static final class Cell {
+ volatile long value;
+ Cell(long x) { value = x; }
+ }
/* ---------------- Fields -------------- */
/**
- * A randomizing value associated with this instance that is applied to
- * hash code of keys to make hash collisions harder to find.
+ * The array of bins. Lazily initialized upon first insertion.
+ * Size is always a power of two. Accessed directly by iterators.
+ */
+ transient volatile Node<K,V>[] table;
+
+ /**
+ * The next table to use; non-null only while resizing.
+ */
+ private transient volatile Node<K,V>[] nextTable;
+
+ /**
+ * Base counter value, used mainly when there is no contention,
+ * but also as a fallback during table initialization
+ * races. Updated via CAS.
+ */
+ private transient volatile long baseCount;
+
+ /**
+ * Table initialization and resizing control. When negative, the
+ * table is being initialized or resized: -1 for initialization,
+ * else -(1 + the number of active resizing threads). Otherwise,
+ * when table is null, holds the initial table size to use upon
+ * creation, or 0 for default. After initialization, holds the
+ * next element count value upon which to resize the table.
+ */
+ private transient volatile int sizeCtl;
+
+ /**
+ * The next table index (plus one) to split while resizing.
+ */
+ private transient volatile int transferIndex;
+
+ /**
+ * The least available table index to split while resizing.
+ */
+ private transient volatile int transferOrigin;
+
+ /**
+ * Spinlock (locked via CAS) used when resizing and/or creating Cells.
*/
- private transient final int hashSeed = sun.misc.Hashing.randomHashSeed(this);
+ private transient volatile int cellsBusy;
+
+ /**
+ * Table of counter cells. When non-null, size is a power of 2.
+ */
+ private transient volatile Cell[] counterCells;
+
+ // views
+ private transient KeySetView<K,V> keySet;
+ private transient ValuesView<K,V> values;
+ private transient EntrySetView<K,V> entrySet;
+
+ /* ---------------- Table element access -------------- */
+
+ /*
+ * Volatile access methods are used for table elements as well as
+ * elements of in-progress next table while resizing. Uses are
+ * null checked by callers, and implicitly bounds-checked, relying
+ * on the invariants that tab arrays have non-zero size, and all
+ * indices are masked with (tab.length - 1) which is never
+ * negative and always less than length. Note that, to be correct
+ * wrt arbitrary concurrency errors by users, bounds checks must
+ * operate on local variables, which accounts for some odd-looking
+ * inline assignments below.
+ */
+
+ static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
+ return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
+ }
+
+ static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
+ Node<K,V> c, Node<K,V> v) {
+ return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
+ }
+
+ static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
+ U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
+ }
+
+ /* ---------------- Nodes -------------- */
/**
- * Mask value for indexing into segments. The upper bits of a
- * key's hash code are used to choose the segment.
+ * Key-value entry. This class is never exported out as a
+ * user-mutable Map.Entry (i.e., one supporting setValue; see
+ * MapEntry below), but can be used for read-only traversals used
+ * in bulk tasks. Nodes with a hash field of MOVED are special,
+ * and do not contain user keys or values (and are never
+ * exported). Otherwise, keys and vals are never null.
*/
- final int segmentMask;
+ static class Node<K,V> implements Map.Entry<K,V> {
+ final int hash;
+ final Object key;
+ volatile V val;
+ Node<K,V> next;
+
+ Node(int hash, Object key, V val, Node<K,V> next) {
+ this.hash = hash;
+ this.key = key;
+ this.val = val;
+ this.next = next;
+ }
+
+ public final K getKey() { return (K)key; }
+ public final V getValue() { return val; }
+ public final int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public final String toString(){ return key + "=" + val; }
+ public final V setValue(V value) {
+ throw new UnsupportedOperationException();
+ }
+
+ public final boolean equals(Object o) {
+ Object k, v, u; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ (k == key || k.equals(key)) &&
+ (v == (u = val) || v.equals(u)));
+ }
+ }
/**
- * Shift value for indexing within segments.
+ * Exported Entry for EntryIterator
*/
- final int segmentShift;
+ static final class MapEntry<K,V> implements Map.Entry<K,V> {
+ final K key; // non-null
+ V val; // non-null
+ final ConcurrentHashMap<K,V> map;
+ MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
+ this.key = key;
+ this.val = val;
+ this.map = map;
+ }
+ public K getKey() { return key; }
+ public V getValue() { return val; }
+ public int hashCode() { return key.hashCode() ^ val.hashCode(); }
+ public String toString() { return key + "=" + val; }
+
+ public boolean equals(Object o) {
+ Object k, v; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ (k == key || k.equals(key)) &&
+ (v == val || v.equals(val)));
+ }
+
+ /**
+ * Sets our entry's value and writes through to the map. The
+ * value to return is somewhat arbitrary here. Since we do not
+ * necessarily track asynchronous changes, the most recent
+ * "previous" value could be different from what we return (or
+ * could even have been removed, in which case the put will
+ * re-establish). We do not and cannot guarantee more.
+ */
+ public V setValue(V value) {
+ if (value == null) throw new NullPointerException();
+ V v = val;
+ val = value;
+ map.put(key, value);
+ return v;
+ }
+ }
+
+
+ /* ---------------- TreeBins -------------- */
+
+ /**
+ * Nodes for use in TreeBins
+ */
+ static final class TreeNode<K,V> extends Node<K,V> {
+ TreeNode<K,V> parent; // red-black tree links
+ TreeNode<K,V> left;
+ TreeNode<K,V> right;
+ TreeNode<K,V> prev; // needed to unlink next upon deletion
+ boolean red;
+
+ TreeNode(int hash, Object key, V val, Node<K,V> next,
+ TreeNode<K,V> parent) {
+ super(hash, key, val, next);
+ this.parent = parent;
+ }
+ }
+
+ /**
+ * Returns a Class for the given type of the form "class C
+ * implements Comparable<C>", if one exists, else null. See below
+ * for explanation.
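+ * For example (an illustrative note), {@code
+ * comparableClassFor(Integer.class)} returns {@code Integer.class},
+ * since Integer implements {@code Comparable<Integer>}, while {@code
+ * comparableClassFor(Object.class)} returns null.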
+ */
+ static Class<?> comparableClassFor(Class<?> c) {
+ Class<?> s, cmpc; Type[] ts, as; Type t; ParameterizedType p;
+ if (c == String.class) // bypass checks
+ return c;
+ if (c != null && (cmpc = Comparable.class).isAssignableFrom(c)) {
+ while (cmpc.isAssignableFrom(s = c.getSuperclass()))
+ c = s; // find topmost comparable class
+ if ((ts = c.getGenericInterfaces()) != null) {
+ for (int i = 0; i < ts.length; ++i) {
+ if (((t = ts[i]) instanceof ParameterizedType) &&
+ ((p = (ParameterizedType)t).getRawType() == cmpc) &&
+ (as = p.getActualTypeArguments()) != null &&
+ as.length == 1 && as[0] == c) // type arg is c
+ return c;
+ }
+ }
+ }
+ return null;
+ }
/**
- * The segments, each of which is a specialized hash table.
- */
- final Segment<K,V>[] segments;
-
- transient Set<K> keySet;
- transient Set<Map.Entry<K,V>> entrySet;
- transient Collection<V> values;
-
- /**
- * ConcurrentHashMap list entry. Note that this is never exported
- * out as a user-visible Map.Entry.
+ * A specialized form of red-black tree for use in bins
+ * whose size exceeds a threshold.
+ *
+ * TreeBins use a special form of comparison for search and
+ * related operations (which is the main reason we cannot use
+ * existing collections such as TreeMaps). TreeBins contain
+ * Comparable elements, but may contain others, as well as
+ * elements that are Comparable but not necessarily Comparable
+ * for the same T, so we cannot invoke compareTo among them. To
+ * handle this, the tree is ordered primarily by hash value, then
+ * by Comparable.compareTo order if applicable. On lookup at a
+ * node, if elements are not comparable or compare as 0 then both
+ * left and right children may need to be searched in the case of
+ * tied hash values. (This corresponds to the full list search
+ * that would be necessary if all elements were non-Comparable and
+ * had tied hashes.) The red-black balancing code is updated from
+ * pre-jdk-collections
+ * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
+ * based in turn on Cormen, Leiserson, and Rivest "Introduction to
+ * Algorithms" (CLR).
+ *
+ * TreeBins also maintain a separate locking discipline than
+ * regular bins. Because they are forwarded via special MOVED
+ * nodes at bin heads (which can never change once established),
+ * we cannot use those nodes as locks. Instead, TreeBin extends
+ * StampedLock to support a form of read-write lock. For update
+ * operations and table validation, the exclusive form of lock
+ * behaves in the same way as bin-head locks. However, lookups use
+ * shared read-lock mechanics to allow multiple readers in the
+ * absence of writers. Additionally, these lookups do not ever
+ * block: While the lock is not available, they proceed along the
+ * slow traversal path (via next-pointers) until the lock becomes
+ * available or the list is exhausted, whichever comes
+ * first. These cases are not fast, but maximize aggregate
+ * expected throughput.
*/
- static final class HashEntry<K,V> {
- final int hash;
- final K key;
- volatile V value;
- volatile HashEntry<K,V> next;
-
- HashEntry(int hash, K key, V value, HashEntry<K,V> next) {
- this.hash = hash;
- this.key = key;
- this.value = value;
- this.next = next;
+ static final class TreeBin<K,V> extends StampedLock {
+ private static final long serialVersionUID = 2249069246763182397L;
+ transient TreeNode<K,V> root; // root of tree
+ transient TreeNode<K,V> first; // head of next-pointer list
+
+ /** From CLR */
+ private void rotateLeft(TreeNode<K,V> p) {
+ if (p != null) {
+ TreeNode<K,V> r = p.right, pp, rl;
+ if ((rl = p.right = r.left) != null)
+ rl.parent = p;
+ if ((pp = r.parent = p.parent) == null)
+ root = r;
+ else if (pp.left == p)
+ pp.left = r;
+ else
+ pp.right = r;
+ r.left = p;
+ p.parent = r;
+ }
+ }
+
+ /** From CLR */
+ private void rotateRight(TreeNode<K,V> p) {
+ if (p != null) {
+ TreeNode<K,V> l = p.left, pp, lr;
+ if ((lr = p.left = l.right) != null)
+ lr.parent = p;
+ if ((pp = l.parent = p.parent) == null)
+ root = l;
+ else if (pp.right == p)
+ pp.right = l;
+ else
+ pp.left = l;
+ l.right = p;
+ p.parent = l;
+ }
+ }
+
+ /**
+ * Returns the TreeNode (or null if not found) for the given key
+ * starting at given root.
+ */
+ final TreeNode<K,V> getTreeNode(int h, Object k, TreeNode<K,V> p,
+ Class<?> cc) {
+ while (p != null) {
+ int dir, ph; Object pk; Class<?> pc;
+ if ((ph = p.hash) != h)
+ dir = (h < ph) ? -1 : 1;
+ else if ((pk = p.key) == k || k.equals(pk))
+ return p;
+ else if (cc == null || pk == null ||
+ ((pc = pk.getClass()) != cc &&
+ comparableClassFor(pc) != cc) ||
+ (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
+ TreeNode<K,V> r, pr; // check both sides
+ if ((pr = p.right) != null &&
+ (r = getTreeNode(h, k, pr, cc)) != null)
+ return r;
+ else // continue left
+ dir = -1;
+ }
+ p = (dir > 0) ? p.right : p.left;
+ }
+ return null;
+ }
+
+ /**
+ * Wrapper for getTreeNode used by CHM.get. Tries to obtain
+ * read-lock to call getTreeNode, but during failure to get
+ * lock, searches along next links.
+ */
+ final V getValue(int h, Object k) {
+ Class<?> cc = comparableClassFor(k.getClass());
+ Node<K,V> r = null;
+ for (Node<K,V> e = first; e != null; e = e.next) {
+ long s;
+ if ((s = tryReadLock()) != 0L) {
+ try {
+ r = getTreeNode(h, k, root, cc);
+ } finally {
+ unlockRead(s);
+ }
+ break;
+ }
+ else if (e.hash == h && k.equals(e.key)) {
+ r = e;
+ break;
+ }
+ }
+ return r == null ? null : r.val;
+ }
+
+ /**
+ * Finds or adds a node.
+ * @return null if added
+ */
+ final TreeNode<K,V> putTreeNode(int h, Object k, V v) {
+ Class<?> cc = comparableClassFor(k.getClass());
+ TreeNode<K,V> pp = root, p = null;
+ int dir = 0;
+ while (pp != null) { // find existing node or leaf to insert at
+ int ph; Object pk; Class<?> pc;
+ p = pp;
+ if ((ph = p.hash) != h)
+ dir = (h < ph) ? -1 : 1;
+ else if ((pk = p.key) == k || k.equals(pk))
+ return p;
+ else if (cc == null || pk == null ||
+ ((pc = pk.getClass()) != cc &&
+ comparableClassFor(pc) != cc) ||
+ (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
+ TreeNode<K,V> r, pr;
+ if ((pr = p.right) != null &&
+ (r = getTreeNode(h, k, pr, cc)) != null)
+ return r;
+ else // continue left
+ dir = -1;
+ }
+ pp = (dir > 0) ? p.right : p.left;
+ }
+
+ TreeNode<K,V> f = first;
+ TreeNode<K,V> x = first = new TreeNode<K,V>(h, k, v, f, p);
+ if (p == null)
+ root = x;
+ else { // attach and rebalance; adapted from CLR
+ if (f != null)
+ f.prev = x;
+ if (dir <= 0)
+ p.left = x;
+ else
+ p.right = x;
+ x.red = true;
+ for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
+ if ((xp = x.parent) == null) {
+ (root = x).red = false;
+ break;
+ }
+ else if (!xp.red || (xpp = xp.parent) == null) {
+ TreeNode<K,V> r = root;
+ if (r != null && r.red)
+ r.red = false;
+ break;
+ }
+ else if ((xppl = xpp.left) == xp) {
+ if ((xppr = xpp.right) != null && xppr.red) {
+ xppr.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.right) {
+ rotateLeft(x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ rotateRight(xpp);
+ }
+ }
+ }
+ }
+ else {
+ if (xppl != null && xppl.red) {
+ xppl.red = false;
+ xp.red = false;
+ xpp.red = true;
+ x = xpp;
+ }
+ else {
+ if (x == xp.left) {
+ rotateRight(x = xp);
+ xpp = (xp = x.parent) == null ? null : xp.parent;
+ }
+ if (xp != null) {
+ xp.red = false;
+ if (xpp != null) {
+ xpp.red = true;
+ rotateLeft(xpp);
+ }
+ }
+ }
+ }
+ }
+ }
+ assert checkInvariants();
+ return null;
+ }
+
+ /**
+ * Removes the given node, that must be present before this
+ * call. This is messier than typical red-black deletion code
+ * because we cannot swap the contents of an interior node
+ * with a leaf successor that is pinned by "next" pointers
+ * that are accessible independently of lock. So instead we
+ * swap the tree linkages.
+ */
+ final void deleteTreeNode(TreeNode<K,V> p) {
+ TreeNode<K,V> next = (TreeNode<K,V>)p.next;
+ TreeNode<K,V> pred = p.prev; // unlink traversal pointers
+ if (pred == null)
+ first = next;
+ else
+ pred.next = next;
+ if (next != null)
+ next.prev = pred;
+ else if (pred == null) {
+ root = null;
+ return;
+ }
+ TreeNode<K,V> replacement;
+ TreeNode<K,V> pl = p.left;
+ TreeNode<K,V> pr = p.right;
+ if (pl != null && pr != null) {
+ TreeNode<K,V> s = pr, sl;
+ while ((sl = s.left) != null) // find successor
+ s = sl;
+ boolean c = s.red; s.red = p.red; p.red = c; // swap colors
+ TreeNode<K,V> sr = s.right;
+ TreeNode<K,V> pp = p.parent;
+ if (s == pr) { // p was s's direct parent
+ p.parent = s;
+ s.right = p;
+ }
+ else {
+ TreeNode<K,V> sp = s.parent;
+ if ((p.parent = sp) != null) {
+ if (s == sp.left)
+ sp.left = p;
+ else
+ sp.right = p;
+ }
+ if ((s.right = pr) != null)
+ pr.parent = s;
+ }
+ p.left = null;
+ if ((p.right = sr) != null)
+ sr.parent = p;
+ if ((s.left = pl) != null)
+ pl.parent = s;
+ if ((s.parent = pp) == null)
+ root = s;
+ else if (p == pp.left)
+ pp.left = s;
+ else
+ pp.right = s;
+ if (sr != null)
+ replacement = sr;
+ else
+ replacement = p;
+ }
+ else if (pl != null)
+ replacement = pl;
+ else if (pr != null)
+ replacement = pr;
+ else
+ replacement = p;
+ if (replacement != p) {
+ TreeNode<K,V> pp = replacement.parent = p.parent;
+ if (pp == null)
+ root = replacement;
+ else if (p == pp.left)
+ pp.left = replacement;
+ else
+ pp.right = replacement;
+ p.left = p.right = p.parent = null;
+ }
+ if (!p.red) { // rebalance, from CLR
+ for (TreeNode<K,V> x = replacement; x != null; ) {
+ TreeNode<K,V> xp, xpl, xpr;
+ if (x.red || (xp = x.parent) == null) {
+ x.red = false;
+ break;
+ }
+ else if ((xpl = xp.left) == x) {
+ if ((xpr = xp.right) != null && xpr.red) {
+ xpr.red = false;
+ xp.red = true;
+ rotateLeft(xp);
+ xpr = (xp = x.parent) == null ? null : xp.right;
+ }
+ if (xpr == null)
+ x = xp;
+ else {
+ TreeNode<K,V> sl = xpr.left, sr = xpr.right;
+ if ((sr == null || !sr.red) &&
+ (sl == null || !sl.red)) {
+ xpr.red = true;
+ x = xp;
+ }
+ else {
+ if (sr == null || !sr.red) {
+ if (sl != null)
+ sl.red = false;
+ xpr.red = true;
+ rotateRight(xpr);
+ xpr = (xp = x.parent) == null ?
+ null : xp.right;
+ }
+ if (xpr != null) {
+ xpr.red = (xp == null) ? false : xp.red;
+ if ((sr = xpr.right) != null)
+ sr.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ rotateLeft(xp);
+ }
+ x = root;
+ }
+ }
+ }
+ else { // symmetric
+ if (xpl != null && xpl.red) {
+ xpl.red = false;
+ xp.red = true;
+ rotateRight(xp);
+ xpl = (xp = x.parent) == null ? null : xp.left;
+ }
+ if (xpl == null)
+ x = xp;
+ else {
+ TreeNode<K,V> sl = xpl.left, sr = xpl.right;
+ if ((sl == null || !sl.red) &&
+ (sr == null || !sr.red)) {
+ xpl.red = true;
+ x = xp;
+ }
+ else {
+ if (sl == null || !sl.red) {
+ if (sr != null)
+ sr.red = false;
+ xpl.red = true;
+ rotateLeft(xpl);
+ xpl = (xp = x.parent) == null ?
+ null : xp.left;
+ }
+ if (xpl != null) {
+ xpl.red = (xp == null) ? false : xp.red;
+ if ((sl = xpl.left) != null)
+ sl.red = false;
+ }
+ if (xp != null) {
+ xp.red = false;
+ rotateRight(xp);
+ }
+ x = root;
+ }
+ }
+ }
+ }
+ }
+ if (p == replacement) { // detach pointers
+ TreeNode<K,V> pp;
+ if ((pp = p.parent) != null) {
+ if (p == pp.left)
+ pp.left = null;
+ else if (p == pp.right)
+ pp.right = null;
+ p.parent = null;
+ }
+ }
+ assert checkInvariants();
+ }
+
+ /**
+ * Checks linkage and balance invariants at root
+ */
+ final boolean checkInvariants() {
+ TreeNode<K,V> r = root;
+ if (r == null)
+ return (first == null);
+ else
+ return (first != null) && checkTreeNode(r);
}
/**
- * Sets next field with volatile write semantics. (See above
- * about use of putOrderedObject.)
+ * Recursive invariant check
*/
- final void setNext(HashEntry<K,V> n) {
- UNSAFE.putOrderedObject(this, nextOffset, n);
+ final boolean checkTreeNode(TreeNode<K,V> t) {
+ TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
+ tb = t.prev, tn = (TreeNode<K,V>)t.next;
+ if (tb != null && tb.next != t)
+ return false;
+ if (tn != null && tn.prev != t)
+ return false;
+ if (tp != null && t != tp.left && t != tp.right)
+ return false;
+ if (tl != null && (tl.parent != t || tl.hash > t.hash))
+ return false;
+ if (tr != null && (tr.parent != t || tr.hash < t.hash))
+ return false;
+ if (t.red && tl != null && tl.red && tr != null && tr.red)
+ return false;
+ if (tl != null && !checkTreeNode(tl))
+ return false;
+ if (tr != null && !checkTreeNode(tr))
+ return false;
+ return true;
+ }
+ }
+
+ /* ---------------- Collision reduction methods -------------- */
+
+ /**
+ * Spreads higher bits to lower, and also forces top bit to 0.
+ * Because the table uses power-of-two masking, sets of hashes
+ * that vary only in bits above the current mask will always
+ * collide. (Among known examples are sets of Float keys holding
+ * consecutive whole numbers in small tables.) To counter this,
+ * we apply a transform that spreads the impact of higher bits
+ * downward. There is a tradeoff between speed, utility, and
+ * quality of bit-spreading. Because many common sets of hashes
+ * are already reasonably distributed across bits (so don't benefit
+ * from spreading), and because we use trees to handle large sets
+ * of collisions in bins, we don't need excessively high quality.
+ */
+ private static final int spread(int h) {
+ h ^= (h >>> 18) ^ (h >>> 12);
+ return (h ^ (h >>> 10)) & HASH_BITS;
+ }
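+
+ // Note (illustrative): callers index into the table as
+ //   (tab.length - 1) & spread(key.hashCode())
+ // (see internalGet and internalPut), relying on power-of-two
+ // table sizes.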
+
+ /**
+ * Replaces a list bin with a tree bin if key is comparable. Call
+ * only when locked.
+ */
+ private final void replaceWithTreeBin(Node<K,V>[] tab, int index, Object key) {
+ if (tab != null && comparableClassFor(key.getClass()) != null) {
+ TreeBin<K,V> t = new TreeBin<K,V>();
+ for (Node<K,V> e = tabAt(tab, index); e != null; e = e.next)
+ t.putTreeNode(e.hash, e.key, e.val);
+ setTabAt(tab, index, new Node<K,V>(MOVED, t, null, null));
+ }
+ }
+
+ /* ---------------- Internal access and update methods -------------- */
+
+ /** Implementation for get and containsKey */
+ private final V internalGet(Object k) {
+ int h = spread(k.hashCode());
+ V v = null;
+ Node<K,V>[] tab; Node<K,V> e;
+ if ((tab = table) != null &&
+ (e = tabAt(tab, (tab.length - 1) & h)) != null) {
+ for (;;) {
+ int eh; Object ek;
+ if ((eh = e.hash) < 0) {
+ if ((ek = e.key) instanceof TreeBin) { // search TreeBin
+ v = ((TreeBin<K,V>)ek).getValue(h, k);
+ break;
+ }
+ else if (!(ek instanceof Node[]) || // try new table
+ (e = tabAt(tab = (Node<K,V>[])ek,
+ (tab.length - 1) & h)) == null)
+ break;
+ }
+ else if (eh == h && ((ek = e.key) == k || k.equals(ek))) {
+ v = e.val;
+ break;
+ }
+ else if ((e = e.next) == null)
+ break;
+ }
+ }
+ return v;
+ }
+
+ /**
+ * Implementation for the four public remove/replace methods:
+ * Replaces node value with v, conditional upon match of cv if
+ * non-null. If resulting value is null, delete.
+ */
+ private final V internalReplace(Object k, V v, Object cv) {
+ int h = spread(k.hashCode());
+ V oldVal = null;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int i, fh; Object fk;
+ if (tab == null ||
+ (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
+ break;
+ else if ((fh = f.hash) < 0) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ boolean validated = false;
+ boolean deleted = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ Class<?> cc = comparableClassFor(k.getClass());
+ TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
+ if (p != null) {
+ V pv = p.val;
+ if (cv == null || cv == pv || cv.equals(pv)) {
+ oldVal = pv;
+ if (v != null)
+ p.val = v;
+ else {
+ deleted = true;
+ t.deleteTreeNode(p);
+ }
+ }
+ }
+ }
+ } finally {
+ t.unlockWrite(stamp);
+ }
+ if (validated) {
+ if (deleted)
+ addCount(-1L, -1);
+ break;
+ }
+ }
+ else
+ tab = (Node<K,V>[])fk;
+ }
+ else {
+ boolean validated = false;
+ boolean deleted = false;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ for (Node<K,V> e = f, pred = null;;) {
+ Object ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ V ev = e.val;
+ if (cv == null || cv == ev || cv.equals(ev)) {
+ oldVal = ev;
+ if (v != null)
+ e.val = v;
+ else {
+ deleted = true;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null)
+ break;
+ }
+ }
+ }
+ if (validated) {
+ if (deleted)
+ addCount(-1L, -1);
+ break;
+ }
+ }
+ }
+ return oldVal;
+ }
+
+ /*
+ * Internal versions of insertion methods
+ * All have the same basic structure as the first (internalPut):
+ * 1. If table uninitialized, create
+ * 2. If bin empty, try to CAS new node
+ * 3. If bin stale, use new table
+ * 4. If bin converted to TreeBin, validate and relay to TreeBin methods
+ * 5. Lock and validate; if valid, scan and add or update
+ *
+ * The putAll method differs mainly in attempting to pre-allocate
+ * enough table space, and also more lazily performs count updates
+ * and checks.
+ *
+ * Most of the function-accepting methods can't be factored nicely
+ * because they require different functional forms, so instead
+ * sprawl out similar mechanics.
+ */
+
+ /** Implementation for put and putIfAbsent */
+ private final V internalPut(K k, V v, boolean onlyIfAbsent) {
+ if (k == null || v == null) throw new NullPointerException();
+ int h = spread(k.hashCode());
+ int len = 0;
+ for (Node<K,V>[] tab = table;;) {
+ int i, fh; Node<K,V> f; Object fk;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null)))
+ break; // no lock when adding to empty bin
+ }
+ else if ((fh = f.hash) < 0) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ V oldVal = null;
+ try {
+ if (tabAt(tab, i) == f) {
+ len = 2;
+ TreeNode<K,V> p = t.putTreeNode(h, k, v);
+ if (p != null) {
+ oldVal = p.val;
+ if (!onlyIfAbsent)
+ p.val = v;
+ }
+ }
+ } finally {
+ t.unlockWrite(stamp);
+ }
+ if (len != 0) {
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ else
+ tab = (Node<K,V>[])fk;
+ }
+ else {
+ V oldVal = null;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ len = 1;
+ for (Node<K,V> e = f;; ++len) {
+ Object ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ oldVal = e.val;
+ if (!onlyIfAbsent)
+ e.val = v;
+ break;
+ }
+ Node<K,V> last = e;
+ if ((e = e.next) == null) {
+ last.next = new Node<K,V>(h, k, v, null);
+ if (len > TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ break;
+ }
+ }
+ }
+ }
+ if (len != 0) {
+ if (oldVal != null)
+ return oldVal;
+ break;
+ }
+ }
+ }
+ addCount(1L, len);
+ return null;
+ }
+
+ /** Implementation for computeIfAbsent */
+ private final V internalComputeIfAbsent(K k, Function<? super K, ? extends V> mf) {
+ if (k == null || mf == null)
+ throw new NullPointerException();
+ int h = spread(k.hashCode());
+ V val = null;
+ int len = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int i; Object fk;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ Node<K,V> node = new Node<K,V>(h, k, null, null);
+ synchronized (node) {
+ if (casTabAt(tab, i, null, node)) {
+ len = 1;
+ try {
+ if ((val = mf.apply(k)) != null)
+ node.val = val;
+ } finally {
+ if (val == null)
+ setTabAt(tab, i, null);
+ }
+ }
+ }
+ if (len != 0)
+ break;
+ }
+ else if (f.hash < 0) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ boolean added = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ len = 2;
+ Class<?> cc = comparableClassFor(k.getClass());
+ TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
+ if (p != null)
+ val = p.val;
+ else if ((val = mf.apply(k)) != null) {
+ added = true;
+ t.putTreeNode(h, k, val);
+ }
+ }
+ } finally {
+ t.unlockWrite(stamp);
+ }
+ if (len != 0) {
+ if (!added)
+ return val;
+ break;
+ }
+ }
+ else
+ tab = (Node<K,V>[])fk;
+ }
+ else {
+ boolean added = false;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ len = 1;
+ for (Node<K,V> e = f;; ++len) {
+ Object ek; V ev;
+ if (e.hash == h &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ val = e.val;
+ break;
+ }
+ Node<K,V> last = e;
+ if ((e = e.next) == null) {
+ if ((val = mf.apply(k)) != null) {
+ added = true;
+ last.next = new Node<K,V>(h, k, val, null);
+ if (len > TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ }
+ break;
+ }
+ }
+ }
+ }
+ if (len != 0) {
+ if (!added)
+ return val;
+ break;
+ }
+ }
}
-
- // Unsafe mechanics
- static final sun.misc.Unsafe UNSAFE;
- static final long nextOffset;
- static {
- try {
- UNSAFE = sun.misc.Unsafe.getUnsafe();
- Class<?> k = HashEntry.class;
- nextOffset = UNSAFE.objectFieldOffset
- (k.getDeclaredField("next"));
- } catch (Exception e) {
- throw new Error(e);
+ if (val != null)
+ addCount(1L, len);
+ return val;
+ }
+
+ /** Implementation for compute */
+ private final V internalCompute(K k, boolean onlyIfPresent,
+ BiFunction<? super K, ? super V, ? extends V> mf) {
+ if (k == null || mf == null)
+ throw new NullPointerException();
+ int h = spread(k.hashCode());
+ V val = null;
+ int delta = 0;
+ int len = 0;
+ for (Node<K,V>[] tab = table;;) {
+ Node<K,V> f; int i, fh; Object fk;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ if (onlyIfPresent)
+ break;
+ Node<K,V> node = new Node<K,V>(h, k, null, null);
+ synchronized (node) {
+ if (casTabAt(tab, i, null, node)) {
+ try {
+ len = 1;
+ if ((val = mf.apply(k, null)) != null) {
+ node.val = val;
+ delta = 1;
+ }
+ } finally {
+ if (delta == 0)
+ setTabAt(tab, i, null);
+ }
+ }
+ }
+ if (len != 0)
+ break;
+ }
+ else if ((fh = f.hash) < 0) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ try {
+ if (tabAt(tab, i) == f) {
+ len = 2;
+ Class<?> cc = comparableClassFor(k.getClass());
+ TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
+ if (p != null || !onlyIfPresent) {
+ V pv = (p == null) ? null : p.val;
+ if ((val = mf.apply(k, pv)) != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ delta = 1;
+ t.putTreeNode(h, k, val);
+ }
+ }
+ else if (p != null) {
+ delta = -1;
+ t.deleteTreeNode(p);
+ }
+ }
+ }
+ } finally {
+ t.unlockWrite(stamp);
+ }
+ if (len != 0)
+ break;
+ }
+ else
+ tab = (Node<K,V>[])fk;
+ }
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ len = 1;
+ for (Node<K,V> e = f, pred = null;; ++len) {
+ Object ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ val = mf.apply(k, e.val);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null) {
+ if (!onlyIfPresent &&
+ (val = mf.apply(k, null)) != null) {
+ pred.next = new Node<K,V>(h, k, val, null);
+ delta = 1;
+ if (len > TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ }
+ break;
+ }
+ }
+ }
+ }
+ if (len != 0)
+ break;
+ }
+ }
+ if (delta != 0)
+ addCount((long)delta, len);
+ return val;
+ }
+
+ /** Implementation for merge */
+ private final V internalMerge(K k, V v,
+ BiFunction<? super V, ? super V, ? extends V> mf) {
+ if (k == null || v == null || mf == null)
+ throw new NullPointerException();
+ int h = spread(k.hashCode());
+ V val = null;
+ int delta = 0;
+ int len = 0;
+ for (Node<K,V>[] tab = table;;) {
+ int i; Node<K,V> f; Object fk;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
+ if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
+ delta = 1;
+ val = v;
+ break;
+ }
+ }
+ else if (f.hash < 0) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ try {
+ if (tabAt(tab, i) == f) {
+ len = 2;
+ Class<?> cc = comparableClassFor(k.getClass());
+ TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
+ val = (p == null) ? v : mf.apply(p.val, v);
+ if (val != null) {
+ if (p != null)
+ p.val = val;
+ else {
+ delta = 1;
+ t.putTreeNode(h, k, val);
+ }
+ }
+ else if (p != null) {
+ delta = -1;
+ t.deleteTreeNode(p);
+ }
+ }
+ } finally {
+ t.unlockWrite(stamp);
+ }
+ if (len != 0)
+ break;
+ }
+ else
+ tab = (Node<K,V>[])fk;
+ }
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ len = 1;
+ for (Node<K,V> e = f, pred = null;; ++len) {
+ Object ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ val = mf.apply(e.val, v);
+ if (val != null)
+ e.val = val;
+ else {
+ delta = -1;
+ Node<K,V> en = e.next;
+ if (pred != null)
+ pred.next = en;
+ else
+ setTabAt(tab, i, en);
+ }
+ break;
+ }
+ pred = e;
+ if ((e = e.next) == null) {
+ delta = 1;
+ val = v;
+ pred.next = new Node<K,V>(h, k, val, null);
+ if (len > TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ break;
+ }
+ }
+ }
+ }
+ if (len != 0)
+ break;
+ }
+ }
+ if (delta != 0)
+ addCount((long)delta, len);
+ return val;
+ }
+
+ /** Implementation for putAll */
+ private final void internalPutAll(Map<? extends K, ? extends V> m) {
+ tryPresize(m.size());
+ long delta = 0L; // number of uncommitted additions
+ boolean npe = false; // to throw exception on exit for nulls
+ try { // to clean up counts on other exceptions
+ for (Map.Entry<?, ? extends V> entry : m.entrySet()) {
+ Object k; V v;
+ if (entry == null || (k = entry.getKey()) == null ||
+ (v = entry.getValue()) == null) {
+ npe = true;
+ break;
+ }
+ int h = spread(k.hashCode());
+ for (Node<K,V>[] tab = table;;) {
+ int i; Node<K,V> f; int fh; Object fk;
+ if (tab == null)
+ tab = initTable();
+ else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
+ if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
+ ++delta;
+ break;
+ }
+ }
+ else if ((fh = f.hash) < 0) {
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ boolean validated = false;
+ try {
+ if (tabAt(tab, i) == f) {
+ validated = true;
+ Class<?> cc = comparableClassFor(k.getClass());
+ TreeNode<K,V> p = t.getTreeNode(h, k,
+ t.root, cc);
+ if (p != null)
+ p.val = v;
+ else {
+ ++delta;
+ t.putTreeNode(h, k, v);
+ }
+ }
+ } finally {
+ t.unlockWrite(stamp);
+ }
+ if (validated)
+ break;
+ }
+ else
+ tab = (Node<K,V>[])fk;
+ }
+ else {
+ int len = 0;
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ len = 1;
+ for (Node<K,V> e = f;; ++len) {
+ Object ek;
+ if (e.hash == h &&
+ ((ek = e.key) == k || k.equals(ek))) {
+ e.val = v;
+ break;
+ }
+ Node<K,V> last = e;
+ if ((e = e.next) == null) {
+ ++delta;
+ last.next = new Node<K,V>(h, k, v, null);
+ if (len > TREE_THRESHOLD)
+ replaceWithTreeBin(tab, i, k);
+ break;
+ }
+ }
+ }
+ }
+ if (len != 0) {
+ if (len > 1) {
+ addCount(delta, len);
+ delta = 0L;
+ }
+ break;
+ }
+ }
+ }
+ }
+ } finally {
+ if (delta != 0L)
+ addCount(delta, 2);
+ }
+ if (npe)
+ throw new NullPointerException();
+ }
+
+ /**
+ * Implementation for clear. Steps through each bin, removing all
+ * nodes.
+ */
+ private final void internalClear() {
+ long delta = 0L; // negative number of deletions
+ int i = 0;
+ Node<K,V>[] tab = table;
+ while (tab != null && i < tab.length) {
+ Node<K,V> f = tabAt(tab, i);
+ if (f == null)
+ ++i;
+ else if (f.hash < 0) {
+ Object fk;
+ if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ try {
+ if (tabAt(tab, i) == f) {
+ for (Node<K,V> p = t.first; p != null; p = p.next)
+ --delta;
+ t.first = null;
+ t.root = null;
+ ++i;
+ }
+ } finally {
+ t.unlockWrite(stamp);
+ }
+ }
+ else
+ tab = (Node<K,V>[])fk;
+ }
+ else {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
+ for (Node<K,V> e = f; e != null; e = e.next)
+ --delta;
+ setTabAt(tab, i, null);
+ ++i;
+ }
+ }
+ }
+ }
+ if (delta != 0L)
+ addCount(delta, -1);
+ }
+
+ /* ---------------- Table Initialization and Resizing -------------- */
+
+ /**
+ * Returns a power of two table size for the given desired capacity.
+ * See Hacker's Delight, sec 3.2.
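+ * For example, tableSizeFor(17) and tableSizeFor(25) both return 32,
+ * and tableSizeFor(32) returns 32 as well.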
+ */
+ private static final int tableSizeFor(int c) {
+ int n = c - 1;
+ n |= n >>> 1;
+ n |= n >>> 2;
+ n |= n >>> 4;
+ n |= n >>> 8;
+ n |= n >>> 16;
+ return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
+ }
+
+ /**
+ * Initializes table, using the size recorded in sizeCtl.
+ */
+ private final Node<K,V>[] initTable() {
+ Node<K,V>[] tab; int sc;
+ while ((tab = table) == null) {
+ if ((sc = sizeCtl) < 0)
+ Thread.yield(); // lost initialization race; just spin
+ else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+ try {
+ if ((tab = table) == null) {
+ int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
+ table = tab = (Node<K,V>[])new Node[n];
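+ // next resize threshold: n - n/4, i.e. 0.75 * n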
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ break;
+ }
+ }
+ return tab;
+ }
+
+ /**
+ * Adds to count, and if table is too small and not already
+ * resizing, initiates transfer. If already resizing, helps
+ * perform transfer if work is available. Rechecks occupancy
+ * after a transfer to see if another resize is already needed
+ * because resizings are lagging additions.
+ *
+ * @param x the count to add
+ * @param check if <0, don't check resize, if <= 1 only check if uncontended
+ */
+ private final void addCount(long x, int check) {
+ Cell[] as; long b, s;
+ if ((as = counterCells) != null ||
+ !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
+ Cell a; long v; int m;
+ boolean uncontended = true;
+ if (as == null || (m = as.length - 1) < 0 ||
+ (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
+ !(uncontended =
+ U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
+ fullAddCount(x, uncontended);
+ return;
+ }
+ if (check <= 1)
+ return;
+ s = sumCount();
+ }
+ if (check >= 0) {
+ Node<K,V>[] tab, nt; int sc;
+ while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
+ tab.length < MAXIMUM_CAPACITY) {
+ if (sc < 0) {
+ if (sc == -1 || transferIndex <= transferOrigin ||
+ (nt = nextTable) == null)
+ break;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
+ transfer(tab, nt);
+ }
+ else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
+ transfer(tab, null);
+ s = sumCount();
}
}
}
/**
- * Gets the ith element of given table (if nonnull) with volatile
- * read semantics. Note: This is manually integrated into a few
- * performance-sensitive methods to reduce call overhead.
- */
- @SuppressWarnings("unchecked")
- static final <K,V> HashEntry<K,V> entryAt(HashEntry<K,V>[] tab, int i) {
- return (tab == null) ? null :
- (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)i << TSHIFT) + TBASE);
- }
-
- /**
- * Sets the ith element of given table, with volatile write
- * semantics. (See above about use of putOrderedObject.)
+ * Tries to presize table to accommodate the given number of elements.
+ *
+ * @param size number of elements (doesn't need to be perfectly accurate)
*/
- static final <K,V> void setEntryAt(HashEntry<K,V>[] tab, int i,
- HashEntry<K,V> e) {
- UNSAFE.putOrderedObject(tab, ((long)i << TSHIFT) + TBASE, e);
- }
-
- /**
- * Applies a supplemental hash function to a given hashCode, which
- * defends against poor quality hash functions. This is critical
- * because ConcurrentHashMap uses power-of-two length hash tables,
- * that otherwise encounter collisions for hashCodes that do not
- * differ in lower or upper bits.
- */
- private int hash(Object k) {
- if (k instanceof String) {
- return ((String) k).hash32();
+ private final void tryPresize(int size) {
+ int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
+ tableSizeFor(size + (size >>> 1) + 1);
+ int sc;
+ while ((sc = sizeCtl) >= 0) {
+ Node<K,V>[] tab = table; int n;
+ if (tab == null || (n = tab.length) == 0) {
+ n = (sc > c) ? sc : c;
+ if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+ try {
+ if (table == tab) {
+ table = (Node<K,V>[])new Node[n];
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ }
+ }
+ else if (c <= sc || n >= MAXIMUM_CAPACITY)
+ break;
+ else if (tab == table &&
+ U.compareAndSwapInt(this, SIZECTL, sc, -2))
+ transfer(tab, null);
}
-
- int h = hashSeed ^ k.hashCode();
-
- // Spread bits to regularize both segment and index locations,
- // using variant of single-word Wang/Jenkins hash.
- h += (h << 15) ^ 0xffffcd7d;
- h ^= (h >>> 10);
- h += (h << 3);
- h ^= (h >>> 6);
- h += (h << 2) + (h << 14);
- return h ^ (h >>> 16);
}
/**
- * Segments are specialized versions of hash tables. This
- * subclasses from ReentrantLock opportunistically, just to
- * simplify some locking and avoid separate construction.
+ * Moves and/or copies the nodes in each bin to new table. See
+ * above for explanation.
*/
- static final class Segment<K,V> extends ReentrantLock implements Serializable {
- /*
- * Segments maintain a table of entry lists that are always
- * kept in a consistent state, so can be read (via volatile
- * reads of segments and tables) without locking. This
- * requires replicating nodes when necessary during table
- * resizing, so the old lists can be traversed by readers
- * still using old version of table.
- *
- * This class defines only mutative methods requiring locking.
- * Except as noted, the methods of this class perform the
- * per-segment versions of ConcurrentHashMap methods. (Other
- * methods are integrated directly into ConcurrentHashMap
- * methods.) These mutative methods use a form of controlled
- * spinning on contention via methods scanAndLock and
- * scanAndLockForPut. These intersperse tryLocks with
- * traversals to locate nodes. The main benefit is to absorb
- * cache misses (which are very common for hash tables) while
- * obtaining locks so that traversal is faster once
- * acquired. We do not actually use the found nodes since they
- * must be re-acquired under lock anyway to ensure sequential
- * consistency of updates (and in any case may be undetectably
- * stale), but they will normally be much faster to re-locate.
- * Also, scanAndLockForPut speculatively creates a fresh node
- * to use in put if no node is found.
- */
-
- private static final long serialVersionUID = 2249069246763182397L;
-
- /**
- * The maximum number of times to tryLock in a prescan before
- * possibly blocking on acquire in preparation for a locked
- * segment operation. On multiprocessors, using a bounded
- * number of retries maintains cache acquired while locating
- * nodes.
- */
- static final int MAX_SCAN_RETRIES =
- Runtime.getRuntime().availableProcessors() > 1 ? 64 : 1;
-
- /**
- * The per-segment table. Elements are accessed via
- * entryAt/setEntryAt providing volatile semantics.
- */
- transient volatile HashEntry<K,V>[] table;
-
- /**
- * The number of elements. Accessed only either within locks
- * or among other volatile reads that maintain visibility.
- */
- transient int count;
-
- /**
- * The total number of mutative operations in this segment.
- * Even though this may overflows 32 bits, it provides
- * sufficient accuracy for stability checks in CHM isEmpty()
- * and size() methods. Accessed only either within locks or
- * among other volatile reads that maintain visibility.
- */
- transient int modCount;
-
- /**
- * The table is rehashed when its size exceeds this threshold.
- * (The value of this field is always {@code (int)(capacity *
- * loadFactor)}.)
- */
- transient int threshold;
-
- /**
- * The load factor for the hash table. Even though this value
- * is same for all segments, it is replicated to avoid needing
- * links to outer object.
- * @serial
- */
- final float loadFactor;
-
- Segment(float lf, int threshold, HashEntry<K,V>[] tab) {
- this.loadFactor = lf;
- this.threshold = threshold;
- this.table = tab;
+ private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
+ int n = tab.length, stride;
+ if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
+ stride = MIN_TRANSFER_STRIDE; // subdivide range
+ if (nextTab == null) { // initiating
+ try {
+ nextTab = (Node<K,V>[])new Node[n << 1];
+ } catch (Throwable ex) { // try to cope with OOME
+ sizeCtl = Integer.MAX_VALUE;
+ return;
+ }
+ nextTable = nextTab;
+ transferOrigin = n;
+ transferIndex = n;
+ Node<K,V> rev = new Node<K,V>(MOVED, tab, null, null);
+ for (int k = n; k > 0;) { // progressively reveal ready slots
+ int nextk = (k > stride) ? k - stride : 0;
+ for (int m = nextk; m < k; ++m)
+ nextTab[m] = rev;
+ for (int m = n + nextk; m < n + k; ++m)
+ nextTab[m] = rev;
+ U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
+ }
}
-
- final V put(K key, int hash, V value, boolean onlyIfAbsent) {
- HashEntry<K,V> node = tryLock() ? null :
- scanAndLockForPut(key, hash, value);
- V oldValue;
- try {
- HashEntry<K,V>[] tab = table;
- int index = (tab.length - 1) & hash;
- HashEntry<K,V> first = entryAt(tab, index);
- for (HashEntry<K,V> e = first;;) {
- if (e != null) {
- K k;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- oldValue = e.value;
- if (!onlyIfAbsent) {
- e.value = value;
- ++modCount;
- }
- break;
+ int nextn = nextTab.length;
+ Node<K,V> fwd = new Node<K,V>(MOVED, nextTab, null, null);
+ boolean advance = true;
+ for (int i = 0, bound = 0;;) {
+ int nextIndex, nextBound; Node<K,V> f; Object fk;
+ while (advance) {
+ if (--i >= bound)
+ advance = false;
+ else if ((nextIndex = transferIndex) <= transferOrigin) {
+ i = -1;
+ advance = false;
+ }
+ else if (U.compareAndSwapInt
+ (this, TRANSFERINDEX, nextIndex,
+ nextBound = (nextIndex > stride ?
+ nextIndex - stride : 0))) {
+ bound = nextBound;
+ i = nextIndex - 1;
+ advance = false;
+ }
+ }
+ if (i < 0 || i >= n || i + n >= nextn) {
+ for (int sc;;) {
+ if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
+ if (sc == -1) {
+ nextTable = null;
+ table = nextTab;
+ sizeCtl = (n << 1) - (n >>> 1);
}
- e = e.next;
- }
- else {
- if (node != null)
- node.setNext(first);
- else
- node = new HashEntry<K,V>(hash, key, value, first);
- int c = count + 1;
- if (c > threshold && tab.length < MAXIMUM_CAPACITY)
- rehash(node);
- else
- setEntryAt(tab, index, node);
- ++modCount;
- count = c;
- oldValue = null;
- break;
+ return;
}
}
- } finally {
- unlock();
+ }
+ else if ((f = tabAt(tab, i)) == null) {
+ if (casTabAt(tab, i, null, fwd)) {
+ setTabAt(nextTab, i, null);
+ setTabAt(nextTab, i + n, null);
+ advance = true;
+ }
}
- return oldValue;
- }
-
- /**
- * Doubles size of table and repacks entries, also adding the
- * given node to new table
- */
- @SuppressWarnings("unchecked")
- private void rehash(HashEntry<K,V> node) {
- /*
- * Reclassify nodes in each list to new table. Because we
- * are using power-of-two expansion, the elements from
- * each bin must either stay at same index, or move with a
- * power of two offset. We eliminate unnecessary node
- * creation by catching cases where old nodes can be
- * reused because their next fields won't change.
- * Statistically, at the default threshold, only about
- * one-sixth of them need cloning when a table
- * doubles. The nodes they replace will be garbage
- * collectable as soon as they are no longer referenced by
- * any reader thread that may be in the midst of
- * concurrently traversing table. Entry accesses use plain
- * array indexing because they are followed by volatile
- * table write.
- */
- HashEntry<K,V>[] oldTable = table;
- int oldCapacity = oldTable.length;
- int newCapacity = oldCapacity << 1;
- threshold = (int)(newCapacity * loadFactor);
- HashEntry<K,V>[] newTable =
- (HashEntry<K,V>[]) new HashEntry<?,?>[newCapacity];
- int sizeMask = newCapacity - 1;
- for (int i = 0; i < oldCapacity ; i++) {
- HashEntry<K,V> e = oldTable[i];
- if (e != null) {
- HashEntry<K,V> next = e.next;
- int idx = e.hash & sizeMask;
- if (next == null) // Single node on list
- newTable[idx] = e;
- else { // Reuse consecutive sequence at same slot
- HashEntry<K,V> lastRun = e;
- int lastIdx = idx;
- for (HashEntry<K,V> last = next;
- last != null;
- last = last.next) {
- int k = last.hash & sizeMask;
- if (k != lastIdx) {
- lastIdx = k;
- lastRun = last;
+ else if (f.hash >= 0) {
+ synchronized (f) {
+ if (tabAt(tab, i) == f) {
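+ // Split this chain into a lo list (stays at index i) and
+ // a hi list (moves to index i + n); the trailing run of
+ // nodes that all fall in one half is reused without cloning.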
+ int runBit = f.hash & n;
+ Node<K,V> lastRun = f, lo = null, hi = null;
+ for (Node<K,V> p = f.next; p != null; p = p.next) {
+ int b = p.hash & n;
+ if (b != runBit) {
+ runBit = b;
+ lastRun = p;
}
}
- newTable[lastIdx] = lastRun;
- // Clone remaining nodes
- for (HashEntry<K,V> p = e; p != lastRun; p = p.next) {
- V v = p.value;
- int h = p.hash;
- int k = h & sizeMask;
- HashEntry<K,V> n = newTable[k];
- newTable[k] = new HashEntry<K,V>(h, p.key, v, n);
+ if (runBit == 0)
+ lo = lastRun;
+ else
+ hi = lastRun;
+ for (Node<K,V> p = f; p != lastRun; p = p.next) {
+ int ph = p.hash; Object pk = p.key; V pv = p.val;
+ if ((ph & n) == 0)
+ lo = new Node<K,V>(ph, pk, pv, lo);
+ else
+ hi = new Node<K,V>(ph, pk, pv, hi);
}
+ setTabAt(nextTab, i, lo);
+ setTabAt(nextTab, i + n, hi);
+ setTabAt(tab, i, fwd);
+ advance = true;
}
}
}
- int nodeIndex = node.hash & sizeMask; // add the new node
- node.setNext(newTable[nodeIndex]);
- newTable[nodeIndex] = node;
- table = newTable;
- }
-
- /**
- * Scans for a node containing given key while trying to
- * acquire lock, creating and returning one if not found. Upon
- * return, guarantees that lock is held. UNlike in most
- * methods, calls to method equals are not screened: Since
- * traversal speed doesn't matter, we might as well help warm
- * up the associated code and accesses as well.
- *
- * @return a new node if key not found, else null
- */
- private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
- HashEntry<K,V> first = entryForHash(this, hash);
- HashEntry<K,V> e = first;
- HashEntry<K,V> node = null;
- int retries = -1; // negative while locating node
- while (!tryLock()) {
- HashEntry<K,V> f; // to recheck first below
- if (retries < 0) {
- if (e == null) {
- if (node == null) // speculatively create node
- node = new HashEntry<K,V>(hash, key, value, null);
- retries = 0;
+ else if ((fk = f.key) instanceof TreeBin) {
+ TreeBin<K,V> t = (TreeBin<K,V>)fk;
+ long stamp = t.writeLock();
+ try {
+ if (tabAt(tab, i) == f) {
+ TreeNode<K,V> root;
+ Node<K,V> ln = null, hn = null;
+ if ((root = t.root) != null) {
+ Node<K,V> e, p; TreeNode<K,V> lr, rr; int lh;
+ TreeBin<K,V> lt = null, ht = null;
+ for (lr = root; lr.left != null; lr = lr.left);
+ for (rr = root; rr.right != null; rr = rr.right);
+ if ((lh = lr.hash) == rr.hash) { // move entire tree
+ if ((lh & n) == 0)
+ lt = t;
+ else
+ ht = t;
+ }
+ else {
+ lt = new TreeBin<K,V>();
+ ht = new TreeBin<K,V>();
+ int lc = 0, hc = 0;
+ for (e = t.first; e != null; e = e.next) {
+ int h = e.hash;
+ Object k = e.key; V v = e.val;
+ if ((h & n) == 0) {
+ ++lc;
+ lt.putTreeNode(h, k, v);
+ }
+ else {
+ ++hc;
+ ht.putTreeNode(h, k, v);
+ }
+ }
+ if (lc < TREE_THRESHOLD) { // throw away
+ for (p = lt.first; p != null; p = p.next)
+ ln = new Node<K,V>(p.hash, p.key,
+ p.val, ln);
+ lt = null;
+ }
+ if (hc < TREE_THRESHOLD) {
+ for (p = ht.first; p != null; p = p.next)
+ hn = new Node<K,V>(p.hash, p.key,
+ p.val, hn);
+ ht = null;
+ }
+ }
+ if (ln == null && lt != null)
+ ln = new Node<K,V>(MOVED, lt, null, null);
+ if (hn == null && ht != null)
+ hn = new Node<K,V>(MOVED, ht, null, null);
+ }
+ setTabAt(nextTab, i, ln);
+ setTabAt(nextTab, i + n, hn);
+ setTabAt(tab, i, fwd);
+ advance = true;
}
- else if (key.equals(e.key))
- retries = 0;
- else
- e = e.next;
- }
- else if (++retries > MAX_SCAN_RETRIES) {
- lock();
- break;
- }
- else if ((retries & 1) == 0 &&
- (f = entryForHash(this, hash)) != first) {
- e = first = f; // re-traverse if entry changed
- retries = -1;
+ } finally {
+ t.unlockWrite(stamp);
}
}
- return node;
+ else
+ advance = true; // already processed
+ }
+ }
+
+ /* ---------------- Counter support -------------- */
+
+ final long sumCount() {
+ Cell[] as = counterCells; Cell a;
+ long sum = baseCount;
+ if (as != null) {
+ for (int i = 0; i < as.length; ++i) {
+ if ((a = as[i]) != null)
+ sum += a.value;
+ }
+ }
+ return sum;
+ }
+
+ // See LongAdder version for explanation
+ private final void fullAddCount(long x, boolean wasUncontended) {
+ int h;
+ if ((h = ThreadLocalRandom.getProbe()) == 0) {
+ ThreadLocalRandom.localInit(); // force initialization
+ h = ThreadLocalRandom.getProbe();
+ wasUncontended = true;
}
-
- /**
- * Scans for a node containing the given key while trying to
- * acquire lock for a remove or replace operation. Upon
- * return, guarantees that lock is held. Note that we must
- * lock even if the key is not found, to ensure sequential
- * consistency of updates.
- */
- private void scanAndLock(Object key, int hash) {
- // similar to but simpler than scanAndLockForPut
- HashEntry<K,V> first = entryForHash(this, hash);
- HashEntry<K,V> e = first;
- int retries = -1;
- while (!tryLock()) {
- HashEntry<K,V> f;
- if (retries < 0) {
- if (e == null || key.equals(e.key))
- retries = 0;
- else
- e = e.next;
+ boolean collide = false; // True if last slot nonempty
+ for (;;) {
+ Cell[] as; Cell a; int n; long v;
+ if ((as = counterCells) != null && (n = as.length) > 0) {
+ if ((a = as[(n - 1) & h]) == null) {
+ if (cellsBusy == 0) { // Try to attach new Cell
+ Cell r = new Cell(x); // Optimistic create
+ if (cellsBusy == 0 &&
+ U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+ boolean created = false;
+ try { // Recheck under lock
+ Cell[] rs; int m, j;
+ if ((rs = counterCells) != null &&
+ (m = rs.length) > 0 &&
+ rs[j = (m - 1) & h] == null) {
+ rs[j] = r;
+ created = true;
+ }
+ } finally {
+ cellsBusy = 0;
+ }
+ if (created)
+ break;
+ continue; // Slot is now non-empty
+ }
+ }
+ collide = false;
+ }
+ else if (!wasUncontended) // CAS already known to fail
+ wasUncontended = true; // Continue after rehash
+ else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
+ break;
+ else if (counterCells != as || n >= NCPU)
+ collide = false; // At max size or stale
+ else if (!collide)
+ collide = true;
+ else if (cellsBusy == 0 &&
+ U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+ try {
+ if (counterCells == as) { // Expand table unless stale
+ Cell[] rs = new Cell[n << 1];
+ for (int i = 0; i < n; ++i)
+ rs[i] = as[i];
+ counterCells = rs;
+ }
+ } finally {
+ cellsBusy = 0;
+ }
+ collide = false;
+ continue; // Retry with expanded table
}
- else if (++retries > MAX_SCAN_RETRIES) {
- lock();
- break;
+ h = ThreadLocalRandom.advanceProbe(h);
+ }
+ else if (cellsBusy == 0 && counterCells == as &&
+ U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
+ boolean init = false;
+ try { // Initialize table
+ if (counterCells == as) {
+ Cell[] rs = new Cell[2];
+ rs[h & 1] = new Cell(x);
+ counterCells = rs;
+ init = true;
+ }
+ } finally {
+ cellsBusy = 0;
}
- else if ((retries & 1) == 0 &&
- (f = entryForHash(this, hash)) != first) {
- e = first = f;
- retries = -1;
- }
+ if (init)
+ break;
}
+ else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
+ break; // Fall back on using base
+ }
+ }
+
+ /* ---------------- Table Traversal -------------- */
+
+ /**
+ * Encapsulates traversal for methods such as containsValue; also
+ * serves as a base class for other iterators and spliterators.
+ *
+ * Method advance visits once each still-valid node that was
+ * reachable upon iterator construction. It might miss some that
+ * were added to a bin after the bin was visited, which is OK wrt
+ * consistency guarantees. Maintaining this property in the face
+ * of possible ongoing resizes requires a fair amount of
+ * bookkeeping state that is difficult to optimize away amidst
+ * volatile accesses. Even so, traversal maintains reasonable
+ * throughput.
+ *
+ * Normally, iteration proceeds bin-by-bin traversing lists.
+ * However, if the table has been resized, then all future steps
+ * must traverse both the bin at the current index as well as at
+ * (index + baseSize); and so on for further resizings. To
+ * paranoically cope with potential sharing by users of iterators
+ * across threads, iteration terminates if a bounds check fails
+ * for a table read.
+ */
+ static class Traverser<K,V> {
+ Node<K,V>[] tab; // current table; updated if resized
+ Node<K,V> next; // the next entry to use
+ int index; // index of bin to use next
+ int baseIndex; // current index of initial table
+ int baseLimit; // index bound for initial table
+ final int baseSize; // initial table size
+
+ Traverser(Node<K,V>[] tab, int size, int index, int limit) {
+ this.tab = tab;
+ this.baseSize = size;
+ this.baseIndex = this.index = index;
+ this.baseLimit = limit;
+ this.next = null;
}
/**
- * Remove; match on key only if value null, else match both.
+ * Advances if possible, returning next valid node, or null if none.
*/
- final V remove(Object key, int hash, Object value) {
- if (!tryLock())
- scanAndLock(key, hash);
- V oldValue = null;
- try {
- HashEntry<K,V>[] tab = table;
- int index = (tab.length - 1) & hash;
- HashEntry<K,V> e = entryAt(tab, index);
- HashEntry<K,V> pred = null;
- while (e != null) {
- K k;
- HashEntry<K,V> next = e.next;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- V v = e.value;
- if (value == null || value == v || value.equals(v)) {
- if (pred == null)
- setEntryAt(tab, index, next);
- else
- pred.setNext(next);
- ++modCount;
- --count;
- oldValue = v;
- }
- break;
- }
- pred = e;
- e = next;
- }
- } finally {
- unlock();
- }
- return oldValue;
- }
-
- final boolean replace(K key, int hash, V oldValue, V newValue) {
- if (!tryLock())
- scanAndLock(key, hash);
- boolean replaced = false;
- try {
- HashEntry<K,V> e;
- for (e = entryForHash(this, hash); e != null; e = e.next) {
- K k;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- if (oldValue.equals(e.value)) {
- e.value = newValue;
- ++modCount;
- replaced = true;
- }
- break;
+ final Node<K,V> advance() {
+ Node<K,V> e;
+ if ((e = next) != null)
+ e = e.next;
+ for (;;) {
+ Node<K,V>[] t; int i, n; Object ek; // must use locals in checks
+ if (e != null)
+ return next = e;
+ if (baseIndex >= baseLimit || (t = tab) == null ||
+ (n = t.length) <= (i = index) || i < 0)
+ return next = null;
+ if ((e = tabAt(t, index)) != null && e.hash < 0) {
+ if ((ek = e.key) instanceof TreeBin)
+ e = ((TreeBin<K,V>)ek).first;
+ else {
+ tab = (Node<K,V>[])ek;
+ e = null;
+ continue;
}
}
- } finally {
- unlock();
- }
- return replaced;
- }
-
- final V replace(K key, int hash, V value) {
- if (!tryLock())
- scanAndLock(key, hash);
- V oldValue = null;
- try {
- HashEntry<K,V> e;
- for (e = entryForHash(this, hash); e != null; e = e.next) {
- K k;
- if ((k = e.key) == key ||
- (e.hash == hash && key.equals(k))) {
- oldValue = e.value;
- e.value = value;
- ++modCount;
- break;
- }
- }
- } finally {
- unlock();
- }
- return oldValue;
- }
-
- final void clear() {
- lock();
- try {
- HashEntry<K,V>[] tab = table;
- for (int i = 0; i < tab.length ; i++)
- setEntryAt(tab, i, null);
- ++modCount;
- count = 0;
- } finally {
- unlock();
+ if ((index += baseSize) >= n)
+ index = ++baseIndex; // visit upper slots if present
}
}
}
- // Accessing segments
-
/**
- * Gets the jth element of given segment array (if nonnull) with
- * volatile element access semantics via Unsafe. (The null check
- * can trigger harmlessly only during deserialization.) Note:
- * because each element of segments array is set only once (using
- * fully ordered writes), some performance-sensitive methods rely
- * on this method only as a recheck upon null reads.
- */
- @SuppressWarnings("unchecked")
- static final <K,V> Segment<K,V> segmentAt(Segment<K,V>[] ss, int j) {
- long u = (j << SSHIFT) + SBASE;
- return ss == null ? null :
- (Segment<K,V>) UNSAFE.getObjectVolatile(ss, u);
- }
-
- /**
- * Returns the segment for the given index, creating it and
- * recording in segment table (via CAS) if not already present.
- *
- * @param k the index
- * @return the segment
+ * Base of key, value, and entry Iterators. Adds fields to
+ * Traverser to support iterator.remove
*/
- @SuppressWarnings("unchecked")
- private Segment<K,V> ensureSegment(int k) {
- final Segment<K,V>[] ss = this.segments;
- long u = (k << SSHIFT) + SBASE; // raw offset
- Segment<K,V> seg;
- if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
- Segment<K,V> proto = ss[0]; // use segment 0 as prototype
- int cap = proto.table.length;
- float lf = proto.loadFactor;
- int threshold = (int)(cap * lf);
- HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry<?,?>[cap];
- if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
- == null) { // recheck
- Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
- while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
- == null) {
- if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
- break;
- }
- }
+ static class BaseIterator<K,V> extends Traverser<K,V> {
+ final ConcurrentHashMap<K,V> map;
+ Node<K,V> lastReturned;
+ BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
+ ConcurrentHashMap<K,V> map) {
+ super(tab, size, index, limit);
+ this.map = map;
+ advance();
+ }
+
+ public final boolean hasNext() { return next != null; }
+ public final boolean hasMoreElements() { return next != null; }
+
+ public final void remove() {
+ Node<K,V> p;
+ if ((p = lastReturned) == null)
+ throw new IllegalStateException();
+ lastReturned = null;
+ map.internalReplace((K)p.key, null, null);
+ }
+ }
+
+ static final class KeyIterator<K,V> extends BaseIterator<K,V>
+ implements Iterator<K>, Enumeration<K> {
+ KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
+ ConcurrentHashMap<K,V> map) {
+ super(tab, index, size, limit, map);
+ }
+
+ public final K next() {
+ Node<K,V> p;
+ if ((p = next) == null)
+ throw new NoSuchElementException();
+ K k = (K)p.key;
+ lastReturned = p;
+ advance();
+ return k;
}
- return seg;
+
+ public final K nextElement() { return next(); }
+ }
+
+ static final class ValueIterator<K,V> extends BaseIterator<K,V>
+ implements Iterator<V>, Enumeration<V> {
+ ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
+ ConcurrentHashMap<K,V> map) {
+ super(tab, index, size, limit, map);
+ }
+
+ public final V next() {
+ Node<K,V> p;
+ if ((p = next) == null)
+ throw new NoSuchElementException();
+ V v = p.val;
+ lastReturned = p;
+ advance();
+ return v;
+ }
+
+ public final V nextElement() { return next(); }
+ }
+
+ static final class EntryIterator<K,V> extends BaseIterator<K,V>
+ implements Iterator<Map.Entry<K,V>> {
+ EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
+ ConcurrentHashMap<K,V> map) {
+ super(tab, index, size, limit, map);
+ }
+
+ public final Map.Entry<K,V> next() {
+ Node<K,V> p;
+ if ((p = next) == null)
+ throw new NoSuchElementException();
+ K k = (K)p.key;
+ V v = p.val;
+ lastReturned = p;
+ advance();
+ return new MapEntry<K,V>(k, v, map);
+ }
}
- // Hash-based segment and entry accesses
-
- /**
- * Gets the segment for the given hash code.
- */
- @SuppressWarnings("unchecked")
- private Segment<K,V> segmentForHash(int h) {
- long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
- return (Segment<K,V>) UNSAFE.getObjectVolatile(segments, u);
+ static final class KeySpliterator<K,V> extends Traverser<K,V>
+ implements Spliterator<K> {
+ long est; // size estimate
+ KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
+ long est) {
+ super(tab, size, index, limit);
+ this.est = est;
+ }
+
+ public Spliterator<K> trySplit() {
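+ // Halve the remaining bin range: this spliterator keeps
+ // [baseIndex, h), the new one covers [h, old baseLimit),
+ // and the size estimate is halved to match.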
+ int i, f, h;
+ return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+ new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
+ f, est >>>= 1);
+ }
+
+ public void forEachRemaining(Consumer<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.accept((K)p.key);
+ }
+
+ public boolean tryAdvance(Consumer<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V> p;
+ if ((p = advance()) == null)
+ return false;
+ action.accept((K)p.key);
+ return true;
+ }
+
+ public long estimateSize() { return est; }
+
+ public int characteristics() {
+ return Spliterator.DISTINCT | Spliterator.CONCURRENT |
+ Spliterator.NONNULL;
+ }
}
- /**
- * Gets the table entry for the given segment and hash code.
- */
- @SuppressWarnings("unchecked")
- static final <K,V> HashEntry<K,V> entryForHash(Segment<K,V> seg, int h) {
- HashEntry<K,V>[] tab;
- return (seg == null || (tab = seg.table) == null) ? null :
- (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
+ static final class ValueSpliterator<K,V> extends Traverser<K,V>
+ implements Spliterator<V> {
+ long est; // size estimate
+ ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
+ long est) {
+ super(tab, size, index, limit);
+ this.est = est;
+ }
+
+ public Spliterator<V> trySplit() {
+ int i, f, h;
+ return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+ new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
+ f, est >>>= 1);
+ }
+
+ public void forEachRemaining(Consumer<? super V> action) {
+ if (action == null) throw new NullPointerException();
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.accept(p.val);
+ }
+
+ public boolean tryAdvance(Consumer<? super V> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V> p;
+ if ((p = advance()) == null)
+ return false;
+ action.accept(p.val);
+ return true;
+ }
+
+ public long estimateSize() { return est; }
+
+ public int characteristics() {
+ return Spliterator.CONCURRENT | Spliterator.NONNULL;
+ }
}
+ static final class EntrySpliterator<K,V> extends Traverser<K,V>
+ implements Spliterator<Map.Entry<K,V>> {
+ final ConcurrentHashMap<K,V> map; // To export MapEntry
+ long est; // size estimate
+ EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
+ long est, ConcurrentHashMap<K,V> map) {
+ super(tab, size, index, limit);
+ this.map = map;
+ this.est = est;
+ }
+
+ public Spliterator<Map.Entry<K,V>> trySplit() {
+ int i, f, h;
+ return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
+ new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
+ f, est >>>= 1, map);
+ }
+
+ public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ for (Node<K,V> p; (p = advance()) != null; )
+ action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
+ }
+
+ public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V> p;
+ if ((p = advance()) == null)
+ return false;
+ action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
+ return true;
+ }
+
+ public long estimateSize() { return est; }
+
+ public int characteristics() {
+ return Spliterator.DISTINCT | Spliterator.CONCURRENT |
+ Spliterator.NONNULL;
+ }
+ }
+
+
/* ---------------- Public operations -------------- */
/**
- * Creates a new, empty map with the specified initial
- * capacity, load factor and concurrency level.
- *
- * @param initialCapacity the initial capacity. The implementation
- * performs internal sizing to accommodate this many elements.
- * @param loadFactor the load factor threshold, used to control resizing.
- * Resizing may be performed when the average number of elements per
- * bin exceeds this threshold.
- * @param concurrencyLevel the estimated number of concurrently
- * updating threads. The implementation performs internal sizing
- * to try to accommodate this many threads.
- * @throws IllegalArgumentException if the initial capacity is
- * negative or the load factor or concurrencyLevel are
- * nonpositive.
+ * Creates a new, empty map with the default initial table size (16).
*/
- @SuppressWarnings("unchecked")
- public ConcurrentHashMap(int initialCapacity,
- float loadFactor, int concurrencyLevel) {
- if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0)
- throw new IllegalArgumentException();
- if (concurrencyLevel > MAX_SEGMENTS)
- concurrencyLevel = MAX_SEGMENTS;
- // Find power-of-two sizes best matching arguments
- int sshift = 0;
- int ssize = 1;
- while (ssize < concurrencyLevel) {
- ++sshift;
- ssize <<= 1;
- }
- this.segmentShift = 32 - sshift;
- this.segmentMask = ssize - 1;
- if (initialCapacity > MAXIMUM_CAPACITY)
- initialCapacity = MAXIMUM_CAPACITY;
- int c = initialCapacity / ssize;
- if (c * ssize < initialCapacity)
- ++c;
- int cap = MIN_SEGMENT_TABLE_CAPACITY;
- while (cap < c)
- cap <<= 1;
- // create segments and segments[0]
- Segment<K,V> s0 =
- new Segment<K,V>(loadFactor, (int)(cap * loadFactor),
- (HashEntry<K,V>[])new HashEntry<?,?>[cap]);
- Segment<K,V>[] ss = (Segment<K,V>[])new Segment<?,?>[ssize];
- UNSAFE.putOrderedObject(ss, SBASE, s0); // ordered write of segments[0]
- this.segments = ss;
+ public ConcurrentHashMap() {
}
/**
- * Creates a new, empty map with the specified initial capacity
- * and load factor and with the default concurrencyLevel (16).
+ * Creates a new, empty map with an initial table size
+ * accommodating the specified number of elements without the need
+ * to dynamically resize.
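+ * For example, {@code new ConcurrentHashMap<K,V>(16)} records an
+ * initial table size of 32 bins: 16 + 16/2 + 1 = 25, rounded up to
+ * the next power of two by the sizing code below.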
*
* @param initialCapacity The implementation performs internal
* sizing to accommodate this many elements.
- * @param loadFactor the load factor threshold, used to control resizing.
- * Resizing may be performed when the average number of elements per
- * bin exceeds this threshold.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative
+ */
+ public ConcurrentHashMap(int initialCapacity) {
+ if (initialCapacity < 0)
+ throw new IllegalArgumentException();
+ int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
+ MAXIMUM_CAPACITY :
+ tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
+ this.sizeCtl = cap;
+ }
+
+ /**
+ * Creates a new map with the same mappings as the given map.
+ *
+ * @param m the map
+ */
+ public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
+ this.sizeCtl = DEFAULT_CAPACITY;
+ internalPutAll(m);
+ }
+
+ /**
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}) and
+ * initial table density ({@code loadFactor}).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
* @throws IllegalArgumentException if the initial capacity of
* elements is negative or the load factor is nonpositive
*
* @since 1.6
*/
public ConcurrentHashMap(int initialCapacity, float loadFactor) {
- this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL);
- }
-
- /**
- * Creates a new, empty map with the specified initial capacity,
- * and with default load factor (0.75) and concurrencyLevel (16).
- *
- * @param initialCapacity the initial capacity. The implementation
- * performs internal sizing to accommodate this many elements.
- * @throws IllegalArgumentException if the initial capacity of
- * elements is negative.
- */
- public ConcurrentHashMap(int initialCapacity) {
- this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ this(initialCapacity, loadFactor, 1);
}
/**
- * Creates a new, empty map with a default initial capacity (16),
- * load factor (0.75) and concurrencyLevel (16).
+ * Creates a new, empty map with an initial table size based on
+ * the given number of elements ({@code initialCapacity}), table
+ * density ({@code loadFactor}), and number of concurrently
+ * updating threads ({@code concurrencyLevel}).
+ *
+ * @param initialCapacity the initial capacity. The implementation
+ * performs internal sizing to accommodate this many elements,
+ * given the specified load factor.
+ * @param loadFactor the load factor (table density) for
+ * establishing the initial table size
+ * @param concurrencyLevel the estimated number of concurrently
+ * updating threads. The implementation may use this value as
+ * a sizing hint.
+ * @throws IllegalArgumentException if the initial capacity is
+ * negative or the load factor or concurrencyLevel are
+ * nonpositive
*/
- public ConcurrentHashMap() {
- this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
+ public ConcurrentHashMap(int initialCapacity,
+ float loadFactor, int concurrencyLevel) {
+ if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
+ throw new IllegalArgumentException();
+ if (initialCapacity < concurrencyLevel) // Use at least as many bins
+ initialCapacity = concurrencyLevel; // as estimated threads
+ long size = (long)(1.0 + (long)initialCapacity / loadFactor);
+ int cap = (size >= (long)MAXIMUM_CAPACITY) ?
+ MAXIMUM_CAPACITY : tableSizeFor((int)size);
+ this.sizeCtl = cap;
}
/**
- * Creates a new map with the same mappings as the given map.
- * The map is created with a capacity of 1.5 times the number
- * of mappings in the given map or 16 (whichever is greater),
- * and a default load factor (0.75) and concurrencyLevel (16).
+ * Creates a new {@link Set} backed by a ConcurrentHashMap
+ * from the given type to {@code Boolean.TRUE}.
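+ * For example (an illustrative sketch):
+ * <pre> {@code
+ * Set<String> ids = ConcurrentHashMap.newKeySet();
+ * ids.add("a"); // enters ("a", Boolean.TRUE) in the backing map
+ * }</pre>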
*
- * @param m the map
+ * @return the new set
*/
- public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
- this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1,
- DEFAULT_INITIAL_CAPACITY),
- DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL);
- putAll(m);
+ public static <K> KeySetView<K,Boolean> newKeySet() {
+ return new KeySetView<K,Boolean>
+ (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
+ }
+
+ /**
+ * Creates a new {@link Set} backed by a ConcurrentHashMap
+ * from the given type to {@code Boolean.TRUE}.
+ *
+ * @param initialCapacity The implementation performs internal
+ * sizing to accommodate this many elements.
+ * @throws IllegalArgumentException if the initial capacity of
+ * elements is negative
+ * @return the new set
+ */
+ public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
+ return new KeySetView<K,Boolean>
+ (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
}
/**
@@ -834,38 +2637,7 @@
* @return {@code true} if this map contains no key-value mappings
*/
public boolean isEmpty() {
- /*
- * Sum per-segment modCounts to avoid mis-reporting when
- * elements are concurrently added and removed in one segment
- * while checking another, in which case the table was never
- * actually empty at any point. (The sum ensures accuracy up
- * through at least 1<<31 per-segment modifications before
- * recheck.) Methods size() and containsValue() use similar
- * constructions for stability checks.
- */
- long sum = 0L;
- final Segment<K,V>[] segments = this.segments;
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null) {
- if (seg.count != 0)
- return false;
- sum += seg.modCount;
- }
- }
- if (sum != 0L) { // recheck unless no modifications
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null) {
- if (seg.count != 0)
- return false;
- sum -= seg.modCount;
- }
- }
- if (sum != 0L)
- return false;
- }
- return true;
+ return sumCount() <= 0L; // ignore transient negative values
}
/**
@@ -876,43 +2648,24 @@
* @return the number of key-value mappings in this map
*/
public int size() {
- // Try a few times to get accurate count. On failure due to
- // continuous async changes in table, resort to locking.
- final Segment<K,V>[] segments = this.segments;
- int size;
- boolean overflow; // true if size overflows 32 bits
- long sum; // sum of modCounts
- long last = 0L; // previous sum
- int retries = -1; // first iteration isn't retry
- try {
- for (;;) {
- if (retries++ == RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- ensureSegment(j).lock(); // force creation
- }
- sum = 0L;
- size = 0;
- overflow = false;
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null) {
- sum += seg.modCount;
- int c = seg.count;
- if (c < 0 || (size += c) < 0)
- overflow = true;
- }
- }
- if (sum == last)
- break;
- last = sum;
- }
- } finally {
- if (retries > RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- segmentAt(segments, j).unlock();
- }
- }
- return overflow ? Integer.MAX_VALUE : size;
+ long n = sumCount();
+ return ((n < 0L) ? 0 :
+ (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
+ (int)n);
+ }
+
+ /**
+ * Returns the number of mappings. This method should be used
+ * instead of {@link #size} because a ConcurrentHashMap may
+ * contain more mappings than can be represented as an int. The
+ * value returned is an estimate; the actual count may differ if
+ * there are concurrent insertions or removals.
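+ *
+ * <p>For example (an illustrative sketch):
+ * <pre> {@code
+ * long n = map.mappingCount(); // never truncated, unlike size()
+ * }</pre>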
+ *
+ * @return the number of mappings
+ */
+ public long mappingCount() {
+ long n = sumCount();
+ return (n < 0L) ? 0L : n; // ignore transient negative values
}
/**
@@ -926,23 +2679,24 @@
*
* @throws NullPointerException if the specified key is null
*/
- @SuppressWarnings("unchecked")
public V get(Object key) {
- Segment<K,V> s; // manually integrate access methods to reduce overhead
- HashEntry<K,V>[] tab;
- int h = hash(key);
- long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
- if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
- (tab = s.table) != null) {
- for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
- e != null; e = e.next) {
- K k;
- if ((k = e.key) == key || (e.hash == h && key.equals(k)))
- return e.value;
- }
- }
- return null;
+ return internalGet(key);
+ }
+
+ /**
+ * Returns the value to which the specified key is mapped, or the
+ * given default value if this map contains no mapping for the
+ * key.
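+ *
+ * <p>For example (an illustrative sketch; {@code counts} is any
+ * {@code ConcurrentHashMap<String,Integer>}):
+ * <pre> {@code
+ * int c = counts.getOrDefault("absent", 0);
+ * }</pre>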
+ *
+ * @param key the key whose associated value is to be returned
+ * @param defaultValue the value to return if this map contains
+ * no mapping for the given key
+ * @return the mapping for the key, if present; else the default value
+ * @throws NullPointerException if the specified key is null
+ */
+ public V getOrDefault(Object key, V defaultValue) {
+ V v;
+ return (v = internalGet(key)) == null ? defaultValue : v;
}
/**
@@ -954,29 +2708,14 @@
* {@code equals} method; {@code false} otherwise
* @throws NullPointerException if the specified key is null
*/
- @SuppressWarnings("unchecked")
public boolean containsKey(Object key) {
- Segment<K,V> s; // same as get() except no need for volatile value read
- HashEntry<K,V>[] tab;
- int h = hash(key);
- long u = (((h >>> segmentShift) & segmentMask) << SSHIFT) + SBASE;
- if ((s = (Segment<K,V>)UNSAFE.getObjectVolatile(segments, u)) != null &&
- (tab = s.table) != null) {
- for (HashEntry<K,V> e = (HashEntry<K,V>) UNSAFE.getObjectVolatile
- (tab, ((long)(((tab.length - 1) & h)) << TSHIFT) + TBASE);
- e != null; e = e.next) {
- K k;
- if ((k = e.key) == key || (e.hash == h && key.equals(k)))
- return true;
- }
- }
- return false;
+ return internalGet(key) != null;
}
/**
* Returns {@code true} if this map maps one or more keys to the
- * specified value. Note: This method requires a full traversal
- * of the map, and so is much slower than method {@code containsKey}.
+ * specified value. Note: This method may require a full traversal
+ * of the map, and is much slower than method {@code containsKey}.
*
* @param value value whose presence in this map is to be tested
* @return {@code true} if this map maps one or more keys to the
@@ -984,49 +2723,18 @@
* @throws NullPointerException if the specified value is null
*/
public boolean containsValue(Object value) {
- // Same idea as size()
if (value == null)
throw new NullPointerException();
- final Segment<K,V>[] segments = this.segments;
- boolean found = false;
- long last = 0;
- int retries = -1;
- try {
- outer: for (;;) {
- if (retries++ == RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- ensureSegment(j).lock(); // force creation
- }
- long hashSum = 0L;
- int sum = 0;
- for (int j = 0; j < segments.length; ++j) {
- HashEntry<K,V>[] tab;
- Segment<K,V> seg = segmentAt(segments, j);
- if (seg != null && (tab = seg.table) != null) {
- for (int i = 0 ; i < tab.length; i++) {
- HashEntry<K,V> e;
- for (e = entryAt(tab, i); e != null; e = e.next) {
- V v = e.value;
- if (v != null && value.equals(v)) {
- found = true;
- break outer;
- }
- }
- }
- sum += seg.modCount;
- }
- }
- if (retries > 0 && sum == last)
- break;
- last = sum;
- }
- } finally {
- if (retries > RETRIES_BEFORE_LOCK) {
- for (int j = 0; j < segments.length; ++j)
- segmentAt(segments, j).unlock();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V v;
+ if ((v = p.val) == value || value.equals(v))
+ return true;
}
}
- return found;
+ return false;
}
/**
@@ -1061,17 +2769,8 @@
* {@code null} if there was no mapping for {@code key}
* @throws NullPointerException if the specified key or value is null
*/
- @SuppressWarnings("unchecked")
public V put(K key, V value) {
- Segment<K,V> s;
- if (value == null)
- throw new NullPointerException();
- int hash = hash(key);
- int j = (hash >>> segmentShift) & segmentMask;
- if ((s = (Segment<K,V>)UNSAFE.getObject // nonvolatile; recheck
- (segments, (j << SSHIFT) + SBASE)) == null) // in ensureSegment
- s = ensureSegment(j);
- return s.put(key, hash, value, false);
+ return internalPut(key, value, false);
}
/**
@@ -1081,17 +2780,8 @@
* or {@code null} if there was no mapping for the key
* @throws NullPointerException if the specified key or value is null
*/
- @SuppressWarnings("unchecked")
public V putIfAbsent(K key, V value) {
- Segment<K,V> s;
- if (value == null)
- throw new NullPointerException();
- int hash = hash(key);
- int j = (hash >>> segmentShift) & segmentMask;
- if ((s = (Segment<K,V>)UNSAFE.getObject
- (segments, (j << SSHIFT) + SBASE)) == null)
- s = ensureSegment(j);
- return s.put(key, hash, value, true);
+ return internalPut(key, value, true);
}
/**
@@ -1102,8 +2792,105 @@
* @param m mappings to be stored in this map
*/
public void putAll(Map<? extends K, ? extends V> m) {
- for (Map.Entry<? extends K, ? extends V> e : m.entrySet())
- put(e.getKey(), e.getValue());
+ internalPutAll(m);
+ }
+
+ /**
+ * If the specified key is not already associated with a value,
+ * attempts to compute its value using the given mapping function
+ * and enters it into this map unless {@code null}. The entire
+ * method invocation is performed atomically, so the function is
+ * applied at most once per key. Some attempted update operations
+ * on this map by other threads may be blocked while computation
+ * is in progress, so the computation should be short and simple,
+ * and must not attempt to update any other mappings of this map.
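+ *
+ * <p>A typical usage sketch (names here are illustrative, not part
+ * of this patch):
+ * <pre> {@code
+ * ConcurrentHashMap<String, List<String>> index = new ConcurrentHashMap<>();
+ * index.computeIfAbsent("k", k -> new ArrayList<>()).add("v");
+ * }</pre>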
+ *
+ * @param key key with which the specified value is to be associated
+ * @param mappingFunction the function to compute a value
+ * @return the current (existing or computed) value associated with
+ * the specified key, or null if the computed value is null
+ * @throws NullPointerException if the specified key or mappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the mappingFunction does so,
+ * in which case the mapping is left unestablished
+ */
+ public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
+ return internalComputeIfAbsent(key, mappingFunction);
+ }
+
+ /**
+ * If the value for the specified key is present, attempts to
+ * compute a new mapping given the key and its current mapped
+ * value. The entire method invocation is performed atomically.
+ * Some attempted update operations on this map by other threads
+ * may be blocked while computation is in progress, so the
+ * computation should be short and simple, and must not attempt to
+ * update any other mappings of this map.
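+ *
+ * <p>For example (an illustrative sketch): decrement a reference
+ * count, removing the mapping once it reaches zero:
+ * <pre> {@code
+ * refs.computeIfPresent(key, (k, n) -> (n == 1) ? null : n - 1);
+ * }</pre>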
+ *
+ * @param key key with which a value may be associated
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or remappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ return internalCompute(key, true, remappingFunction);
+ }
+
+ /**
+ * Attempts to compute a mapping for the specified key and its
+ * current mapped value (or {@code null} if there is no current
+ * mapping). The entire method invocation is performed atomically.
+ * Some attempted update operations on this map by other threads
+ * may be blocked while computation is in progress, so the
+ * computation should be short and simple, and must not attempt to
+ * update any other mappings of this Map.
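+ *
+ * <p>For example (an illustrative sketch), a concurrent word count:
+ * <pre> {@code
+ * counts.compute(word, (k, v) -> (v == null) ? 1 : v + 1);
+ * }</pre>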
+ *
+ * @param key key with which the specified value is to be associated
+ * @param remappingFunction the function to compute a value
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or remappingFunction
+ * is null
+ * @throws IllegalStateException if the computation detectably
+ * attempts a recursive update to this map that would
+ * otherwise never complete
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
+ return internalCompute(key, false, remappingFunction);
+ }
+
+ /**
+ * If the specified key is not already associated with a
+ * (non-null) value, associates it with the given value.
+ * Otherwise, replaces the value with the results of the given
+ * remapping function, or removes if {@code null}. The entire
+ * method invocation is performed atomically. Some attempted
+ * update operations on this map by other threads may be blocked
+ * while computation is in progress, so the computation should be
+ * short and simple, and must not attempt to update any other
+ * mappings of this Map.
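+ *
+ * <p>For example (an illustrative sketch), the word count shown for
+ * {@link #compute compute} can be written more simply as:
+ * <pre> {@code
+ * counts.merge(word, 1, (a, b) -> a + b);
+ * }</pre>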
+ *
+ * @param key key with which the specified value is to be associated
+ * @param value the value to use if absent
+ * @param remappingFunction the function to recompute a value if present
+ * @return the new value associated with the specified key, or null if none
+ * @throws NullPointerException if the specified key or the
+ * remappingFunction is null
+ * @throws RuntimeException or Error if the remappingFunction does so,
+ * in which case the mapping is unchanged
+ */
+ public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
+ return internalMerge(key, value, remappingFunction);
}
/**
@@ -1116,9 +2903,7 @@
* @throws NullPointerException if the specified key is null
*/
public V remove(Object key) {
- int hash = hash(key);
- Segment<K,V> s = segmentForHash(hash);
- return s == null ? null : s.remove(key, hash, null);
+ return internalReplace(key, null, null);
}
/**
@@ -1127,10 +2912,9 @@
* @throws NullPointerException if the specified key is null
*/
public boolean remove(Object key, Object value) {
- int hash = hash(key);
- Segment<K,V> s;
- return value != null && (s = segmentForHash(hash)) != null &&
- s.remove(key, hash, value) != null;
+ if (key == null)
+ throw new NullPointerException();
+ return value != null && internalReplace(key, null, value) != null;
}
/**
@@ -1139,11 +2923,9 @@
* @throws NullPointerException if any of the arguments are null
*/
public boolean replace(K key, V oldValue, V newValue) {
- int hash = hash(key);
- if (oldValue == null || newValue == null)
+ if (key == null || oldValue == null || newValue == null)
throw new NullPointerException();
- Segment<K,V> s = segmentForHash(hash);
- return s != null && s.replace(key, hash, oldValue, newValue);
+ return internalReplace(key, newValue, oldValue) != null;
}
/**
@@ -1154,23 +2936,16 @@
* @throws NullPointerException if the specified key or value is null
*/
public V replace(K key, V value) {
- int hash = hash(key);
- if (value == null)
+ if (key == null || value == null)
throw new NullPointerException();
- Segment<K,V> s = segmentForHash(hash);
- return s == null ? null : s.replace(key, hash, value);
+ return internalReplace(key, value, null);
}
/**
* Removes all of the mappings from this map.
*/
public void clear() {
- final Segment<K,V>[] segments = this.segments;
- for (int j = 0; j < segments.length; ++j) {
- Segment<K,V> s = segmentAt(segments, j);
- if (s != null)
- s.clear();
- }
+ internalClear();
}
/**
@@ -1188,10 +2963,29 @@
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
+ *
+ * @return the set view
*/
- public Set<K> keySet() {
- Set<K> ks = keySet;
- return (ks != null) ? ks : (keySet = new KeySet());
+ public KeySetView<K,V> keySet() {
+ KeySetView<K,V> ks = keySet;
+ return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
+ }
+
+ /**
+ * Returns a {@link Set} view of the keys in this map, using the
+ * given common mapped value for any additions (i.e., {@link
+ * Collection#add} and {@link Collection#addAll(Collection)}).
+ * This is of course only appropriate if it is acceptable to use
+ * the same value for all additions from this view.
+ *
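+ * <p>For example, a sketch of using the view as a concurrent set whose
+ * additions all map to a single placeholder value:
+ * <pre> {@code
+ * ConcurrentHashMap<String,Boolean> map = new ConcurrentHashMap<>();
+ * Set<String> names = map.keySet(Boolean.TRUE);
+ * names.add("alice"); // puts ("alice", TRUE) into map}</pre>
+ *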
+ * @param mappedValue the mapped value to use for any additions
+ * @return the set view
+ * @throws NullPointerException if the mappedValue is null
+ */
+ public KeySetView<K,V> keySet(V mappedValue) {
+ if (mappedValue == null)
+ throw new NullPointerException();
+ return new KeySetView<K,V>(this, mappedValue);
}
/**
@@ -1209,10 +3003,12 @@
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
+ *
+ * @return the collection view
*/
public Collection<V> values() {
- Collection<V> vs = values;
- return (vs != null) ? vs : (values = new Values());
+ ValuesView<K,V> vs = values;
+ return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
}
/**
@@ -1222,18 +3018,19 @@
* removal, which removes the corresponding mapping from the map,
* via the {@code Iterator.remove}, {@code Set.remove},
* {@code removeAll}, {@code retainAll}, and {@code clear}
- * operations. It does not support the {@code add} or
- * {@code addAll} operations.
+ * operations.
*
* <p>The view's {@code iterator} is a "weakly consistent" iterator
* that will never throw {@link ConcurrentModificationException},
* and guarantees to traverse elements as they existed upon
* construction of the iterator, and may (but is not guaranteed to)
* reflect any modifications subsequent to construction.
+ *
+ * @return the set view
*/
public Set<Map.Entry<K,V>> entrySet() {
- Set<Map.Entry<K,V>> es = entrySet;
- return (es != null) ? es : (entrySet = new EntrySet());
+ EntrySetView<K,V> es = entrySet;
+ return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
}
/**
@@ -1243,7 +3040,9 @@
* @see #keySet()
*/
public Enumeration<K> keys() {
- return new KeyIterator();
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ return new KeyIterator<K,V>(t, f, 0, f, this);
}
/**
@@ -1253,192 +3052,111 @@
* @see #values()
*/
public Enumeration<V> elements() {
- return new ValueIterator();
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ return new ValueIterator<K,V>(t, f, 0, f, this);
}
- /* ---------------- Iterator Support -------------- */
-
- abstract class HashIterator {
- int nextSegmentIndex;
- int nextTableIndex;
- HashEntry<K,V>[] currentTable;
- HashEntry<K, V> nextEntry;
- HashEntry<K, V> lastReturned;
-
- HashIterator() {
- nextSegmentIndex = segments.length - 1;
- nextTableIndex = -1;
- advance();
+ /**
+ * Returns the hash code value for this {@link Map}, i.e.,
+ * the sum of, for each key-value pair in the map,
+ * {@code key.hashCode() ^ value.hashCode()}.
+ *
+ * @return the hash code value for this map
+ */
+ public int hashCode() {
+ int h = 0;
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ h += p.key.hashCode() ^ p.val.hashCode();
}
-
- /**
- * Sets nextEntry to first node of next non-empty table
- * (in backwards order, to simplify checks).
- */
- final void advance() {
- for (;;) {
- if (nextTableIndex >= 0) {
- if ((nextEntry = entryAt(currentTable,
- nextTableIndex--)) != null)
- break;
- }
- else if (nextSegmentIndex >= 0) {
- Segment<K,V> seg = segmentAt(segments, nextSegmentIndex--);
- if (seg != null && (currentTable = seg.table) != null)
- nextTableIndex = currentTable.length - 1;
- }
- else
- break;
- }
- }
-
- final HashEntry<K,V> nextEntry() {
- HashEntry<K,V> e = nextEntry;
- if (e == null)
- throw new NoSuchElementException();
- lastReturned = e; // cannot assign until after null check
- if ((nextEntry = e.next) == null)
- advance();
- return e;
- }
-
- public final boolean hasNext() { return nextEntry != null; }
- public final boolean hasMoreElements() { return nextEntry != null; }
-
- public final void remove() {
- if (lastReturned == null)
- throw new IllegalStateException();
- ConcurrentHashMap.this.remove(lastReturned.key);
- lastReturned = null;
- }
- }
-
- final class KeyIterator
- extends HashIterator
- implements Iterator<K>, Enumeration<K>
- {
- public final K next() { return super.nextEntry().key; }
- public final K nextElement() { return super.nextEntry().key; }
- }
-
- final class ValueIterator
- extends HashIterator
- implements Iterator<V>, Enumeration<V>
- {
- public final V next() { return super.nextEntry().value; }
- public final V nextElement() { return super.nextEntry().value; }
+ return h;
}
/**
- * Custom Entry class used by EntryIterator.next(), that relays
- * setValue changes to the underlying map.
+ * Returns a string representation of this map. The string
+ * representation consists of a list of key-value mappings (in no
+ * particular order) enclosed in braces ("{@code {}}"). Adjacent
+ * mappings are separated by the characters {@code ", "} (comma
+ * and space). Each key-value mapping is rendered as the key
+ * followed by an equals sign ("{@code =}") followed by the
+ * associated value.
+ *
+ * @return a string representation of this map
*/
- final class WriteThroughEntry
- extends AbstractMap.SimpleEntry<K,V>
- {
- static final long serialVersionUID = 7249069246763182397L;
-
- WriteThroughEntry(K k, V v) {
- super(k,v);
- }
-
- /**
- * Sets our entry's value and writes through to the map. The
- * value to return is somewhat arbitrary here. Since a
- * WriteThroughEntry does not necessarily track asynchronous
- * changes, the most recent "previous" value could be
- * different from what we return (or could even have been
- * removed in which case the put will re-establish). We do not
- * and cannot guarantee more.
- */
- public V setValue(V value) {
- if (value == null) throw new NullPointerException();
- V v = super.setValue(value);
- ConcurrentHashMap.this.put(getKey(), value);
- return v;
- }
- }
-
- final class EntryIterator
- extends HashIterator
- implements Iterator<Entry<K,V>>
- {
- public Map.Entry<K,V> next() {
- HashEntry<K,V> e = super.nextEntry();
- return new WriteThroughEntry(e.key, e.value);
- }
- }
-
- final class KeySet extends AbstractSet<K> {
- public Iterator<K> iterator() {
- return new KeyIterator();
- }
- public int size() {
- return ConcurrentHashMap.this.size();
- }
- public boolean isEmpty() {
- return ConcurrentHashMap.this.isEmpty();
- }
- public boolean contains(Object o) {
- return ConcurrentHashMap.this.containsKey(o);
+ public String toString() {
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ StringBuilder sb = new StringBuilder();
+ sb.append('{');
+ Node<K,V> p;
+ if ((p = it.advance()) != null) {
+ for (;;) {
+ K k = (K)p.key;
+ V v = p.val;
+ sb.append(k == this ? "(this Map)" : k);
+ sb.append('=');
+ sb.append(v == this ? "(this Map)" : v);
+ if ((p = it.advance()) == null)
+ break;
+ sb.append(',').append(' ');
+ }
}
- public boolean remove(Object o) {
- return ConcurrentHashMap.this.remove(o) != null;
- }
- public void clear() {
- ConcurrentHashMap.this.clear();
- }
- }
-
- final class Values extends AbstractCollection<V> {
- public Iterator<V> iterator() {
- return new ValueIterator();
- }
- public int size() {
- return ConcurrentHashMap.this.size();
- }
- public boolean isEmpty() {
- return ConcurrentHashMap.this.isEmpty();
- }
- public boolean contains(Object o) {
- return ConcurrentHashMap.this.containsValue(o);
- }
- public void clear() {
- ConcurrentHashMap.this.clear();
- }
+ return sb.append('}').toString();
}
- final class EntrySet extends AbstractSet<Map.Entry<K,V>> {
- public Iterator<Map.Entry<K,V>> iterator() {
- return new EntryIterator();
- }
- public boolean contains(Object o) {
- if (!(o instanceof Map.Entry))
+ /**
+ * Compares the specified object with this map for equality.
+ * Returns {@code true} if the given object is a map with the same
+ * mappings as this map. This operation may return misleading
+ * results if either map is concurrently modified during execution
+ * of this method.
+ *
+ * @param o object to be compared for equality with this map
+ * @return {@code true} if the specified object is equal to this map
+ */
+ public boolean equals(Object o) {
+ if (o != this) {
+ if (!(o instanceof Map))
return false;
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- V v = ConcurrentHashMap.this.get(e.getKey());
- return v != null && v.equals(e.getValue());
+ Map<?,?> m = (Map<?,?>) o;
+ Node<K,V>[] t;
+ int f = (t = table) == null ? 0 : t.length;
+ Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ V val = p.val;
+ Object v = m.get(p.key);
+ if (v == null || (v != val && !v.equals(val)))
+ return false;
+ }
+ for (Map.Entry<?,?> e : m.entrySet()) {
+ Object mk, mv, v;
+ if ((mk = e.getKey()) == null ||
+ (mv = e.getValue()) == null ||
+ (v = internalGet(mk)) == null ||
+ (mv != v && !mv.equals(v)))
+ return false;
+ }
}
- public boolean remove(Object o) {
- if (!(o instanceof Map.Entry))
- return false;
- Map.Entry<?,?> e = (Map.Entry<?,?>)o;
- return ConcurrentHashMap.this.remove(e.getKey(), e.getValue());
- }
- public int size() {
- return ConcurrentHashMap.this.size();
- }
- public boolean isEmpty() {
- return ConcurrentHashMap.this.isEmpty();
- }
- public void clear() {
- ConcurrentHashMap.this.clear();
- }
+ return true;
}
/* ---------------- Serialization Support -------------- */
/**
+ * Stripped-down version of helper class used in previous version,
+ * declared for the sake of serialization compatibility
+ */
+ static class Segment<K,V> extends ReentrantLock implements Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ final float loadFactor;
+ Segment(float lf) { this.loadFactor = lf; }
+ }
+
+ /**
* Saves the state of the {@code ConcurrentHashMap} instance to a
* stream (i.e., serializes it).
* @param s the stream
@@ -1448,119 +3166,2701 @@
* The key-value mappings are emitted in no particular order.
*/
private void writeObject(java.io.ObjectOutputStream s)
- throws java.io.IOException {
- // force all segments for serialization compatibility
- for (int k = 0; k < segments.length; ++k)
- ensureSegment(k);
- s.defaultWriteObject();
-
- final Segment<K,V>[] segments = this.segments;
- for (int k = 0; k < segments.length; ++k) {
- Segment<K,V> seg = segmentAt(segments, k);
- seg.lock();
- try {
- HashEntry<K,V>[] tab = seg.table;
- for (int i = 0; i < tab.length; ++i) {
- HashEntry<K,V> e;
- for (e = entryAt(tab, i); e != null; e = e.next) {
- s.writeObject(e.key);
- s.writeObject(e.value);
- }
- }
- } finally {
- seg.unlock();
+ throws java.io.IOException {
+ // For serialization compatibility
+ // Emulate segment calculation from previous version of this class
+ int sshift = 0;
+ int ssize = 1;
+ while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
+ ++sshift;
+ ssize <<= 1;
+ }
+ int segmentShift = 32 - sshift;
+ int segmentMask = ssize - 1;
+ Segment<K,V>[] segments = (Segment<K,V>[])
+ new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
+ for (int i = 0; i < segments.length; ++i)
+ segments[i] = new Segment<K,V>(LOAD_FACTOR);
+ s.putFields().put("segments", segments);
+ s.putFields().put("segmentShift", segmentShift);
+ s.putFields().put("segmentMask", segmentMask);
+ s.writeFields();
+
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ s.writeObject(p.key);
+ s.writeObject(p.val);
}
}
s.writeObject(null);
s.writeObject(null);
+ segments = null; // throw away
}
/**
* Reconstitutes the instance from a stream (that is, deserializes it).
* @param s the stream
*/
- @SuppressWarnings("unchecked")
private void readObject(java.io.ObjectInputStream s)
- throws java.io.IOException, ClassNotFoundException {
- // Don't call defaultReadObject()
- ObjectInputStream.GetField oisFields = s.readFields();
- final Segment<K,V>[] oisSegments = (Segment<K,V>[])oisFields.get("segments", null);
-
- final int ssize = oisSegments.length;
- if (ssize < 1 || ssize > MAX_SEGMENTS
- || (ssize & (ssize-1)) != 0 ) // ssize not power of two
- throw new java.io.InvalidObjectException("Bad number of segments:"
- + ssize);
- int sshift = 0, ssizeTmp = ssize;
- while (ssizeTmp > 1) {
- ++sshift;
- ssizeTmp >>>= 1;
+ throws java.io.IOException, ClassNotFoundException {
+ s.defaultReadObject();
+
+ // Create all nodes, then place in table once size is known
+ long size = 0L;
+ Node<K,V> p = null;
+ for (;;) {
+ K k = (K) s.readObject();
+ V v = (V) s.readObject();
+ if (k != null && v != null) {
+ int h = spread(k.hashCode());
+ p = new Node<K,V>(h, k, v, p);
+ ++size;
+ }
+ else
+ break;
+ }
+ if (p != null) {
+ boolean init = false;
+ int n;
+ if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
+ n = MAXIMUM_CAPACITY;
+ else {
+ int sz = (int)size;
+ n = tableSizeFor(sz + (sz >>> 1) + 1);
+ }
+ int sc = sizeCtl;
+ boolean collide = false;
+ if (n > sc &&
+ U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
+ try {
+ if (table == null) {
+ init = true;
+ Node<K,V>[] tab = (Node<K,V>[])new Node[n];
+ int mask = n - 1;
+ while (p != null) {
+ int j = p.hash & mask;
+ Node<K,V> next = p.next;
+ Node<K,V> q = p.next = tabAt(tab, j);
+ setTabAt(tab, j, p);
+ if (!collide && q != null && q.hash == p.hash)
+ collide = true;
+ p = next;
+ }
+ table = tab;
+ addCount(size, -1);
+ sc = n - (n >>> 2);
+ }
+ } finally {
+ sizeCtl = sc;
+ }
+ if (collide) { // rescan and convert to TreeBins
+ Node<K,V>[] tab = table;
+ for (int i = 0; i < tab.length; ++i) {
+ int c = 0;
+ for (Node<K,V> e = tabAt(tab, i); e != null; e = e.next) {
+ if (++c > TREE_THRESHOLD &&
+ (e.key instanceof Comparable)) {
+ replaceWithTreeBin(tab, i, e.key);
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (!init) { // Can only happen if unsafely published.
+ while (p != null) {
+ internalPut((K)p.key, p.val, false);
+ p = p.next;
+ }
+ }
+ }
+ }
+
+ // -------------------------------------------------------
+
+ // Overrides of other default Map methods
+
+ public void forEach(BiConsumer<? super K, ? super V> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ action.accept((K)p.key, p.val);
+ }
+ }
+ }
+
+ public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
+ if (function == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ K k = (K)p.key;
+ internalPut(k, function.apply(k, p.val), false);
+ }
}
- UNSAFE.putIntVolatile(this, SEGSHIFT_OFFSET, 32 - sshift);
- UNSAFE.putIntVolatile(this, SEGMASK_OFFSET, ssize - 1);
- UNSAFE.putObjectVolatile(this, SEGMENTS_OFFSET, oisSegments);
-
- // set hashMask
- UNSAFE.putIntVolatile(this, HASHSEED_OFFSET,
- sun.misc.Hashing.randomHashSeed(this));
-
- // Re-initialize segments to be minimally sized, and let grow.
- int cap = MIN_SEGMENT_TABLE_CAPACITY;
- final Segment<K,V>[] segments = this.segments;
- for (int k = 0; k < segments.length; ++k) {
- Segment<K,V> seg = segments[k];
- if (seg != null) {
- seg.threshold = (int)(cap * seg.loadFactor);
- seg.table = (HashEntry<K,V>[]) new HashEntry<?,?>[cap];
+ }
+
+ // -------------------------------------------------------
+
+ // Parallel bulk operations
+
+ /**
+ * Computes initial batch value for bulk tasks. The returned value
+ * is approximately exp2 of the number of times (minus one) to
+ * split the task by two before executing the leaf action. This value is
+ * faster to compute and more convenient to use as a guide to
+ * splitting than is the depth, since it is used while dividing by
+ * two anyway.
+ */
+ final int batchFor(long b) {
+ long n;
+ if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
+ return 0;
+ int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
+ return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
+ }
+
+ /**
+ * Performs the given action for each (key, value).
+ *
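+ * <p>For example, a sketch that prints every mapping, splitting into
+ * parallel subtasks only when the map holds roughly 1000 or more
+ * elements (the threshold and map are illustrative):
+ * <pre> {@code
+ * map.forEach(1000L, (k, v) -> System.out.println(k + "=" + v));}</pre>
+ *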
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ */
+ public void forEach(long parallelismThreshold,
+ BiConsumer<? super K,? super V> action) {
+ if (action == null) throw new NullPointerException();
+ new ForEachMappingTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each (key, value).
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ */
+ public <U> void forEach(long parallelismThreshold,
+ BiFunction<? super K, ? super V, ? extends U> transformer,
+ Consumer<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedMappingTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each (key, value), or null if none. Upon
+ * success, further element processing is suppressed and the
+ * results of any other parallel invocations of the search
+ * function are ignored.
+ *
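+ * <p>For example, a sketch that finds a key mapped to an empty value;
+ * a threshold of {@code Long.MAX_VALUE} keeps the search sequential
+ * (a {@code ConcurrentHashMap<String,String> map} is assumed):
+ * <pre> {@code
+ * String k = map.search(Long.MAX_VALUE,
+ *     (key, val) -> val.isEmpty() ? key : null);}</pre>
+ *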
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each (key, value), or null if none
+ */
+ public <U> U search(long parallelismThreshold,
+ BiFunction<? super K, ? super V, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchMappingsTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, or null if none.
+ *
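+ * <p>For example, a sketch that sums the lengths of all values (a
+ * {@code ConcurrentHashMap<String,String> map} and the threshold are
+ * assumed):
+ * <pre> {@code
+ * Integer total = map.reduce(500L,
+ *     (k, v) -> v.length(), Integer::sum);}</pre>
+ *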
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ */
+ public <U> U reduce(long parallelismThreshold,
+ BiFunction<? super K, ? super V, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ */
+ public double reduceToDouble(long parallelismThreshold,
+ ToDoubleBiFunction<? super K, ? super V> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, and the given basis as an identity value.
+ *
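+ * <p>For example, a sketch that totals the values with a zero basis
+ * (a {@code ConcurrentHashMap<String,Long> map} is assumed):
+ * <pre> {@code
+ * long total = map.reduceToLong(1000L, (k, v) -> v, 0L, Long::sum);}</pre>
+ *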
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ */
+ public long reduceToLong(long parallelismThreshold,
+ ToLongBiFunction<? super K, ? super V> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all (key, value) pairs using the given reducer to
+ * combine values, and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all (key, value) pairs
+ */
+ public int reduceToInt(long parallelismThreshold,
+ ToIntBiFunction<? super K, ? super V> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceMappingsToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Performs the given action for each key.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ */
+ public void forEachKey(long parallelismThreshold,
+ Consumer<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ new ForEachKeyTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each key.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ */
+ public <U> void forEachKey(long parallelismThreshold,
+ Function<? super K, ? extends U> transformer,
+ Consumer<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedKeyTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each key, or null if none. Upon success,
+ * further element processing is suppressed and the results of
+ * any other parallel invocations of the search function are
+ * ignored.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each key, or null if none
+ */
+ public <U> U searchKeys(long parallelismThreshold,
+ Function<? super K, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchKeysTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating all keys using the given
+ * reducer to combine values, or null if none.
+ *
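+ * <p>For example, a sketch that picks the lexicographically greatest
+ * key, using a reducer that is commutative and associative as required
+ * ({@code String} keys are assumed):
+ * <pre> {@code
+ * String max = map.reduceKeys(Long.MAX_VALUE,
+ *     (a, b) -> a.compareTo(b) >= 0 ? a : b);}</pre>
+ *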
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating all keys using the given
+ * reducer to combine values, or null if none
+ */
+ public K reduceKeys(long parallelismThreshold,
+ BiFunction<? super K, ? super K, ? extends K> reducer) {
+ if (reducer == null) throw new NullPointerException();
+ return new ReduceKeysTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, or
+ * null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ */
+ public <U> U reduceKeys(long parallelismThreshold,
+ Function<? super K, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, and
+ * the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ */
+ public double reduceKeysToDouble(long parallelismThreshold,
+ ToDoubleFunction<? super K> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, and
+ * the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ */
+ public long reduceKeysToLong(long parallelismThreshold,
+ ToLongFunction<? super K> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all keys using the given reducer to combine values, and
+ * the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all keys
+ */
+ public int reduceKeysToInt(long parallelismThreshold,
+ ToIntFunction<? super K> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceKeysToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Performs the given action for each value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ */
+ public void forEachValue(long parallelismThreshold,
+ Consumer<? super V> action) {
+ if (action == null)
+ throw new NullPointerException();
+ new ForEachValueTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ */
+ public <U> void forEachValue(long parallelismThreshold,
+ Function<? super V, ? extends U> transformer,
+ Consumer<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedValueTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each value, or null if none. Upon success,
+ * further element processing is suppressed and the results of
+ * any other parallel invocations of the search function are
+ * ignored.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each value, or null if none
+ */
+ public <U> U searchValues(long parallelismThreshold,
+ Function<? super V, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchValuesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating all values using the
+ * given reducer to combine values, or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating all values
+ */
+ public V reduceValues(long parallelismThreshold,
+ BiFunction<? super V, ? super V, ? extends V> reducer) {
+ if (reducer == null) throw new NullPointerException();
+ return new ReduceValuesTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values, or
+ * null if none.
+ *
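+ * <p>For example, a sketch that finds the greatest value length
+ * ({@code String} values are assumed):
+ * <pre> {@code
+ * Integer longest = map.reduceValues(Long.MAX_VALUE,
+ *     String::length, Integer::max);}</pre>
+ *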
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ */
+ public <U> U reduceValues(long parallelismThreshold,
+ Function<? super V, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ */
+ public double reduceValuesToDouble(long parallelismThreshold,
+ ToDoubleFunction<? super V> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ */
+ public long reduceValuesToLong(long parallelismThreshold,
+ ToLongFunction<? super V> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all values using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all values
+ */
+ public int reduceValuesToInt(long parallelismThreshold,
+ ToIntFunction<? super V> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceValuesToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Performs the given action for each entry.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param action the action
+ */
+ public void forEachEntry(long parallelismThreshold,
+ Consumer<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
+ action).invoke();
+ }
+
+ /**
+ * Performs the given action for each non-null transformation
+ * of each entry.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case the action is not applied)
+ * @param action the action
+ */
+ public <U> void forEachEntry(long parallelismThreshold,
+ Function<Map.Entry<K,V>, ? extends U> transformer,
+ Consumer<? super U> action) {
+ if (transformer == null || action == null)
+ throw new NullPointerException();
+ new ForEachTransformedEntryTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ transformer, action).invoke();
+ }
+
+ /**
+ * Returns a non-null result from applying the given search
+ * function on each entry, or null if none. Upon success,
+ * further element processing is suppressed and the results of
+ * any other parallel invocations of the search function are
+ * ignored.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param searchFunction a function returning a non-null
+ * result on success, else null
+ * @return a non-null result from applying the given search
+ * function on each entry, or null if none
+ */
+ public <U> U searchEntries(long parallelismThreshold,
+ Function<Map.Entry<K,V>, ? extends U> searchFunction) {
+ if (searchFunction == null) throw new NullPointerException();
+ return new SearchEntriesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ searchFunction, new AtomicReference<U>()).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating all entries using the
+ * given reducer to combine values, or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating all entries
+ */
+ public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
+ BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
+ if (reducer == null) throw new NullPointerException();
+ return new ReduceEntriesTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * or null if none.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element, or null if there is no transformation (in
+ * which case it is not combined)
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ */
+ public <U> U reduceEntries(long parallelismThreshold,
+ Function<Map.Entry<K,V>, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesTask<K,V,U>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ */
+ public double reduceEntriesToDouble(long parallelismThreshold,
+ ToDoubleFunction<Map.Entry<K,V>> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesToDoubleTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ */
+ public long reduceEntriesToLong(long parallelismThreshold,
+ ToLongFunction<Map.Entry<K,V>> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesToLongTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+ /**
+ * Returns the result of accumulating the given transformation
+ * of all entries using the given reducer to combine values,
+ * and the given basis as an identity value.
+ *
+ * @param parallelismThreshold the (estimated) number of elements
+ * needed for this operation to be executed in parallel
+ * @param transformer a function returning the transformation
+ * for an element
+ * @param basis the identity (initial default value) for the reduction
+ * @param reducer a commutative associative combining function
+ * @return the result of accumulating the given transformation
+ * of all entries
+ */
+ public int reduceEntriesToInt(long parallelismThreshold,
+ ToIntFunction<Map.Entry<K,V>> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ if (transformer == null || reducer == null)
+ throw new NullPointerException();
+ return new MapReduceEntriesToIntTask<K,V>
+ (null, batchFor(parallelismThreshold), 0, 0, table,
+ null, transformer, basis, reducer).invoke();
+ }
+
+
+ /* ---------------- Views -------------- */
+
+ /**
+ * Base class for views.
+ */
+ abstract static class CollectionView<K,V,E>
+ implements Collection<E>, java.io.Serializable {
+ private static final long serialVersionUID = 7249069246763182397L;
+ final ConcurrentHashMap<K,V> map;
+ CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; }
+
+ /**
+ * Returns the map backing this view.
+ *
+ * @return the map backing this view
+ */
+ public ConcurrentHashMap<K,V> getMap() { return map; }
+
+ /**
+ * Removes all of the elements from this view, by removing all
+ * the mappings from the map backing this view.
+ */
+ public final void clear() { map.clear(); }
+ public final int size() { return map.size(); }
+ public final boolean isEmpty() { return map.isEmpty(); }
+
+ // implementations below rely on concrete classes supplying these
+ // abstract methods
+ /**
+ * Returns a "weakly consistent" iterator that will never
+ * throw {@link ConcurrentModificationException}, and
+ * guarantees to traverse elements as they existed upon
+ * construction of the iterator, and may (but is not
+ * guaranteed to) reflect any modifications subsequent to
+ * construction.
+ */
+ public abstract Iterator<E> iterator();
+ public abstract boolean contains(Object o);
+ public abstract boolean remove(Object o);
+
+ private static final String oomeMsg = "Required array size too large";
+
+ public final Object[] toArray() {
+ long sz = map.mappingCount();
+ if (sz > MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ int n = (int)sz;
+ Object[] r = new Object[n];
+ int i = 0;
+ for (E e : this) {
+ if (i == n) {
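+ // size estimate was low; grow by ~50%, saturating at MAX_ARRAY_SIZE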
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = e;
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
+ }
+
+ public final <T> T[] toArray(T[] a) {
+ long sz = map.mappingCount();
+ if (sz > MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ int m = (int)sz;
+ T[] r = (a.length >= m) ? a :
+ (T[])java.lang.reflect.Array
+ .newInstance(a.getClass().getComponentType(), m);
+ int n = r.length;
+ int i = 0;
+ for (E e : this) {
+ if (i == n) {
+ if (n >= MAX_ARRAY_SIZE)
+ throw new OutOfMemoryError(oomeMsg);
+ if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
+ n = MAX_ARRAY_SIZE;
+ else
+ n += (n >>> 1) + 1;
+ r = Arrays.copyOf(r, n);
+ }
+ r[i++] = (T)e;
+ }
+ if (a == r && i < n) {
+ r[i] = null; // null-terminate
+ return r;
+ }
+ return (i == n) ? r : Arrays.copyOf(r, i);
+ }
+
+ /**
+ * Returns a string representation of this collection.
+ * The string representation consists of the string representations
+ * of the collection's elements in the order they are returned by
+ * its iterator, enclosed in square brackets ({@code "[]"}).
+ * Adjacent elements are separated by the characters {@code ", "}
+ * (comma and space). Elements are converted to strings as by
+ * {@link String#valueOf(Object)}.
+ *
+ * @return a string representation of this collection
+ */
+ public final String toString() {
+ StringBuilder sb = new StringBuilder();
+ sb.append('[');
+ Iterator<E> it = iterator();
+ if (it.hasNext()) {
+ for (;;) {
+ Object e = it.next();
+ sb.append(e == this ? "(this Collection)" : e);
+ if (!it.hasNext())
+ break;
+ sb.append(',').append(' ');
+ }
+ }
+ return sb.append(']').toString();
+ }
+
+ public final boolean containsAll(Collection<?> c) {
+ if (c != this) {
+ for (Object e : c) {
+ if (e == null || !contains(e))
+ return false;
+ }
+ }
+ return true;
+ }
+
+ public final boolean removeAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator<E> it = iterator(); it.hasNext();) {
+ if (c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
+ }
+
+ public final boolean retainAll(Collection<?> c) {
+ boolean modified = false;
+ for (Iterator<E> it = iterator(); it.hasNext();) {
+ if (!c.contains(it.next())) {
+ it.remove();
+ modified = true;
+ }
+ }
+ return modified;
+ }
+
+ }
+
+ /**
+ * A view of a ConcurrentHashMap as a {@link Set} of keys, in
+ * which additions may optionally be enabled by mapping to a
+ * common value. This class cannot be directly instantiated.
+ * See {@link #keySet() keySet()},
+ * {@link #keySet(Object) keySet(V)},
+ * {@link #newKeySet() newKeySet()},
+ * {@link #newKeySet(int) newKeySet(int)}.
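+ *
+ * <p>For example, a sketch of use as a concurrent set ({@code process}
+ * and {@code id} are illustrative):
+ * <pre> {@code
+ * Set<Long> ids = ConcurrentHashMap.newKeySet();
+ * if (ids.add(id)) // true only for the first thread to add id
+ *     process(id);}</pre>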
+ */
+ public static class KeySetView<K,V> extends CollectionView<K,V,K>
+ implements Set<K>, java.io.Serializable {
+ private static final long serialVersionUID = 7249069246763182397L;
+ private final V value;
+ KeySetView(ConcurrentHashMap<K,V> map, V value) { // non-public
+ super(map);
+ this.value = value;
+ }
+
+ /**
+ * Returns the default mapped value for additions,
+ * or {@code null} if additions are not supported.
+ *
+ * @return the default mapped value for additions, or {@code null}
+ * if not supported
+ */
+ public V getMappedValue() { return value; }
+
+ /**
+ * {@inheritDoc}
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean contains(Object o) { return map.containsKey(o); }
+
+ /**
+ * Removes the key from this map view, by removing the key (and its
+ * corresponding value) from the backing map. This method does
+ * nothing if the key is not in the map.
+ *
+ * @param o the key to be removed from the backing map
+ * @return {@code true} if the backing map contained the specified key
+ * @throws NullPointerException if the specified key is null
+ */
+ public boolean remove(Object o) { return map.remove(o) != null; }
+
+ /**
+ * @return an iterator over the keys of the backing map
+ */
+ public Iterator<K> iterator() {
+ Node<K,V>[] t;
+ ConcurrentHashMap<K,V> m = map;
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new KeyIterator<K,V>(t, f, 0, f, m);
+ }
+
+ /**
+ * Adds the specified key to this set view by mapping the key to
+ * the default mapped value in the backing map, if defined.
+ *
+ * @param e key to be added
+ * @return {@code true} if this set changed as a result of the call
+ * @throws NullPointerException if the specified key is null
+ * @throws UnsupportedOperationException if no default mapped value
+ * for additions was provided
+ */
+ public boolean add(K e) {
+ V v;
+ if ((v = value) == null)
+ throw new UnsupportedOperationException();
+ return map.internalPut(e, v, true) == null;
+ }
+
+ /**
+ * Adds all of the elements in the specified collection to this set,
+ * as if by calling {@link #add} on each one.
+ *
+ * @param c the elements to be inserted into this set
+ * @return {@code true} if this set changed as a result of the call
+ * @throws NullPointerException if the collection or any of its
+ * elements are {@code null}
+ * @throws UnsupportedOperationException if no default mapped value
+ * for additions was provided
+ */
+ public boolean addAll(Collection<? extends K> c) {
+ boolean added = false;
+ V v;
+ if ((v = value) == null)
+ throw new UnsupportedOperationException();
+ for (K e : c) {
+ if (map.internalPut(e, v, true) == null)
+ added = true;
+ }
+ return added;
+ }
+
+ public int hashCode() {
+ int h = 0;
+ for (K e : this)
+ h += e.hashCode();
+ return h;
+ }
+
+ public boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
+ }
+
+ public Spliterator<K> spliterator() {
+ Node<K,V>[] t;
+ ConcurrentHashMap<K,V> m = map;
+ long n = m.sumCount();
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
+ }
+
+ public void forEach(Consumer<? super K> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ action.accept((K)p.key);
+ }
+ }
+ }
+
+ /**
+ * A view of a ConcurrentHashMap as a {@link Collection} of
+ * values, in which additions are disabled. This class cannot be
+ * directly instantiated. See {@link #values()}.
+ */
+ static final class ValuesView<K,V> extends CollectionView<K,V,V>
+ implements Collection<V>, java.io.Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
+ public final boolean contains(Object o) {
+ return map.containsValue(o);
+ }
+
+ public final boolean remove(Object o) {
+ if (o != null) {
+ for (Iterator<V> it = iterator(); it.hasNext();) {
+ if (o.equals(it.next())) {
+ it.remove();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ public final Iterator<V> iterator() {
+ ConcurrentHashMap<K,V> m = map;
+ Node<K,V>[] t;
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new ValueIterator<K,V>(t, f, 0, f, m);
+ }
+
+ public final boolean add(V e) {
+ throw new UnsupportedOperationException();
+ }
+ public final boolean addAll(Collection<? extends V> c) {
+ throw new UnsupportedOperationException();
+ }
+
+ public Spliterator<V> spliterator() {
+ Node<K,V>[] t;
+ ConcurrentHashMap<K,V> m = map;
+ long n = m.sumCount();
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
+ }
+
+ public void forEach(Consumer<? super V> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ action.accept(p.val);
+ }
+ }
+ }
+
+ /**
+ * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
+ * entries. This class cannot be directly instantiated. See
+ * {@link #entrySet()}.
+ */
+ static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
+ implements Set<Map.Entry<K,V>>, java.io.Serializable {
+ private static final long serialVersionUID = 2249069246763182397L;
+ EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }
+
+ public boolean contains(Object o) {
+ Object k, v, r; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (r = map.get(k)) != null &&
+ (v = e.getValue()) != null &&
+ (v == r || v.equals(r)));
+ }
+
+ public boolean remove(Object o) {
+ Object k, v; Map.Entry<?,?> e;
+ return ((o instanceof Map.Entry) &&
+ (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
+ (v = e.getValue()) != null &&
+ map.remove(k, v));
+ }
+
+ /**
+ * @return an iterator over the entries of the backing map
+ */
+ public Iterator<Map.Entry<K,V>> iterator() {
+ ConcurrentHashMap<K,V> m = map;
+ Node<K,V>[] t;
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new EntryIterator<K,V>(t, f, 0, f, m);
+ }
+
+ public boolean add(Entry<K,V> e) {
+ return map.internalPut(e.getKey(), e.getValue(), false) == null;
+ }
+
+ public boolean addAll(Collection<? extends Entry<K,V>> c) {
+ boolean added = false;
+ for (Entry<K,V> e : c) {
+ if (add(e))
+ added = true;
+ }
+ return added;
+ }
+
+ public final int hashCode() {
+ int h = 0;
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; ) {
+ h += p.hashCode();
+ }
+ }
+ return h;
+ }
+
+ public final boolean equals(Object o) {
+ Set<?> c;
+ return ((o instanceof Set) &&
+ ((c = (Set<?>)o) == this ||
+ (containsAll(c) && c.containsAll(this))));
+ }
+
+ public Spliterator<Map.Entry<K,V>> spliterator() {
+ Node<K,V>[] t;
+ ConcurrentHashMap<K,V> m = map;
+ long n = m.sumCount();
+ int f = (t = m.table) == null ? 0 : t.length;
+ return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
+ }
+
+ public void forEach(Consumer<? super Map.Entry<K,V>> action) {
+ if (action == null) throw new NullPointerException();
+ Node<K,V>[] t;
+ if ((t = map.table) != null) {
+ Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
+ for (Node<K,V> p; (p = it.advance()) != null; )
+ action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
+ }
+ }
+
+ }
+
+ // -------------------------------------------------------
+
+ /**
+ * Base class for bulk tasks. Repeats some fields and code from
+ * class Traverser, because we need to subclass CountedCompleter.
+ */
+ abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
+ Node<K,V>[] tab; // same as Traverser
+ Node<K,V> next;
+ int index;
+ int baseIndex;
+ int baseLimit;
+ final int baseSize;
+ int batch; // split control
+
+ BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
+ super(par);
+ this.batch = b;
+ this.index = this.baseIndex = i;
+ if ((this.tab = t) == null)
+ this.baseSize = this.baseLimit = 0;
+ else if (par == null)
+ this.baseSize = this.baseLimit = t.length;
+ else {
+ this.baseLimit = f;
+ this.baseSize = par.baseSize;
+ }
+ }
+
+ /**
+ * Same as the Traverser version.
+ */
+ final Node<K,V> advance() {
+ Node<K,V> e;
+ if ((e = next) != null)
+ e = e.next;
+ for (;;) {
+ Node<K,V>[] t; int i, n; Object ek;
+ if (e != null)
+ return next = e;
+ if (baseIndex >= baseLimit || (t = tab) == null ||
+ (n = t.length) <= (i = index) || i < 0)
+ return next = null;
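+ // a negative hash marks a special bin: its key holds either
+ // a TreeBin or, for a forwarded bin, the next table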
+ if ((e = tabAt(t, index)) != null && e.hash < 0) {
+ if ((ek = e.key) instanceof TreeBin)
+ e = ((TreeBin<K,V>)ek).first;
+ else {
+ tab = (Node<K,V>[])ek;
+ e = null;
+ continue;
+ }
+ }
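+ // advance with a stride of baseSize, wrapping to the next
+ // base bin once the stride steps past the current table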
+ if ((index += baseSize) >= n)
+ index = ++baseIndex;
}
}
-
- // Read the keys and values, and put the mappings in the table
- for (;;) {
- K key = (K) s.readObject();
- V value = (V) s.readObject();
- if (key == null)
- break;
- put(key, value);
+ }
+
+ /*
+ * Task classes. Coded in a regular but deliberately repetitive
+ * format/style to simplify checking that each variant differs
+ * from the others in exactly the right way. The null screenings
+ * exist because compilers cannot tell that the task arguments
+ * have already been null-checked, so each compute() hoists the
+ * simplest possible null checks to its top to avoid convoluted
+ * trap paths.
+ */
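+
+ /*
+ * Editorial sketch: every compute() below follows the same
+ * splitting idiom: while the batch budget allows, fork a subtask
+ * for the upper half of the remaining range, keep the lower half,
+ * then scan the kept half sequentially and propagate completion.
+ * A self-contained analogue (names are ours, not part of this
+ * class) summing an int[] with a CountedCompleter:
+ *
+ * static final class SumTask extends CountedCompleter<Void> {
+ * final int[] a;
+ * final java.util.concurrent.atomic.AtomicLong sum;
+ * int lo, hi;
+ * SumTask(SumTask parent, int[] a, int lo, int hi,
+ * java.util.concurrent.atomic.AtomicLong sum) {
+ * super(parent); this.a = a; this.lo = lo; this.hi = hi;
+ * this.sum = sum;
+ * }
+ * public void compute() {
+ * while (hi - lo > 1000) {
+ * int h = (lo + hi) >>> 1;
+ * addToPendingCount(1); // expect one child completion
+ * new SumTask(this, a, h, hi, sum).fork(); // child: [h, hi)
+ * hi = h; // we keep [lo, h)
+ * }
+ * long s = 0;
+ * for (int i = lo; i < hi; ++i)
+ * s += a[i];
+ * sum.addAndGet(s);
+ * propagateCompletion(); // count down this task and ancestors
+ * }
+ * }
+ * // usage: new SumTask(null, a, 0, a.length, sum).invoke();
+ */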
+
+ static final class ForEachKeyTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final Consumer<? super K> action;
+ ForEachKeyTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Consumer<? super K> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final Consumer<? super K> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachKeyTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.accept((K)p.key);
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class ForEachValueTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final Consumer<? super V> action;
+ ForEachValueTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Consumer<? super V> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final Consumer<? super V> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachValueTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null;)
+ action.accept(p.val);
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class ForEachEntryTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final Consumer<? super Entry<K,V>> action;
+ ForEachEntryTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Consumer<? super Entry<K,V>> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final Consumer<? super Entry<K,V>> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachEntryTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ action.accept(p);
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class ForEachMappingTask<K,V>
+ extends BulkTask<K,V,Void> {
+ final BiConsumer<? super K, ? super V> action;
+ ForEachMappingTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ BiConsumer<? super K,? super V> action) {
+ super(p, b, i, f, t);
+ this.action = action;
+ }
+ public final void compute() {
+ final BiConsumer<? super K, ? super V> action;
+ if ((action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachMappingTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ action.accept((K)p.key, p.val);
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class ForEachTransformedKeyTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final Function<? super K, ? extends U> transformer;
+ final Consumer<? super U> action;
+ ForEachTransformedKeyTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final Function<? super K, ? extends U> transformer;
+ final Consumer<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedKeyTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply((K)p.key)) != null)
+ action.accept(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class ForEachTransformedValueTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final Function<? super V, ? extends U> transformer;
+ final Consumer<? super U> action;
+ ForEachTransformedValueTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final Function<? super V, ? extends U> transformer;
+ final Consumer<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedValueTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.val)) != null)
+ action.accept(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class ForEachTransformedEntryTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final Function<Map.Entry<K,V>, ? extends U> transformer;
+ final Consumer<? super U> action;
+ ForEachTransformedEntryTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final Function<Map.Entry<K,V>, ? extends U> transformer;
+ final Consumer<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedEntryTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p)) != null)
+ action.accept(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class ForEachTransformedMappingTask<K,V,U>
+ extends BulkTask<K,V,Void> {
+ final BiFunction<? super K, ? super V, ? extends U> transformer;
+ final Consumer<? super U> action;
+ ForEachTransformedMappingTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ BiFunction<? super K, ? super V, ? extends U> transformer,
+ Consumer<? super U> action) {
+ super(p, b, i, f, t);
+ this.transformer = transformer; this.action = action;
+ }
+ public final void compute() {
+ final BiFunction<? super K, ? super V, ? extends U> transformer;
+ final Consumer<? super U> action;
+ if ((transformer = this.transformer) != null &&
+ (action = this.action) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ new ForEachTransformedMappingTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ transformer, action).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply((K)p.key, p.val)) != null)
+ action.accept(u);
+ }
+ propagateCompletion();
+ }
+ }
+ }
+
+ static final class SearchKeysTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Function<? super K, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchKeysTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Function<? super K, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final Function<? super K, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchKeysTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply((K)p.key)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ break;
+ }
+ }
+ }
+ }
+ }
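+
+ /*
+ * Editorial note: the Search tasks above and below terminate
+ * early in two ways: every split step and every scan step first
+ * polls result.get(), so sibling tasks stop forking and scanning
+ * once any thread has published a non-null result; and the thread
+ * that wins the compareAndSet calls quietlyCompleteRoot(), which
+ * completes the whole tree without draining pending counts,
+ * unblocking the joining caller immediately.
+ */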
+
+ static final class SearchValuesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Function<? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchValuesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Function<? super V, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final Function<? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchValuesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply(p.val)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ static final class SearchEntriesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Function<Entry<K,V>, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchEntriesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ Function<Entry<K,V>, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final Function<Entry<K,V>, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchEntriesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply(p)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ static final class SearchMappingsTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final BiFunction<? super K, ? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ SearchMappingsTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ BiFunction<? super K, ? super V, ? extends U> searchFunction,
+ AtomicReference<U> result) {
+ super(p, b, i, f, t);
+ this.searchFunction = searchFunction; this.result = result;
+ }
+ public final U getRawResult() { return result.get(); }
+ public final void compute() {
+ final BiFunction<? super K, ? super V, ? extends U> searchFunction;
+ final AtomicReference<U> result;
+ if ((searchFunction = this.searchFunction) != null &&
+ (result = this.result) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ if (result.get() != null)
+ return;
+ addToPendingCount(1);
+ new SearchMappingsTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ searchFunction, result).fork();
+ }
+ while (result.get() == null) {
+ U u;
+ Node<K,V> p;
+ if ((p = advance()) == null) {
+ propagateCompletion();
+ break;
+ }
+ if ((u = searchFunction.apply((K)p.key, p.val)) != null) {
+ if (result.compareAndSet(null, u))
+ quietlyCompleteRoot();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ static final class ReduceKeysTask<K,V>
+ extends BulkTask<K,V,K> {
+ final BiFunction<? super K, ? super K, ? extends K> reducer;
+ K result;
+ ReduceKeysTask<K,V> rights, nextRight;
+ ReduceKeysTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ ReduceKeysTask<K,V> nextRight,
+ BiFunction<? super K, ? super K, ? extends K> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.reducer = reducer;
+ }
+ public final K getRawResult() { return result; }
+ public final void compute() {
+ final BiFunction<? super K, ? super K, ? extends K> reducer;
+ if ((reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new ReduceKeysTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, reducer)).fork();
+ }
+ K r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ K u = (K)p.key;
+ r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ ReduceKeysTask<K,V>
+ t = (ReduceKeysTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ K tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
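+
+ /*
+ * Editorial note: each Reduce and MapReduce task links the
+ * subtasks it forks onto its "rights" chain (newest first, via
+ * nextRight). The firstComplete()/nextComplete() loop then walks
+ * back up the completion tree, folding each finished right child
+ * into its parent; null partial results are skipped, so the
+ * reducer is only applied to pairs of non-null operands. The
+ * merge step, in essence (t = completing parent, s = child):
+ *
+ * for (s = t.rights; s != null; s = t.rights = s.nextRight)
+ * if (s.result != null)
+ * t.result = (t.result == null) ? s.result
+ * : reducer.apply(t.result, s.result);
+ */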
+
+ static final class ReduceValuesTask<K,V>
+ extends BulkTask<K,V,V> {
+ final BiFunction<? super V, ? super V, ? extends V> reducer;
+ V result;
+ ReduceValuesTask<K,V> rights, nextRight;
+ ReduceValuesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ ReduceValuesTask<K,V> nextRight,
+ BiFunction<? super V, ? super V, ? extends V> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.reducer = reducer;
+ }
+ public final V getRawResult() { return result; }
+ public final void compute() {
+ final BiFunction<? super V, ? super V, ? extends V> reducer;
+ if ((reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new ReduceValuesTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, reducer)).fork();
+ }
+ V r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ V v = p.val;
+ r = (r == null) ? v : reducer.apply(r, v);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ ReduceValuesTask<K,V>
+ t = (ReduceValuesTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ V tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class ReduceEntriesTask<K,V>
+ extends BulkTask<K,V,Map.Entry<K,V>> {
+ final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
+ Map.Entry<K,V> result;
+ ReduceEntriesTask<K,V> rights, nextRight;
+ ReduceEntriesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ ReduceEntriesTask<K,V> nextRight,
+ BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.reducer = reducer;
+ }
+ public final Map.Entry<K,V> getRawResult() { return result; }
+ public final void compute() {
+ final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
+ if ((reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new ReduceEntriesTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, reducer)).fork();
+ }
+ Map.Entry<K,V> r = null;
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = (r == null) ? p : reducer.apply(r, p);
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ ReduceEntriesTask<K,V>
+ t = (ReduceEntriesTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ Map.Entry<K,V> tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceKeysTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Function<? super K, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceKeysTask<K,V,U> rights, nextRight;
+ MapReduceKeysTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysTask<K,V,U> nextRight,
+ Function<? super K, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final Function<? super K, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply((K)p.key)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceKeysTask<K,V,U>
+ t = (MapReduceKeysTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceValuesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Function<? super V, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceValuesTask<K,V,U> rights, nextRight;
+ MapReduceValuesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesTask<K,V,U> nextRight,
+ Function<? super V, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final Function<? super V, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p.val)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceValuesTask<K,V,U>
+ t = (MapReduceValuesTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceEntriesTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final Function<Map.Entry<K,V>, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceEntriesTask<K,V,U> rights, nextRight;
+ MapReduceEntriesTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesTask<K,V,U> nextRight,
+ Function<Map.Entry<K,V>, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final Function<Map.Entry<K,V>, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply(p)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceEntriesTask<K,V,U>
+ t = (MapReduceEntriesTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceMappingsTask<K,V,U>
+ extends BulkTask<K,V,U> {
+ final BiFunction<? super K, ? super V, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ U result;
+ MapReduceMappingsTask<K,V,U> rights, nextRight;
+ MapReduceMappingsTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsTask<K,V,U> nextRight,
+ BiFunction<? super K, ? super V, ? extends U> transformer,
+ BiFunction<? super U, ? super U, ? extends U> reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.reducer = reducer;
+ }
+ public final U getRawResult() { return result; }
+ public final void compute() {
+ final BiFunction<? super K, ? super V, ? extends U> transformer;
+ final BiFunction<? super U, ? super U, ? extends U> reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsTask<K,V,U>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, reducer)).fork();
+ }
+ U r = null;
+ for (Node<K,V> p; (p = advance()) != null; ) {
+ U u;
+ if ((u = transformer.apply((K)p.key, p.val)) != null)
+ r = (r == null) ? u : reducer.apply(r, u);
+ }
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceMappingsTask<K,V,U>
+ t = (MapReduceMappingsTask<K,V,U>)c,
+ s = t.rights;
+ while (s != null) {
+ U tr, sr;
+ if ((sr = s.result) != null)
+ t.result = (((tr = t.result) == null) ? sr :
+ reducer.apply(tr, sr));
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceKeysToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ToDoubleFunction<? super K> transformer;
+ final DoubleBinaryOperator reducer;
+ final double basis;
+ double result;
+ MapReduceKeysToDoubleTask<K,V> rights, nextRight;
+ MapReduceKeysToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysToDoubleTask<K,V> nextRight,
+ ToDoubleFunction<? super K> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ToDoubleFunction<? super K> transformer;
+ final DoubleBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceKeysToDoubleTask<K,V>
+ t = (MapReduceKeysToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsDouble(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
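+
+ /*
+ * Editorial note: every forked subtask of the primitive-valued
+ * MapReduce...To{Double,Long,Int} tasks starts accumulating from
+ * the same basis, and the partial results are then combined with
+ * the reducer, so the basis is folded in once per task and must
+ * be an identity for the reducer (0 for a sum,
+ * Double.NEGATIVE_INFINITY for a max, and so on). A hypothetical
+ * call shape, for a map with String values (the public entry
+ * point may differ in this revision):
+ *
+ * double total = map.reduceValuesToDouble(
+ * v -> v.length(), // transformer
+ * 0.0, // basis: identity for +
+ * Double::sum); // reducer
+ */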
+
+ static final class MapReduceValuesToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ToDoubleFunction<? super V> transformer;
+ final DoubleBinaryOperator reducer;
+ final double basis;
+ double result;
+ MapReduceValuesToDoubleTask<K,V> rights, nextRight;
+ MapReduceValuesToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesToDoubleTask<K,V> nextRight,
+ ToDoubleFunction<? super V> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ToDoubleFunction<? super V> transformer;
+ final DoubleBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceValuesToDoubleTask<K,V>
+ t = (MapReduceValuesToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsDouble(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceEntriesToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ToDoubleFunction<Map.Entry<K,V>> transformer;
+ final DoubleBinaryOperator reducer;
+ final double basis;
+ double result;
+ MapReduceEntriesToDoubleTask<K,V> rights, nextRight;
+ MapReduceEntriesToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesToDoubleTask<K,V> nextRight,
+ ToDoubleFunction<Map.Entry<K,V>> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ToDoubleFunction<Map.Entry<K,V>> transformer;
+ final DoubleBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsDouble(r, transformer.applyAsDouble(p));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceEntriesToDoubleTask<K,V>
+ t = (MapReduceEntriesToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsDouble(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceMappingsToDoubleTask<K,V>
+ extends BulkTask<K,V,Double> {
+ final ToDoubleBiFunction<? super K, ? super V> transformer;
+ final DoubleBinaryOperator reducer;
+ final double basis;
+ double result;
+ MapReduceMappingsToDoubleTask<K,V> rights, nextRight;
+ MapReduceMappingsToDoubleTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsToDoubleTask<K,V> nextRight,
+ ToDoubleBiFunction<? super K, ? super V> transformer,
+ double basis,
+ DoubleBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Double getRawResult() { return result; }
+ public final void compute() {
+ final ToDoubleBiFunction<? super K, ? super V> transformer;
+ final DoubleBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ double r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsToDoubleTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key, p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceMappingsToDoubleTask<K,V>
+ t = (MapReduceMappingsToDoubleTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsDouble(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceKeysToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ToLongFunction<? super K> transformer;
+ final LongBinaryOperator reducer;
+ final long basis;
+ long result;
+ MapReduceKeysToLongTask<K,V> rights, nextRight;
+ MapReduceKeysToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysToLongTask<K,V> nextRight,
+ ToLongFunction<? super K> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ToLongFunction<? super K> transformer;
+ final LongBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceKeysToLongTask<K,V>
+ t = (MapReduceKeysToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsLong(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceValuesToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ToLongFunction<? super V> transformer;
+ final LongBinaryOperator reducer;
+ final long basis;
+ long result;
+ MapReduceValuesToLongTask<K,V> rights, nextRight;
+ MapReduceValuesToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesToLongTask<K,V> nextRight,
+ ToLongFunction<? super V> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ToLongFunction<? super V> transformer;
+ final LongBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceValuesToLongTask<K,V>
+ t = (MapReduceValuesToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsLong(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceEntriesToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ToLongFunction<Map.Entry<K,V>> transformer;
+ final LongBinaryOperator reducer;
+ final long basis;
+ long result;
+ MapReduceEntriesToLongTask<K,V> rights, nextRight;
+ MapReduceEntriesToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesToLongTask<K,V> nextRight,
+ ToLongFunction<Map.Entry<K,V>> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ToLongFunction<Map.Entry<K,V>> transformer;
+ final LongBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsLong(r, transformer.applyAsLong(p));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceEntriesToLongTask<K,V>
+ t = (MapReduceEntriesToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsLong(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceMappingsToLongTask<K,V>
+ extends BulkTask<K,V,Long> {
+ final ToLongBiFunction<? super K, ? super V> transformer;
+ final LongBinaryOperator reducer;
+ final long basis;
+ long result;
+ MapReduceMappingsToLongTask<K,V> rights, nextRight;
+ MapReduceMappingsToLongTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsToLongTask<K,V> nextRight,
+ ToLongBiFunction<? super K, ? super V> transformer,
+ long basis,
+ LongBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Long getRawResult() { return result; }
+ public final void compute() {
+ final ToLongBiFunction<? super K, ? super V> transformer;
+ final LongBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ long r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsToLongTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key, p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceMappingsToLongTask<K,V>
+ t = (MapReduceMappingsToLongTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsLong(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceKeysToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ToIntFunction<? super K> transformer;
+ final IntBinaryOperator reducer;
+ final int basis;
+ int result;
+ MapReduceKeysToIntTask<K,V> rights, nextRight;
+ MapReduceKeysToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceKeysToIntTask<K,V> nextRight,
+ ToIntFunction<? super K> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ToIntFunction<? super K> transformer;
+ final IntBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceKeysToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceKeysToIntTask<K,V>
+ t = (MapReduceKeysToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsInt(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceValuesToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ToIntFunction<? super V> transformer;
+ final IntBinaryOperator reducer;
+ final int basis;
+ int result;
+ MapReduceValuesToIntTask<K,V> rights, nextRight;
+ MapReduceValuesToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceValuesToIntTask<K,V> nextRight,
+ ToIntFunction<? super V> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ToIntFunction<? super V> transformer;
+ final IntBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceValuesToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceValuesToIntTask<K,V>
+ t = (MapReduceValuesToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsInt(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceEntriesToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ToIntFunction<Map.Entry<K,V>> transformer;
+ final IntBinaryOperator reducer;
+ final int basis;
+ int result;
+ MapReduceEntriesToIntTask<K,V> rights, nextRight;
+ MapReduceEntriesToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceEntriesToIntTask<K,V> nextRight,
+ ToIntFunction<Map.Entry<K,V>> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ToIntFunction<Map.Entry<K,V>> transformer;
+ final IntBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceEntriesToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsInt(r, transformer.applyAsInt(p));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceEntriesToIntTask<K,V>
+ t = (MapReduceEntriesToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsInt(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
+ }
+ }
+
+ static final class MapReduceMappingsToIntTask<K,V>
+ extends BulkTask<K,V,Integer> {
+ final ToIntBiFunction<? super K, ? super V> transformer;
+ final IntBinaryOperator reducer;
+ final int basis;
+ int result;
+ MapReduceMappingsToIntTask<K,V> rights, nextRight;
+ MapReduceMappingsToIntTask
+ (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
+ MapReduceMappingsToIntTask<K,V> nextRight,
+ ToIntBiFunction<? super K, ? super V> transformer,
+ int basis,
+ IntBinaryOperator reducer) {
+ super(p, b, i, f, t); this.nextRight = nextRight;
+ this.transformer = transformer;
+ this.basis = basis; this.reducer = reducer;
+ }
+ public final Integer getRawResult() { return result; }
+ public final void compute() {
+ final ToIntBiFunction<? super K, ? super V> transformer;
+ final IntBinaryOperator reducer;
+ if ((transformer = this.transformer) != null &&
+ (reducer = this.reducer) != null) {
+ int r = this.basis;
+ for (int i = baseIndex, f, h; batch > 0 &&
+ (h = ((f = baseLimit) + i) >>> 1) > i;) {
+ addToPendingCount(1);
+ (rights = new MapReduceMappingsToIntTask<K,V>
+ (this, batch >>>= 1, baseLimit = h, f, tab,
+ rights, transformer, r, reducer)).fork();
+ }
+ for (Node<K,V> p; (p = advance()) != null; )
+ r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key, p.val));
+ result = r;
+ CountedCompleter<?> c;
+ for (c = firstComplete(); c != null; c = c.nextComplete()) {
+ MapReduceMappingsToIntTask<K,V>
+ t = (MapReduceMappingsToIntTask<K,V>)c,
+ s = t.rights;
+ while (s != null) {
+ t.result = reducer.applyAsInt(t.result, s.result);
+ s = t.rights = s.nextRight;
+ }
+ }
+ }
}
}
// Unsafe mechanics
- private static final sun.misc.Unsafe UNSAFE;
- private static final long SBASE;
- private static final int SSHIFT;
- private static final long TBASE;
- private static final int TSHIFT;
- private static final long HASHSEED_OFFSET;
- private static final long SEGSHIFT_OFFSET;
- private static final long SEGMASK_OFFSET;
- private static final long SEGMENTS_OFFSET;
+ private static final sun.misc.Unsafe U;
+ private static final long SIZECTL;
+ private static final long TRANSFERINDEX;
+ private static final long TRANSFERORIGIN;
+ private static final long BASECOUNT;
+ private static final long CELLSBUSY;
+ private static final long CELLVALUE;
+ private static final long ABASE;
+ private static final int ASHIFT;
static {
- int ss, ts;
try {
- UNSAFE = sun.misc.Unsafe.getUnsafe();
- Class<?> tc = HashEntry[].class;
- Class<?> sc = Segment[].class;
- TBASE = UNSAFE.arrayBaseOffset(tc);
- SBASE = UNSAFE.arrayBaseOffset(sc);
- ts = UNSAFE.arrayIndexScale(tc);
- ss = UNSAFE.arrayIndexScale(sc);
- HASHSEED_OFFSET = UNSAFE.objectFieldOffset(
- ConcurrentHashMap.class.getDeclaredField("hashSeed"));
- SEGSHIFT_OFFSET = UNSAFE.objectFieldOffset(
- ConcurrentHashMap.class.getDeclaredField("segmentShift"));
- SEGMASK_OFFSET = UNSAFE.objectFieldOffset(
- ConcurrentHashMap.class.getDeclaredField("segmentMask"));
- SEGMENTS_OFFSET = UNSAFE.objectFieldOffset(
- ConcurrentHashMap.class.getDeclaredField("segments"));
+ U = sun.misc.Unsafe.getUnsafe();
+ Class<?> k = ConcurrentHashMap.class;
+ SIZECTL = U.objectFieldOffset
+ (k.getDeclaredField("sizeCtl"));
+ TRANSFERINDEX = U.objectFieldOffset
+ (k.getDeclaredField("transferIndex"));
+ TRANSFERORIGIN = U.objectFieldOffset
+ (k.getDeclaredField("transferOrigin"));
+ BASECOUNT = U.objectFieldOffset
+ (k.getDeclaredField("baseCount"));
+ CELLSBUSY = U.objectFieldOffset
+ (k.getDeclaredField("cellsBusy"));
+ Class<?> ck = Cell.class;
+ CELLVALUE = U.objectFieldOffset
+ (ck.getDeclaredField("value"));
+ Class<?> sc = Node[].class;
+ ABASE = U.arrayBaseOffset(sc);
+ int scale = U.arrayIndexScale(sc);
+ if ((scale & (scale - 1)) != 0)
+ throw new Error("data type scale not a power of two");
+ ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
} catch (Exception e) {
throw new Error(e);
}
- if ((ss & (ss-1)) != 0 || (ts & (ts-1)) != 0)
- throw new Error("data type scale not a power of two");
- SSHIFT = 31 - Integer.numberOfLeadingZeros(ss);
- TSHIFT = 31 - Integer.numberOfLeadingZeros(ts);
}
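+
+ /*
+ * Editorial note: ABASE and ASHIFT support volatile reads and CAS
+ * of individual table slots: element i of a Node[] lives at raw
+ * offset ABASE + ((long)i << ASHIFT), which is why the index
+ * scale must be a power of two. This yields accessors of the
+ * form (a sketch matching the tabAt used by advance() above):
+ *
+ * static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
+ * return (Node<K,V>)U.getObjectVolatile
+ * (tab, ((long)i << ASHIFT) + ABASE);
+ * }
+ */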
-
}