author:      dl
date:        Tue, 04 Jun 2013 21:59:23 +0100
changeset:   17945:97634ef9dd1c
parent:      17717:fe0b28a1a3bd
child:       17953:9a56976d1ab2
permissions: -rw-r--r--

/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * This file is available under and governed by the GNU General Public
 * License version 2 only, as published by the Free Software Foundation.
 * However, the following notice accompanied the original version of this
 * file:
 *
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;
import java.io.Serializable;
import java.io.ObjectStreamField;
import java.lang.reflect.ParameterizedType;
import java.lang.reflect.Type;
import java.util.AbstractMap;
import java.util.Arrays;
import java.util.Collection;
import java.util.Comparator;
import java.util.ConcurrentModificationException;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.Spliterator;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.StampedLock;
import java.util.function.BiConsumer;
import java.util.function.BiFunction;
import java.util.function.BinaryOperator;
import java.util.function.Consumer;
import java.util.function.DoubleBinaryOperator;
import java.util.function.Function;
import java.util.function.IntBinaryOperator;
import java.util.function.LongBinaryOperator;
import java.util.function.ToDoubleBiFunction;
import java.util.function.ToDoubleFunction;
import java.util.function.ToIntBiFunction;
import java.util.function.ToIntFunction;
import java.util.function.ToLongBiFunction;
import java.util.function.ToLongFunction;
import java.util.stream.Stream;

/**
 * A hash table supporting full concurrency of retrievals and
 * high expected concurrency for updates.  This class obeys the
 * same functional specification as {@link java.util.Hashtable}, and
 * includes versions of methods corresponding to each method of
 * {@code Hashtable}. However, even though all operations are
 * thread-safe, retrieval operations do <em>not</em> entail locking,
 * and there is <em>not</em> any support for locking the entire table
 * in a way that prevents all access.  This class is fully
 * interoperable with {@code Hashtable} in programs that rely on its
 * thread safety but not on its synchronization details.
 *
 * <p>Retrieval operations (including {@code get}) generally do not
 * block, so may overlap with update operations (including {@code put}
 * and {@code remove}). Retrievals reflect the results of the most
 * recently <em>completed</em> update operations holding upon their
 * onset. (More formally, an update operation for a given key bears a
 * <em>happens-before</em> relation with any (non-null) retrieval for
 * that key reporting the updated value.)  For aggregate operations
 * such as {@code putAll} and {@code clear}, concurrent retrievals may
 * reflect insertion or removal of only some entries.  Similarly,
 * Iterators and Enumerations return elements reflecting the state of
 * the hash table at some point at or since the creation of the
 * iterator/enumeration.  They do <em>not</em> throw {@link
 * ConcurrentModificationException}.  However, iterators are designed
 * to be used by only one thread at a time.  Bear in mind that the
 * results of aggregate status methods including {@code size}, {@code
 * isEmpty}, and {@code containsValue} are typically useful only when
 * a map is not undergoing concurrent updates in other threads.
 * Otherwise the results of these methods reflect transient states
 * that may be adequate for monitoring or estimation purposes, but not
 * for program control.
 *
 * <p>The table is dynamically expanded when there are too many
 * collisions (i.e., keys that have distinct hash codes but fall into
 * the same slot modulo the table size), with the expected average
 * effect of maintaining roughly two bins per mapping (corresponding
 * to a 0.75 load factor threshold for resizing). There may be much
 * variance around this average as mappings are added and removed, but
 * overall, this maintains a commonly accepted time/space tradeoff for
 * hash tables.  However, resizing this or any other kind of hash
 * table may be a relatively slow operation. When possible, it is a
 * good idea to provide a size estimate as an optional {@code
 * initialCapacity} constructor argument. An additional optional
 * {@code loadFactor} constructor argument provides a further means of
 * customizing initial table capacity by specifying the table density
 * to be used in calculating the amount of space to allocate for the
 * given number of elements.  Also, for compatibility with previous
 * versions of this class, constructors may optionally specify an
 * expected {@code concurrencyLevel} as an additional hint for
 * internal sizing.  Note that using many keys with exactly the same
 * {@code hashCode()} is a sure way to slow down performance of any
 * hash table. To ameliorate impact, when keys are {@link Comparable},
 * this class may use comparison order among keys to help break ties.
 *
 * <p>A {@link Set} projection of a ConcurrentHashMap may be created
 * (using {@link #newKeySet()} or {@link #newKeySet(int)}), or viewed
 * (using {@link #keySet(Object)}) when only keys are of interest, and the
 * mapped values are (perhaps transiently) not used or all take the
 * same mapping value.
 *
 * <p>A ConcurrentHashMap can be used as a scalable frequency map (a
 * form of histogram or multiset) by using {@link
 * java.util.concurrent.atomic.LongAdder} values and initializing via
 * {@link #computeIfAbsent computeIfAbsent}. For example, to add a count
 * to a {@code ConcurrentHashMap<String,LongAdder> freqs}, you can use
 * {@code freqs.computeIfAbsent(k, x -> new LongAdder()).increment();}
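 *
 * <p>A fuller sketch of that idiom (illustrative only; {@code word}
 * stands for an arbitrary String key):
 * <pre> {@code
 * ConcurrentHashMap<String,LongAdder> freqs = new ConcurrentHashMap<>();
 * freqs.computeIfAbsent(word, x -> new LongAdder()).increment();
 * long n = freqs.get(word).sum(); // occurrences recorded so far
 * }</pre>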
 *
 * <p>This class and its views and iterators implement all of the
 * <em>optional</em> methods of the {@link Map} and {@link Iterator}
 * interfaces.
 *
 * <p>Like {@link Hashtable} but unlike {@link HashMap}, this class
 * does <em>not</em> allow {@code null} to be used as a key or value.
 *
 * <p>ConcurrentHashMaps support a set of sequential and parallel bulk
 * operations that, unlike most {@link Stream} methods, are designed
 * to be safely, and often sensibly, applied even with maps that are
 * being concurrently updated by other threads; for example, when
 * computing a snapshot summary of the values in a shared registry.
 * There are three kinds of operation, each with four forms, accepting
 * functions with Keys, Values, Entries, and (Key, Value) arguments
 * and/or return values. Because the elements of a ConcurrentHashMap
 * are not ordered in any particular way, and may be processed in
 * different orders in different parallel executions, the correctness
 * of supplied functions should not depend on any ordering, or on any
 * other objects or values that may transiently change while
 * computation is in progress; and except for forEach actions, should
 * ideally be side-effect-free. Bulk operations on {@link java.util.Map.Entry}
 * objects do not support method {@code setValue}.
 *
 * <ul>
 * <li> forEach: Perform a given action on each element.
 * A variant form applies a given transformation on each element
 * before performing the action.</li>
 *
 * <li> search: Return the first available non-null result of
 * applying a given function on each element; skipping further
 * search when a result is found.</li>
 *
 * <li> reduce: Accumulate each element.  The supplied reduction
 * function cannot rely on ordering (more formally, it should be
 * both associative and commutative).  There are five variants:
 *
 * <ul>
 *
 * <li> Plain reductions. (There is not a form of this method for
 * (key, value) function arguments since there is no corresponding
 * return type.)</li>
 *
 * <li> Mapped reductions that accumulate the results of a given
 * function applied to each element.</li>
 *
 * <li> Reductions to scalar doubles, longs, and ints, using a
 * given basis value.</li>
 *
 * </ul>
 * </li>
 * </ul>
 *
 * <p>These bulk operations accept a {@code parallelismThreshold}
 * argument. Methods proceed sequentially if the current map size is
 * estimated to be less than the given threshold. Using a value of
 * {@code Long.MAX_VALUE} suppresses all parallelism.  Using a value
 * of {@code 1} results in maximal parallelism by partitioning into
 * enough subtasks to fully utilize the {@link
 * ForkJoinPool#commonPool()} that is used for all parallel
 * computations. Normally, you would initially choose one of these
 * extreme values, and then measure performance of using in-between
 * values that trade off overhead versus throughput.
 *
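 * <p>As an illustrative sketch (assuming a {@code
 * ConcurrentHashMap<String,Long> map}), summing all values with
 * maximal parallelism via one of the scalar reduction forms:
 * <pre> {@code
 * long sum = map.reduceValuesToLong(1L, Long::longValue, 0L, Long::sum);
 * }</pre>
 *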
 * <p>The concurrency properties of bulk operations follow
 * from those of ConcurrentHashMap: Any non-null result returned
 * from {@code get(key)} and related access methods bears a
 * happens-before relation with the associated insertion or
 * update.  The result of any bulk operation reflects the
 * composition of these per-element relations (but is not
 * necessarily atomic with respect to the map as a whole unless it
 * is somehow known to be quiescent).  Conversely, because keys
 * and values in the map are never null, null serves as a reliable
 * atomic indicator of the current lack of any result.  To
 * maintain this property, null serves as an implicit basis for
 * all non-scalar reduction operations. For the double, long, and
 * int versions, the basis should be one that, when combined with
 * any other value, returns that other value (more formally, it
 * should be the identity element for the reduction). Most common
 * reductions have these properties; for example, computing a sum
 * with basis 0 or a minimum with basis MAX_VALUE.
 *
 * <p>Search and transformation functions provided as arguments
 * should similarly return null to indicate the lack of any result
 * (in which case it is not used). In the case of mapped
 * reductions, this also enables transformations to serve as
 * filters, returning null (or, in the case of primitive
 * specializations, the identity basis) if the element should not
 * be combined. You can create compound transformations and
 * filterings by composing them yourself under this "null means
 * there is nothing there now" rule before using them in search or
 * reduce operations.
 *
 * <p>Methods accepting and/or returning Entry arguments maintain
 * key-value associations. They may be useful for example when
 * finding the key for the greatest value. Note that "plain" Entry
 * arguments can be supplied using {@code new
 * AbstractMap.SimpleEntry(k,v)}.
 *
 * <p>Bulk operations may complete abruptly, throwing an
 * exception encountered in the application of a supplied
 * function. Bear in mind when handling such exceptions that other
 * concurrently executing functions could also have thrown
 * exceptions, or would have done so if the first exception had
 * not occurred.
 *
 * <p>Speedups for parallel compared to sequential forms are common
 * but not guaranteed.  Parallel operations involving brief functions
 * on small maps may execute more slowly than sequential forms if the
 * underlying work to parallelize the computation is more expensive
 * than the computation itself.  Similarly, parallelization may not
 * lead to much actual parallelism if all processors are busy
 * performing unrelated tasks.
 *
 * <p>All arguments to all task methods must be non-null.
 *
 * <p>This class is a member of the
 * <a href="{@docRoot}/../technotes/guides/collections/index.html">
 * Java Collections Framework</a>.
 *
 * @since 1.5
 * @author Doug Lea
 * @param <K> the type of keys maintained by this map
 * @param <V> the type of mapped values
 */
@SuppressWarnings({"unchecked", "rawtypes", "serial"})
public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
    implements ConcurrentMap<K,V>, Serializable {

    private static final long serialVersionUID = 7249069246763182397L;

    /*
     * Overview:
     *
     * The primary design goal of this hash table is to maintain
     * concurrent readability (typically method get(), but also
     * iterators and related methods) while minimizing update
     * contention. Secondary goals are to keep space consumption about
     * the same or better than java.util.HashMap, and to support high
     * initial insertion rates on an empty table by many threads.
     *
     * Each key-value mapping is held in a Node.  Because Node key
     * fields can contain special values, they are defined using plain
     * Object types (not type "K"). This leads to a lot of explicit
     * casting (and the use of class-wide warning suppressions).  It
     * also allows some of the public methods to be factored into a
     * smaller number of internal methods (although sadly not so for
     * the five variants of put-related operations). The
     * validation-based approach explained below leads to a lot of
     * code sprawl because retry-control precludes factoring into
     * smaller methods.
     *
     * The table is lazily initialized to a power-of-two size upon the
     * first insertion.  Each bin in the table normally contains a
     * list of Nodes (most often, the list has only zero or one Node).
     * Table accesses require volatile/atomic reads, writes, and
     * CASes.  Because there is no other way to arrange this without
     * adding further indirections, we use intrinsics
     * (sun.misc.Unsafe) operations.
     *
     * We use the top (sign) bit of Node hash fields for control
     * purposes -- it is available anyway because of addressing
     * constraints.  Nodes with negative hash fields are forwarding
     * nodes to either TreeBins or resized tables.  The lower 31 bits
     * of each normal Node's hash field contain a transformation of
     * the key's hash code.
     *
     * Insertion (via put or its variants) of the first node in an
     * empty bin is performed by just CASing it to the bin.  This is
     * by far the most common case for put operations under most
     * key/hash distributions.  Other update operations (insert,
     * delete, and replace) require locks.  We do not want to waste
     * the space required to associate a distinct lock object with
     * each bin, so instead use the first node of a bin list itself as
     * a lock. Locking support for these locks relies on builtin
     * "synchronized" monitors.
     *
     * Using the first node of a list as a lock does not by itself
     * suffice though: When a node is locked, any update must first
     * validate that it is still the first node after locking it, and
     * retry if not. Because new nodes are always appended to lists,
     * once a node is first in a bin, it remains first until deleted
     * or the bin becomes invalidated (upon resizing).
     *
     * The main disadvantage of per-bin locks is that other update
     * operations on other nodes in a bin list protected by the same
     * lock can stall, for example when user equals() or mapping
     * functions take a long time.  However, statistically, under
     * random hash codes, this is not a common problem.  Ideally, the
     * frequency of nodes in bins follows a Poisson distribution
     * (http://en.wikipedia.org/wiki/Poisson_distribution) with a
     * parameter of about 0.5 on average, given the resizing threshold
     * of 0.75, although with a large variance because of resizing
     * granularity. Ignoring variance, the expected occurrences of
     * list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The
     * first values are:
     *
     * 0:    0.60653066
     * 1:    0.30326533
     * 2:    0.07581633
     * 3:    0.01263606
     * 4:    0.00157952
     * 5:    0.00015795
     * 6:    0.00001316
     * 7:    0.00000094
     * 8:    0.00000006
     * more: less than 1 in ten million
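     * (A worked instance of the formula: for k = 8,
     * exp(-0.5) * pow(0.5, 8) / factorial(8)
     * = 0.60653066 * 0.00390625 / 40320 ~= 6.0e-8,
     * matching the last entry above.)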
     *
     * Lock contention probability for two threads accessing distinct
     * elements is roughly 1 / (8 * #elements) under random hashes.
     *
     * Actual hash code distributions encountered in practice
     * sometimes deviate significantly from uniform randomness.  This
     * includes the case when N > (1<<30), so some keys MUST collide.
     * Similarly for dumb or hostile usages in which multiple keys are
     * designed to have identical hash codes. Also, although we guard
     * against the worst effects of this (see method spread), sets of
     * hashes may differ only in bits that do not impact their bin
     * index for a given power-of-two mask.  So we use a secondary
     * strategy that applies when the number of nodes in a bin exceeds
     * a threshold, and at least one of the keys implements
     * Comparable.  These TreeBins use a balanced tree to hold nodes
     * (a specialized form of red-black trees), bounding search time
     * to O(log N).  Each search step in a TreeBin is at least twice as
     * slow as in a regular list, but given that N cannot exceed
     * (1<<64) (before running out of addresses) this bounds search
     * steps, lock hold times, etc, to reasonable constants (roughly
     * 100 nodes inspected per operation worst case) so long as keys
     * are Comparable (which is very common -- String, Long, etc).
     * TreeBin nodes (TreeNodes) also maintain the same "next"
     * traversal pointers as regular nodes, so can be traversed in
     * iterators in the same way.
     *
     * The table is resized when occupancy exceeds a percentage
     * threshold (nominally, 0.75, but see below).  Any thread
     * noticing an overfull bin may assist in resizing after the
     * initiating thread allocates and sets up the replacement
     * array. However, rather than stalling, these other threads may
     * proceed with insertions etc.  The use of TreeBins shields us
     * from the worst case effects of overfilling while resizes are in
     * progress.  Resizing proceeds by transferring bins, one by one,
     * from the table to the next table. To enable concurrency, the
     * next table must be (incrementally) prefilled with place-holders
     * serving as reverse forwarders to the old table.  Because we are
     * using power-of-two expansion, the elements from each bin must
     * either stay at same index, or move with a power of two
     * offset. We eliminate unnecessary node creation by catching
     * cases where old nodes can be reused because their next fields
     * won't change.  On average, only about one-sixth of them need
     * cloning when a table doubles. The nodes they replace will be
     * garbage collectable as soon as they are no longer referenced by
     * any reader thread that may be in the midst of concurrently
     * traversing the table.  Upon transfer, the old table bin contains
     * only a special forwarding node (with hash field "MOVED") that
     * contains the next table as its key. On encountering a
     * forwarding node, access and update operations restart, using
     * the new table.
     *
     * Each bin transfer requires its bin lock, which can stall
     * waiting for locks while resizing. However, because other
     * threads can join in and help resize rather than contend for
     * locks, average aggregate waits become shorter as resizing
     * progresses.  The transfer operation must also ensure that all
     * accessible bins in both the old and new table are usable by any
     * traversal.  This is arranged by proceeding from the last bin
     * (table.length - 1) up towards the first.  Upon seeing a
     * forwarding node, traversals (see class Traverser) arrange to
     * move to the new table without revisiting nodes.  However, to
     * ensure that no intervening nodes are skipped, bin splitting can
     * only begin after the associated reverse-forwarders are in
     * place.
     *
     * The traversal scheme also applies to partial traversals of
     * ranges of bins (via an alternate Traverser constructor)
     * to support partitioned aggregate operations.  Also, read-only
     * operations give up if ever forwarded to a null table, which
     * provides support for shutdown-style clearing, which is also not
     * currently implemented.
     *
     * Lazy table initialization minimizes footprint until first use,
     * and also avoids resizings when the first operation is from a
     * putAll, constructor with map argument, or deserialization.
     * These cases attempt to override the initial capacity settings,
     * but harmlessly fail to take effect in cases of races.
     *
     * The element count is maintained using a specialization of
     * LongAdder. We need to incorporate a specialization rather than
     * just use a LongAdder in order to access implicit
     * contention-sensing that leads to creation of multiple
     * Cells.  The counter mechanics avoid contention on
     * updates but can encounter cache thrashing if read too
     * frequently during concurrent access. To avoid reading so often,
     * resizing under contention is attempted only upon adding to a
     * bin already holding two or more nodes. Under uniform hash
     * distributions, the probability of this occurring at threshold
     * is around 13%, meaning that only about 1 in 8 puts check
     * threshold (and after resizing, many fewer do so). The bulk
     * putAll operation further reduces contention by only committing
     * count updates upon these size checks.
     *
     * Maintaining API and serialization compatibility with previous
     * versions of this class introduces several oddities. Mainly: We
     * leave untouched but unused constructor arguments referring to
     * concurrencyLevel. We accept a loadFactor constructor argument,
     * but apply it only to initial table capacity (which is the only
     * time that we can guarantee to honor it.) We also declare an
     * unused "Segment" class that is instantiated in minimal form
     * only when serializing.
     */

    /* ---------------- Constants -------------- */

    /**
     * The largest possible table capacity.  This value must be
     * exactly 1<<30 to stay within Java array allocation and indexing
     * bounds for power of two table sizes, and is further required
     * because the top two bits of 32bit hash fields are used for
     * control purposes.
     */
    private static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * The default initial table capacity.  Must be a power of 2
     * (i.e., at least 1) and at most MAXIMUM_CAPACITY.
     */
    private static final int DEFAULT_CAPACITY = 16;

    /**
     * The largest possible (non-power of two) array size.
     * Needed by toArray and related methods.
     */
    static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;

    /**
     * The default concurrency level for this table. Unused but
     * defined for compatibility with previous versions of this class.
     */
    private static final int DEFAULT_CONCURRENCY_LEVEL = 16;

    /**
     * The load factor for this table. Overrides of this value in
     * constructors affect only the initial table capacity.  The
     * actual floating point value isn't normally used -- it is
     * simpler to use expressions such as {@code n - (n >>> 2)} for
     * the associated resizing threshold.
     */
    private static final float LOAD_FACTOR = 0.75f;

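    // For reference: n - (n >>> 2) is n - n/4 = 0.75 * n, so for
    // example a table of 16 bins resizes once it holds 16 - 4 = 12
    // mappings.
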
    /**
     * The bin count threshold for using a tree rather than list for a
     * bin.  The value reflects the approximate break-even point for
     * using tree-based operations.
     */
    private static final int TREE_THRESHOLD = 8;

    /**
     * Minimum number of rebinnings per transfer step. Ranges are
     * subdivided to allow multiple resizer threads.  This value
     * serves as a lower bound to avoid resizers encountering
     * excessive memory contention.  The value should be at least
     * DEFAULT_CAPACITY.
     */
    private static final int MIN_TRANSFER_STRIDE = 16;

    /*
     * Encodings for Node hash fields. See above for explanation.
     */
    static final int MOVED     = 0x80000000; // hash field for forwarding nodes
    static final int HASH_BITS = 0x7fffffff; // usable bits of normal node hash

    /** Number of CPUS, to place bounds on some sizings */
    static final int NCPU = Runtime.getRuntime().availableProcessors();

    /** For serialization compatibility. */
    private static final ObjectStreamField[] serialPersistentFields = {
        new ObjectStreamField("segments", Segment[].class),
        new ObjectStreamField("segmentMask", Integer.TYPE),
        new ObjectStreamField("segmentShift", Integer.TYPE)
    };

    /**
     * A padded cell for distributing counts.  Adapted from LongAdder
     * and Striped64.  See their internal docs for explanation.
     */
    @sun.misc.Contended static final class Cell {
        volatile long value;
        Cell(long x) { value = x; }
    }

    /* ---------------- Fields -------------- */

    /**
     * The array of bins. Lazily initialized upon first insertion.
     * Size is always a power of two. Accessed directly by iterators.
     */
    transient volatile Node<K,V>[] table;

    /**
     * The next table to use; non-null only while resizing.
     */
    private transient volatile Node<K,V>[] nextTable;

    /**
     * Base counter value, used mainly when there is no contention,
     * but also as a fallback during table initialization
     * races. Updated via CAS.
     */
    private transient volatile long baseCount;

    /**
     * Table initialization and resizing control.  When negative, the
     * table is being initialized or resized: -1 for initialization,
     * else -(1 + the number of active resizing threads).  Otherwise,
     * when table is null, holds the initial table size to use upon
     * creation, or 0 for default. After initialization, holds the
     * next element count value upon which to resize the table.
     */
    private transient volatile int sizeCtl;
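    // For example (see the table-initialization code later in this
    // file, beyond this excerpt): once a table of n bins is created,
    // sizeCtl is set to n - (n >>> 2), the 0.75 * n resize threshold
    // described above.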

    /**
     * The next table index (plus one) to split while resizing.
     */
    private transient volatile int transferIndex;

    /**
     * The least available table index to split while resizing.
     */
    private transient volatile int transferOrigin;

    /**
     * Spinlock (locked via CAS) used when resizing and/or creating Cells.
     */
    private transient volatile int cellsBusy;

    /**
     * Table of counter cells. When non-null, size is a power of 2.
     */
    private transient volatile Cell[] counterCells;

    // views
    private transient KeySetView<K,V> keySet;
    private transient ValuesView<K,V> values;
    private transient EntrySetView<K,V> entrySet;

    /* ---------------- Table element access -------------- */

    /*
     * Volatile access methods are used for table elements as well as
     * elements of in-progress next table while resizing.  Uses are
     * null checked by callers, and implicitly bounds-checked, relying
     * on the invariants that tab arrays have non-zero size, and all
     * indices are masked with (tab.length - 1) which is never
     * negative and always less than length. Note that, to be correct
     * wrt arbitrary concurrency errors by users, bounds checks must
     * operate on local variables, which accounts for some odd-looking
     * inline assignments below.
     */

    static final <K,V> Node<K,V> tabAt(Node<K,V>[] tab, int i) {
        return (Node<K,V>)U.getObjectVolatile(tab, ((long)i << ASHIFT) + ABASE);
    }

    static final <K,V> boolean casTabAt(Node<K,V>[] tab, int i,
                                        Node<K,V> c, Node<K,V> v) {
        return U.compareAndSwapObject(tab, ((long)i << ASHIFT) + ABASE, c, v);
    }

    static final <K,V> void setTabAt(Node<K,V>[] tab, int i, Node<K,V> v) {
        U.putObjectVolatile(tab, ((long)i << ASHIFT) + ABASE, v);
    }
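    // Note: U, ABASE, and ASHIFT above are the sun.misc.Unsafe instance
    // and the Node[] base offset/element shift constants; they are set
    // up in a static initializer near the end of this file (beyond this
    // excerpt).
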
    /* ---------------- Nodes -------------- */

    /**
     * Key-value entry.  This class is never exported out as a
     * user-mutable Map.Entry (i.e., one supporting setValue; see
     * MapEntry below), but can be used for read-only traversals used
     * in bulk tasks.  Nodes with a hash field of MOVED are special,
     * and do not contain user keys or values (and are never
     * exported).  Otherwise, keys and vals are never null.
     */
    static class Node<K,V> implements Map.Entry<K,V> {
        final int hash;
        final Object key;
        volatile V val;
        Node<K,V> next;

        Node(int hash, Object key, V val, Node<K,V> next) {
            this.hash = hash;
            this.key = key;
            this.val = val;
            this.next = next;
        }

        public final K getKey()        { return (K)key; }
        public final V getValue()      { return val; }
        public final int hashCode()    { return key.hashCode() ^ val.hashCode(); }
        public final String toString() { return key + "=" + val; }
        public final V setValue(V value) {
            throw new UnsupportedOperationException();
        }

        public final boolean equals(Object o) {
            Object k, v, u; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == (u = val) || v.equals(u)));
        }
    }

    /**
     * Exported Entry for EntryIterator
     */
    static final class MapEntry<K,V> implements Map.Entry<K,V> {
        final K key; // non-null
        V val;       // non-null
        final ConcurrentHashMap<K,V> map;
        MapEntry(K key, V val, ConcurrentHashMap<K,V> map) {
            this.key = key;
            this.val = val;
            this.map = map;
        }
        public K getKey()        { return key; }
        public V getValue()      { return val; }
        public int hashCode()    { return key.hashCode() ^ val.hashCode(); }
        public String toString() { return key + "=" + val; }

        public boolean equals(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    (k == key || k.equals(key)) &&
                    (v == val || v.equals(val)));
        }

        /**
         * Sets our entry's value and writes through to the map.  The
         * value to return is somewhat arbitrary here. Since we do not
         * necessarily track asynchronous changes, the most recent
         * "previous" value could be different from what we return (or
         * could even have been removed, in which case the put will
         * re-establish). We do not and cannot guarantee more.
         */
        public V setValue(V value) {
            if (value == null) throw new NullPointerException();
            V v = val;
            val = value;
            map.put(key, value);
            return v;
        }
    }

    /* ---------------- TreeBins -------------- */

    /**
     * Nodes for use in TreeBins
     */
    static final class TreeNode<K,V> extends Node<K,V> {
        TreeNode<K,V> parent;  // red-black tree links
        TreeNode<K,V> left;
        TreeNode<K,V> right;
        TreeNode<K,V> prev;    // needed to unlink next upon deletion
        boolean red;

        TreeNode(int hash, Object key, V val, Node<K,V> next,
                 TreeNode<K,V> parent) {
            super(hash, key, val, next);
            this.parent = parent;
        }
    }

    /**
     * Returns a Class for the given type of the form "class C
     * implements Comparable<C>", if one exists, else null.  See below
     * for explanation.
     */
    static Class<?> comparableClassFor(Class<?> c) {
        Class<?> s, cmpc; Type[] ts, as; Type t; ParameterizedType p;
        if (c == String.class) // bypass checks
            return c;
        if (c != null && (cmpc = Comparable.class).isAssignableFrom(c)) {
            while (cmpc.isAssignableFrom(s = c.getSuperclass()))
                c = s; // find topmost comparable class
            if ((ts = c.getGenericInterfaces()) != null) {
                for (int i = 0; i < ts.length; ++i) {
                    if (((t = ts[i]) instanceof ParameterizedType) &&
                        ((p = (ParameterizedType)t).getRawType() == cmpc) &&
                        (as = p.getActualTypeArguments()) != null &&
                        as.length == 1 && as[0] == c) // type arg is c
                        return c;
                }
            }
        }
        return null;
    }
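    // Illustrative examples: comparableClassFor(Integer.class) returns
    // Integer.class (Integer implements Comparable<Integer>), while
    // comparableClassFor(Object.class) returns null.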

    /**
     * A specialized form of red-black tree for use in bins
     * whose size exceeds a threshold.
     *
     * TreeBins use a special form of comparison for search and
     * related operations (which is the main reason we cannot use
     * existing collections such as TreeMaps). TreeBins contain
     * Comparable elements, but may contain others, as well as
     * elements that are Comparable but not necessarily Comparable
     * for the same T, so we cannot invoke compareTo among them. To
     * handle this, the tree is ordered primarily by hash value, then
     * by Comparable.compareTo order if applicable.  On lookup at a
     * node, if elements are not comparable or compare as 0 then both
     * left and right children may need to be searched in the case of
     * tied hash values. (This corresponds to the full list search
     * that would be necessary if all elements were non-Comparable and
     * had tied hashes.) The red-black balancing code is updated from
     * pre-jdk-collections
     * (http://gee.cs.oswego.edu/dl/classes/collections/RBCell.java)
     * based in turn on Cormen, Leiserson, and Rivest "Introduction to
     * Algorithms" (CLR).
     *
     * TreeBins also maintain a separate locking discipline from that
     * of regular bins. Because they are forwarded via special MOVED
     * nodes at bin heads (which can never change once established),
     * we cannot use those nodes as locks. Instead, TreeBin extends
     * StampedLock to support a form of read-write lock. For update
     * operations and table validation, the exclusive form of lock
     * behaves in the same way as bin-head locks. However, lookups use
     * shared read-lock mechanics to allow multiple readers in the
     * absence of writers. Additionally, these lookups do not ever
     * block: While the lock is not available, they proceed along the
     * slow traversal path (via next-pointers) until the lock becomes
     * available or the list is exhausted, whichever comes
     * first. These cases are not fast, but maximize aggregate
     * expected throughput.
     */
    static final class TreeBin<K,V> extends StampedLock {
        private static final long serialVersionUID = 2249069246763182397L;
        transient TreeNode<K,V> root;  // root of tree
        transient TreeNode<K,V> first; // head of next-pointer list

        /** From CLR */
        private void rotateLeft(TreeNode<K,V> p) {
            if (p != null) {
                TreeNode<K,V> r = p.right, pp, rl;
                if ((rl = p.right = r.left) != null)
                    rl.parent = p;
                if ((pp = r.parent = p.parent) == null)
                    root = r;
                else if (pp.left == p)
                    pp.left = r;
                else
                    pp.right = r;
                r.left = p;
                p.parent = r;
            }
        }

        /** From CLR */
        private void rotateRight(TreeNode<K,V> p) {
            if (p != null) {
                TreeNode<K,V> l = p.left, pp, lr;
                if ((lr = p.left = l.right) != null)
                    lr.parent = p;
                if ((pp = l.parent = p.parent) == null)
                    root = l;
                else if (pp.right == p)
                    pp.right = l;
                else
                    pp.left = l;
                l.right = p;
                p.parent = l;
            }
        }

        /**
         * Returns the TreeNode (or null if not found) for the given key
         * starting at given root.
         */
        final TreeNode<K,V> getTreeNode(int h, Object k, TreeNode<K,V> p,
                                        Class<?> cc) {
            while (p != null) {
                int dir, ph; Object pk; Class<?> pc;
                if ((ph = p.hash) != h)
                    dir = (h < ph) ? -1 : 1;
                else if ((pk = p.key) == k || k.equals(pk))
                    return p;
                else if (cc == null || pk == null ||
                         ((pc = pk.getClass()) != cc &&
                          comparableClassFor(pc) != cc) ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    TreeNode<K,V> r, pr; // check both sides
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else // continue left
                        dir = -1;
                }
                p = (dir > 0) ? p.right : p.left;
            }
            return null;
        }

        /**
         * Wrapper for getTreeNode used by CHM.get. Tries to obtain
         * read-lock to call getTreeNode, but during failure to get
         * lock, searches along next links.
         */
        final V getValue(int h, Object k) {
            Class<?> cc = comparableClassFor(k.getClass());
            Node<K,V> r = null;
            for (Node<K,V> e = first; e != null; e = e.next) {
                long s;
                if ((s = tryReadLock()) != 0L) {
                    try {
                        r = getTreeNode(h, k, root, cc);
                    } finally {
                        unlockRead(s);
                    }
                    break;
                }
                else if (e.hash == h && k.equals(e.key)) {
                    r = e;
                    break;
                }
            }
            return r == null ? null : r.val;
        }

        /**
         * Finds or adds a node.
         * @return null if added
         */
        final TreeNode<K,V> putTreeNode(int h, Object k, V v) {
            Class<?> cc = comparableClassFor(k.getClass());
            TreeNode<K,V> pp = root, p = null;
            int dir = 0;
            while (pp != null) { // find existing node or leaf to insert at
                int ph; Object pk; Class<?> pc;
                p = pp;
                if ((ph = p.hash) != h)
                    dir = (h < ph) ? -1 : 1;
                else if ((pk = p.key) == k || k.equals(pk))
                    return p;
                else if (cc == null || pk == null ||
                         ((pc = pk.getClass()) != cc &&
                          comparableClassFor(pc) != cc) ||
                         (dir = ((Comparable<Object>)k).compareTo(pk)) == 0) {
                    TreeNode<K,V> r, pr;
                    if ((pr = p.right) != null &&
                        (r = getTreeNode(h, k, pr, cc)) != null)
                        return r;
                    else // continue left
                        dir = -1;
                }
                pp = (dir > 0) ? p.right : p.left;
            }

            TreeNode<K,V> f = first;
            TreeNode<K,V> x = first = new TreeNode<K,V>(h, k, v, f, p);
            if (p == null)
                root = x;
            else { // attach and rebalance; adapted from CLR
                if (f != null)
                    f.prev = x;
                if (dir <= 0)
                    p.left = x;
                else
                    p.right = x;
                x.red = true;
                for (TreeNode<K,V> xp, xpp, xppl, xppr;;) {
                    if ((xp = x.parent) == null) {
                        (root = x).red = false;
                        break;
                    }
                    else if (!xp.red || (xpp = xp.parent) == null) {
                        TreeNode<K,V> r = root;
                        if (r != null && r.red)
                            r.red = false;
                        break;
                    }
                    else if ((xppl = xpp.left) == xp) {
                        if ((xppr = xpp.right) != null && xppr.red) {
                            xppr.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        }
                        else {
                            if (x == xp.right) {
                                rotateLeft(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateRight(xpp);
                                }
                            }
                        }
                    }
                    else {
                        if (xppl != null && xppl.red) {
                            xppl.red = false;
                            xp.red = false;
                            xpp.red = true;
                            x = xpp;
                        }
                        else {
                            if (x == xp.left) {
                                rotateRight(x = xp);
                                xpp = (xp = x.parent) == null ? null : xp.parent;
                            }
                            if (xp != null) {
                                xp.red = false;
                                if (xpp != null) {
                                    xpp.red = true;
                                    rotateLeft(xpp);
                                }
                            }
                        }
                    }
                }
            }
            assert checkInvariants();
            return null;
        }

        /**
         * Removes the given node, that must be present before this
         * call.  This is messier than typical red-black deletion code
         * because we cannot swap the contents of an interior node
         * with a leaf successor that is pinned by "next" pointers
         * that are accessible independently of lock. So instead we
         * swap the tree linkages.
         */
        final void deleteTreeNode(TreeNode<K,V> p) {
            TreeNode<K,V> next = (TreeNode<K,V>)p.next;
            TreeNode<K,V> pred = p.prev; // unlink traversal pointers
            if (pred == null)
                first = next;
            else
                pred.next = next;
            if (next != null)
                next.prev = pred;
            else if (pred == null) {
                root = null;
                return;
            }
            TreeNode<K,V> replacement;
            TreeNode<K,V> pl = p.left;
            TreeNode<K,V> pr = p.right;
            if (pl != null && pr != null) {
                TreeNode<K,V> s = pr, sl;
                while ((sl = s.left) != null) // find successor
                    s = sl;
                boolean c = s.red; s.red = p.red; p.red = c; // swap colors
                TreeNode<K,V> sr = s.right;
                TreeNode<K,V> pp = p.parent;
                if (s == pr) { // p was s's direct parent
                    p.parent = s;
                    s.right = p;
                }
                else {
                    TreeNode<K,V> sp = s.parent;
                    if ((p.parent = sp) != null) {
                        if (s == sp.left)
                            sp.left = p;
                        else
                            sp.right = p;
                    }
                    if ((s.right = pr) != null)
                        pr.parent = s;
                }
                p.left = null;
                if ((p.right = sr) != null)
                    sr.parent = p;
                if ((s.left = pl) != null)
                    pl.parent = s;
                if ((s.parent = pp) == null)
                    root = s;
                else if (p == pp.left)
                    pp.left = s;
                else
                    pp.right = s;
                if (sr != null)
                    replacement = sr;
                else
                    replacement = p;
            }
            else if (pl != null)
                replacement = pl;
            else if (pr != null)
                replacement = pr;
            else
                replacement = p;
            if (replacement != p) {
                TreeNode<K,V> pp = replacement.parent = p.parent;
                if (pp == null)
                    root = replacement;
                else if (p == pp.left)
                    pp.left = replacement;
                else
                    pp.right = replacement;
                p.left = p.right = p.parent = null;
            }
            if (!p.red) { // rebalance, from CLR
                for (TreeNode<K,V> x = replacement; x != null; ) {
                    TreeNode<K,V> xp, xpl, xpr;
                    if (x.red || (xp = x.parent) == null) {
                        x.red = false;
                        break;
                    }
                    else if ((xpl = xp.left) == x) {
                        if ((xpr = xp.right) != null && xpr.red) {
                            xpr.red = false;
                            xp.red = true;
                            rotateLeft(xp);
                            xpr = (xp = x.parent) == null ? null : xp.right;
                        }
                        if (xpr == null)
                            x = xp;
                        else {
                            TreeNode<K,V> sl = xpr.left, sr = xpr.right;
                            if ((sr == null || !sr.red) &&
                                (sl == null || !sl.red)) {
                                xpr.red = true;
                                x = xp;
                            }
                            else {
                                if (sr == null || !sr.red) {
                                    if (sl != null)
                                        sl.red = false;
                                    xpr.red = true;
                                    rotateRight(xpr);
                                    xpr = (xp = x.parent) == null ?
                                        null : xp.right;
                                }
                                if (xpr != null) {
                                    xpr.red = (xp == null) ? false : xp.red;
                                    if ((sr = xpr.right) != null)
                                        sr.red = false;
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateLeft(xp);
                                }
                                x = root;
                            }
                        }
                    }
                    else { // symmetric
                        if (xpl != null && xpl.red) {
                            xpl.red = false;
                            xp.red = true;
                            rotateRight(xp);
                            xpl = (xp = x.parent) == null ? null : xp.left;
                        }
                        if (xpl == null)
                            x = xp;
                        else {
                            TreeNode<K,V> sl = xpl.left, sr = xpl.right;
                            if ((sl == null || !sl.red) &&
                                (sr == null || !sr.red)) {
                                xpl.red = true;
                                x = xp;
                            }
                            else {
                                if (sl == null || !sl.red) {
                                    if (sr != null)
                                        sr.red = false;
                                    xpl.red = true;
                                    rotateLeft(xpl);
                                    xpl = (xp = x.parent) == null ?
                                        null : xp.left;
                                }
                                if (xpl != null) {
                                    xpl.red = (xp == null) ? false : xp.red;
                                    if ((sl = xpl.left) != null)
                                        sl.red = false;
                                }
                                if (xp != null) {
                                    xp.red = false;
                                    rotateRight(xp);
                                }
                                x = root;
                            }
                        }
                    }
                }
            }
            if (p == replacement) { // detach pointers
                TreeNode<K,V> pp;
                if ((pp = p.parent) != null) {
                    if (p == pp.left)
                        pp.left = null;
                    else if (p == pp.right)
                        pp.right = null;
                    p.parent = null;
                }
            }
            assert checkInvariants();
        }

        /**
         * Checks linkage and balance invariants at root
         */
        final boolean checkInvariants() {
            TreeNode<K,V> r = root;
            if (r == null)
                return (first == null);
            else
                return (first != null) && checkTreeNode(r);
        }

        /**
         * Recursive invariant check
         */
        final boolean checkTreeNode(TreeNode<K,V> t) {
            TreeNode<K,V> tp = t.parent, tl = t.left, tr = t.right,
                tb = t.prev, tn = (TreeNode<K,V>)t.next;
            if (tb != null && tb.next != t)
                return false;
            if (tn != null && tn.prev != t)
                return false;
            if (tp != null && t != tp.left && t != tp.right)
                return false;
            if (tl != null && (tl.parent != t || tl.hash > t.hash))
                return false;
            if (tr != null && (tr.parent != t || tr.hash < t.hash))
                return false;
            if (t.red && tl != null && tl.red && tr != null && tr.red)
                return false;
            if (tl != null && !checkTreeNode(tl))
                return false;
            if (tr != null && !checkTreeNode(tr))
                return false;
            return true;
        }
    }

    /* ---------------- Collision reduction methods -------------- */

    /**
     * Spreads higher bits to lower, and also forces top bit to 0.
     * Because the table uses power-of-two masking, sets of hashes
     * that vary only in bits above the current mask will always
     * collide. (Among known examples are sets of Float keys holding
     * consecutive whole numbers in small tables.)  To counter this,
     * we apply a transform that spreads the impact of higher bits
     * downward. There is a tradeoff between speed, utility, and
     * quality of bit-spreading. Because many common sets of hashes
     * are already reasonably distributed across bits (so don't benefit
     * from spreading), and because we use trees to handle large sets
     * of collisions in bins, we don't need excessively high quality.
     */
    private static final int spread(int h) {
        h ^= (h >>> 18) ^ (h >>> 12);
        return (h ^ (h >>> 10)) & HASH_BITS;
    }
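    // Illustrative only: a bin index is then derived by masking the
    // spread hash with the power-of-two table length, as in internalGet
    // below:
    //     int i = (tab.length - 1) & spread(key.hashCode());
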
    /**
     * Replaces a list bin with a tree bin if key is comparable.  Call
     * only when locked.
     */
    private final void replaceWithTreeBin(Node<K,V>[] tab, int index, Object key) {
        if (tab != null && comparableClassFor(key.getClass()) != null) {
            TreeBin<K,V> t = new TreeBin<K,V>();
            for (Node<K,V> e = tabAt(tab, index); e != null; e = e.next)
                t.putTreeNode(e.hash, e.key, e.val);
            setTabAt(tab, index, new Node<K,V>(MOVED, t, null, null));
        }
    }

    /* ---------------- Internal access and update methods -------------- */

    /** Implementation for get and containsKey */
    private final V internalGet(Object k) {
        int h = spread(k.hashCode());
        V v = null;
        Node<K,V>[] tab; Node<K,V> e;
        if ((tab = table) != null &&
            (e = tabAt(tab, (tab.length - 1) & h)) != null) {
            for (;;) {
                int eh; Object ek;
                if ((eh = e.hash) < 0) {
                    if ((ek = e.key) instanceof TreeBin) { // search TreeBin
                        v = ((TreeBin<K,V>)ek).getValue(h, k);
                        break;
                    }
                    else if (!(ek instanceof Node[]) || // try new table
                             (e = tabAt(tab = (Node<K,V>[])ek,
                                        (tab.length - 1) & h)) == null)
                        break;
                }
                else if (eh == h && ((ek = e.key) == k || k.equals(ek))) {
                    v = e.val;
                    break;
                }
                else if ((e = e.next) == null)
                    break;
            }
        }
        return v;
    }
1250 |
/** |
|
1251 |
* Implementation for the four public remove/replace methods: |
|
1252 |
* Replaces node value with v, conditional upon match of cv if |
|
1253 |
* non-null. If resulting value is null, delete. |
|
1254 |
*/ |
|
1255 |
    private final V internalReplace(Object k, V v, Object cv) {
        int h = spread(k.hashCode());
        V oldVal = null;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int i, fh; Object fk;
            if (tab == null ||
                (f = tabAt(tab, i = (tab.length - 1) & h)) == null)
                break;
            else if ((fh = f.hash) < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    boolean validated = false;
                    boolean deleted = false;
                    try {
                        if (tabAt(tab, i) == f) {
                            validated = true;
                            Class<?> cc = comparableClassFor(k.getClass());
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            if (p != null) {
                                V pv = p.val;
                                if (cv == null || cv == pv || cv.equals(pv)) {
                                    oldVal = pv;
                                    if (v != null)
                                        p.val = v;
                                    else {
                                        deleted = true;
                                        t.deleteTreeNode(p);
                                    }
                                }
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (validated) {
                        if (deleted)
                            addCount(-1L, -1);
                        break;
                    }
                }
                else
                    tab = (Node<K,V>[])fk;
            }
            else {
                boolean validated = false;
                boolean deleted = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        validated = true;
                        for (Node<K,V> e = f, pred = null;;) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                V ev = e.val;
                                if (cv == null || cv == ev || cv.equals(ev)) {
                                    oldVal = ev;
                                    if (v != null)
                                        e.val = v;
                                    else {
                                        deleted = true;
                                        Node<K,V> en = e.next;
                                        if (pred != null)
                                            pred.next = en;
                                        else
                                            setTabAt(tab, i, en);
                                    }
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null)
                                break;
                        }
                    }
                }
                if (validated) {
                    if (deleted)
                        addCount(-1L, -1);
                    break;
                }
            }
        }
        return oldVal;
    }

    /*
     * Internal versions of insertion methods
     * All have the same basic structure as the first (internalPut):
     *  1. If table uninitialized, create
     *  2. If bin empty, try to CAS new node
     *  3. If bin stale, use new table
     *  4. if bin converted to TreeBin, validate and relay to TreeBin methods
     *  5. Lock and validate; if valid, scan and add or update
     *
     * The putAll method differs mainly in attempting to pre-allocate
     * enough table space, and also more lazily performs count updates
     * and checks.
     *
     * Most of the function-accepting methods can't be factored nicely
     * because they require different functional forms, so instead
     * sprawl out similar mechanics.
     */

    /** Implementation for put and putIfAbsent */
    private final V internalPut(K k, V v, boolean onlyIfAbsent) {
        if (k == null || v == null) throw new NullPointerException();
        int h = spread(k.hashCode());
        int len = 0;
        for (Node<K,V>[] tab = table;;) {
            int i, fh; Node<K,V> f; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null)))
                    break;                   // no lock when adding to empty bin
            }
            else if ((fh = f.hash) < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    V oldVal = null;
                    try {
                        if (tabAt(tab, i) == f) {
                            len = 2;
                            TreeNode<K,V> p = t.putTreeNode(h, k, v);
                            if (p != null) {
                                oldVal = p.val;
                                if (!onlyIfAbsent)
                                    p.val = v;
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0) {
                        if (oldVal != null)
                            return oldVal;
                        break;
                    }
                }
                else
                    tab = (Node<K,V>[])fk;
            }
            else {
                V oldVal = null;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f;; ++len) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = v;
                                break;
                            }
                            Node<K,V> last = e;
                            if ((e = e.next) == null) {
                                last.next = new Node<K,V>(h, k, v, null);
                                if (len > TREE_THRESHOLD)
                                    replaceWithTreeBin(tab, i, k);
                                break;
                            }
                        }
                    }
                }
                if (len != 0) {
                    if (oldVal != null)
                        return oldVal;
                    break;
                }
            }
        }
        addCount(1L, len);
        return null;
    }

    /** Implementation for computeIfAbsent */
    private final V internalComputeIfAbsent(K k, Function<? super K, ? extends V> mf) {
        if (k == null || mf == null)
            throw new NullPointerException();
        int h = spread(k.hashCode());
        V val = null;
        int len = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int i; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                Node<K,V> node = new Node<K,V>(h, k, null, null);
                synchronized (node) {
                    if (casTabAt(tab, i, null, node)) {
                        len = 1;
                        try {
                            if ((val = mf.apply(k)) != null)
                                node.val = val;
                        } finally {
                            if (val == null)
                                setTabAt(tab, i, null);
                        }
                    }
                }
                if (len != 0)
                    break;
            }
            else if (f.hash < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    boolean added = false;
                    try {
                        if (tabAt(tab, i) == f) {
                            len = 2;
                            Class<?> cc = comparableClassFor(k.getClass());
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            if (p != null)
                                val = p.val;
                            else if ((val = mf.apply(k)) != null) {
                                added = true;
                                t.putTreeNode(h, k, val);
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0) {
                        if (!added)
                            return val;
                        break;
                    }
                }
                else
                    tab = (Node<K,V>[])fk;
            }
            else {
                boolean added = false;
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f;; ++len) {
                            Object ek; V ev;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = e.val;
                                break;
                            }
                            Node<K,V> last = e;
                            if ((e = e.next) == null) {
                                if ((val = mf.apply(k)) != null) {
                                    added = true;
                                    last.next = new Node<K,V>(h, k, val, null);
                                    if (len > TREE_THRESHOLD)
                                        replaceWithTreeBin(tab, i, k);
                                }
                                break;
                            }
                        }
                    }
                }
                if (len != 0) {
                    if (!added)
                        return val;
                    break;
                }
            }
        }
        if (val != null)
            addCount(1L, len);
        return val;
    }

    /** Implementation for compute */
    private final V internalCompute(K k, boolean onlyIfPresent,
                                    BiFunction<? super K, ? super V, ? extends V> mf) {
        if (k == null || mf == null)
            throw new NullPointerException();
        int h = spread(k.hashCode());
        V val = null;
        int delta = 0;
        int len = 0;
        for (Node<K,V>[] tab = table;;) {
            Node<K,V> f; int i, fh; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (onlyIfPresent)
                    break;
                Node<K,V> node = new Node<K,V>(h, k, null, null);
                synchronized (node) {
                    if (casTabAt(tab, i, null, node)) {
                        try {
                            len = 1;
                            if ((val = mf.apply(k, null)) != null) {
                                node.val = val;
                                delta = 1;
                            }
                        } finally {
                            if (delta == 0)
                                setTabAt(tab, i, null);
                        }
                    }
                }
                if (len != 0)
                    break;
            }
            else if ((fh = f.hash) < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    try {
                        if (tabAt(tab, i) == f) {
                            len = 2;
                            Class<?> cc = comparableClassFor(k.getClass());
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            if (p != null || !onlyIfPresent) {
                                V pv = (p == null) ? null : p.val;
                                if ((val = mf.apply(k, pv)) != null) {
                                    if (p != null)
                                        p.val = val;
                                    else {
                                        delta = 1;
                                        t.putTreeNode(h, k, val);
                                    }
                                }
                                else if (p != null) {
                                    delta = -1;
                                    t.deleteTreeNode(p);
                                }
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0)
                        break;
                }
                else
                    tab = (Node<K,V>[])fk;
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f, pred = null;; ++len) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = mf.apply(k, e.val);
                                if (val != null)
                                    e.val = val;
                                else {
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                if (!onlyIfPresent &&
                                    (val = mf.apply(k, null)) != null) {
                                    pred.next = new Node<K,V>(h, k, val, null);
                                    delta = 1;
                                    if (len > TREE_THRESHOLD)
                                        replaceWithTreeBin(tab, i, k);
                                }
                                break;
                            }
                        }
                    }
                }
                if (len != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, len);
        return val;
    }

    /** Implementation for merge */
    private final V internalMerge(K k, V v,
                                  BiFunction<? super V, ? super V, ? extends V> mf) {
        if (k == null || v == null || mf == null)
            throw new NullPointerException();
        int h = spread(k.hashCode());
        V val = null;
        int delta = 0;
        int len = 0;
        for (Node<K,V>[] tab = table;;) {
            int i; Node<K,V> f; Object fk;
            if (tab == null)
                tab = initTable();
            else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null) {
                if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
                    delta = 1;
                    val = v;
                    break;
                }
            }
            else if (f.hash < 0) {
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    try {
                        if (tabAt(tab, i) == f) {
                            len = 2;
                            Class<?> cc = comparableClassFor(k.getClass());
                            TreeNode<K,V> p = t.getTreeNode(h, k, t.root, cc);
                            val = (p == null) ? v : mf.apply(p.val, v);
                            if (val != null) {
                                if (p != null)
                                    p.val = val;
                                else {
                                    delta = 1;
                                    t.putTreeNode(h, k, val);
                                }
                            }
                            else if (p != null) {
                                delta = -1;
                                t.deleteTreeNode(p);
                            }
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                    if (len != 0)
                        break;
                }
                else
                    tab = (Node<K,V>[])fk;
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        len = 1;
                        for (Node<K,V> e = f, pred = null;; ++len) {
                            Object ek;
                            if (e.hash == h &&
                                ((ek = e.key) == k || k.equals(ek))) {
                                val = mf.apply(e.val, v);
                                if (val != null)
                                    e.val = val;
                                else {
                                    delta = -1;
                                    Node<K,V> en = e.next;
                                    if (pred != null)
                                        pred.next = en;
                                    else
                                        setTabAt(tab, i, en);
                                }
                                break;
                            }
                            pred = e;
                            if ((e = e.next) == null) {
                                delta = 1;
                                val = v;
                                pred.next = new Node<K,V>(h, k, val, null);
                                if (len > TREE_THRESHOLD)
                                    replaceWithTreeBin(tab, i, k);
                                break;
                            }
                        }
                    }
                }
                if (len != 0)
                    break;
            }
        }
        if (delta != 0)
            addCount((long)delta, len);
        return val;
    }
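
    /*
     * Illustrative usage, not part of the original source: the merge
     * mechanics above back the public merge() method, which makes lock-free
     * counting a one-liner. A minimal sketch; the map name "counts" is
     * hypothetical:
     *
     *   ConcurrentHashMap<String,Integer> counts = new ConcurrentHashMap<>();
     *   for (String w : new String[] {"a", "b", "a"})
     *       counts.merge(w, 1, Integer::sum);          // {a=2, b=1}
     *   counts.merge("a", 0, (old, zero) -> null);     // null result removes
     */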

    /** Implementation for putAll */
    private final void internalPutAll(Map<? extends K, ? extends V> m) {
        tryPresize(m.size());
        long delta = 0L;     // number of uncommitted additions
        boolean npe = false; // to throw exception on exit for nulls
        try {                // to clean up counts on other exceptions
            for (Map.Entry<?, ? extends V> entry : m.entrySet()) {
                Object k; V v;
                if (entry == null || (k = entry.getKey()) == null ||
                    (v = entry.getValue()) == null) {
                    npe = true;
                    break;
                }
                int h = spread(k.hashCode());
                for (Node<K,V>[] tab = table;;) {
                    int i; Node<K,V> f; int fh; Object fk;
                    if (tab == null)
                        tab = initTable();
                    else if ((f = tabAt(tab, i = (tab.length - 1) & h)) == null){
                        if (casTabAt(tab, i, null, new Node<K,V>(h, k, v, null))) {
                            ++delta;
                            break;
                        }
                    }
                    else if ((fh = f.hash) < 0) {
                        if ((fk = f.key) instanceof TreeBin) {
                            TreeBin<K,V> t = (TreeBin<K,V>)fk;
                            long stamp = t.writeLock();
                            boolean validated = false;
                            try {
                                if (tabAt(tab, i) == f) {
                                    validated = true;
                                    Class<?> cc = comparableClassFor(k.getClass());
                                    TreeNode<K,V> p = t.getTreeNode(h, k,
                                                                    t.root, cc);
                                    if (p != null)
                                        p.val = v;
                                    else {
                                        ++delta;
                                        t.putTreeNode(h, k, v);
                                    }
                                }
                            } finally {
                                t.unlockWrite(stamp);
                            }
                            if (validated)
                                break;
                        }
                        else
                            tab = (Node<K,V>[])fk;
                    }
                    else {
                        int len = 0;
                        synchronized (f) {
                            if (tabAt(tab, i) == f) {
                                len = 1;
                                for (Node<K,V> e = f;; ++len) {
                                    Object ek;
                                    if (e.hash == h &&
                                        ((ek = e.key) == k || k.equals(ek))) {
                                        e.val = v;
                                        break;
                                    }
                                    Node<K,V> last = e;
                                    if ((e = e.next) == null) {
                                        ++delta;
                                        last.next = new Node<K,V>(h, k, v, null);
                                        if (len > TREE_THRESHOLD)
                                            replaceWithTreeBin(tab, i, k);
                                        break;
                                    }
                                }
                            }
                        }
                        if (len != 0) {
                            if (len > 1) {
                                addCount(delta, len);
                                delta = 0L;
                            }
                            break;
                        }
                    }
                }
            }
        } finally {
            if (delta != 0L)
                addCount(delta, 2);
        }
        if (npe)
            throw new NullPointerException();
    }

    /**
     * Implementation for clear. Steps through each bin, removing all
     * nodes.
     */
    private final void internalClear() {
        long delta = 0L; // negative number of deletions
        int i = 0;
        Node<K,V>[] tab = table;
        while (tab != null && i < tab.length) {
            Node<K,V> f = tabAt(tab, i);
            if (f == null)
                ++i;
            else if (f.hash < 0) {
                Object fk;
                if ((fk = f.key) instanceof TreeBin) {
                    TreeBin<K,V> t = (TreeBin<K,V>)fk;
                    long stamp = t.writeLock();
                    try {
                        if (tabAt(tab, i) == f) {
                            for (Node<K,V> p = t.first; p != null; p = p.next)
                                --delta;
                            t.first = null;
                            t.root = null;
                            ++i;
                        }
                    } finally {
                        t.unlockWrite(stamp);
                    }
                }
                else
                    tab = (Node<K,V>[])fk;
            }
            else {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        for (Node<K,V> e = f; e != null; e = e.next)
                            --delta;
                        setTabAt(tab, i, null);
                        ++i;
                    }
                }
            }
        }
        if (delta != 0L)
            addCount(delta, -1);
    }

    /* ---------------- Table Initialization and Resizing -------------- */

    /**
     * Returns a power of two table size for the given desired capacity.
     * See Hacker's Delight, sec 3.2
     */
    private static final int tableSizeFor(int c) {
        int n = c - 1;
        n |= n >>> 1;
        n |= n >>> 2;
        n |= n >>> 4;
        n |= n >>> 8;
        n |= n >>> 16;
        return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
    }
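
    /*
     * Worked example, not part of the original source: the OR cascade smears
     * the highest set bit of c-1 into every lower position, so n+1 is the
     * next power of two. For c = 100: n = 99 = 0b1100011; after the shifts
     * n = 0b1111111 = 127, and the method returns 128. For c already a power
     * of two (say 64), n = 63 and the result is 64 itself, not 128.
     */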

    /**
     * Initializes table, using the size recorded in sizeCtl.
     */
    private final Node<K,V>[] initTable() {
        Node<K,V>[] tab; int sc;
        while ((tab = table) == null) {
            if ((sc = sizeCtl) < 0)
                Thread.yield(); // lost initialization race; just spin
            else if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if ((tab = table) == null) {
                        int n = (sc > 0) ? sc : DEFAULT_CAPACITY;
                        table = tab = (Node<K,V>[])new Node[n];
                        sc = n - (n >>> 2);
                    }
                } finally {
                    sizeCtl = sc;
                }
                break;
            }
        }
        return tab;
    }

    /**
     * Adds to count, and if table is too small and not already
     * resizing, initiates transfer. If already resizing, helps
     * perform transfer if work is available.  Rechecks occupancy
     * after a transfer to see if another resize is already needed
     * because resizings are lagging additions.
     *
     * @param x the count to add
     * @param check if <0, don't check resize, if <= 1 only check if uncontended
     */
    private final void addCount(long x, int check) {
        Cell[] as; long b, s;
        if ((as = counterCells) != null ||
            !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
            Cell a; long v; int m;
            boolean uncontended = true;
            if (as == null || (m = as.length - 1) < 0 ||
                (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
                !(uncontended =
                  U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
                fullAddCount(x, uncontended);
                return;
            }
            if (check <= 1)
                return;
            s = sumCount();
        }
        if (check >= 0) {
            Node<K,V>[] tab, nt; int sc;
            while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
                   tab.length < MAXIMUM_CAPACITY) {
                if (sc < 0) {
                    if (sc == -1 || transferIndex <= transferOrigin ||
                        (nt = nextTable) == null)
                        break;
                    if (U.compareAndSwapInt(this, SIZECTL, sc, sc - 1))
                        transfer(tab, nt);
                }
                else if (U.compareAndSwapInt(this, SIZECTL, sc, -2))
                    transfer(tab, null);
                s = sumCount();
            }
        }
    }

    /**
     * Tries to presize table to accommodate the given number of elements.
     *
     * @param size number of elements (doesn't need to be perfectly accurate)
     */
    private final void tryPresize(int size) {
        int c = (size >= (MAXIMUM_CAPACITY >>> 1)) ? MAXIMUM_CAPACITY :
            tableSizeFor(size + (size >>> 1) + 1);
        int sc;
        while ((sc = sizeCtl) >= 0) {
            Node<K,V>[] tab = table; int n;
            if (tab == null || (n = tab.length) == 0) {
                n = (sc > c) ? sc : c;
                if (U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                    try {
                        if (table == tab) {
                            table = (Node<K,V>[])new Node[n];
                            sc = n - (n >>> 2);
                        }
                    } finally {
                        sizeCtl = sc;
                    }
                }
            }
            else if (c <= sc || n >= MAXIMUM_CAPACITY)
                break;
            else if (tab == table &&
                     U.compareAndSwapInt(this, SIZECTL, sc, -2))
                transfer(tab, null);
        }
    }

    /**
     * Moves and/or copies the nodes in each bin to new table. See
     * above for explanation.
     */
    private final void transfer(Node<K,V>[] tab, Node<K,V>[] nextTab) {
        int n = tab.length, stride;
        if ((stride = (NCPU > 1) ? (n >>> 3) / NCPU : n) < MIN_TRANSFER_STRIDE)
            stride = MIN_TRANSFER_STRIDE; // subdivide range
        if (nextTab == null) {            // initiating
            try {
                nextTab = (Node<K,V>[])new Node[n << 1];
            } catch (Throwable ex) {      // try to cope with OOME
                sizeCtl = Integer.MAX_VALUE;
                return;
            }
            nextTable = nextTab;
            transferOrigin = n;
            transferIndex = n;
            Node<K,V> rev = new Node<K,V>(MOVED, tab, null, null);
            for (int k = n; k > 0;) {    // progressively reveal ready slots
                int nextk = (k > stride) ? k - stride : 0;
                for (int m = nextk; m < k; ++m)
                    nextTab[m] = rev;
                for (int m = n + nextk; m < n + k; ++m)
                    nextTab[m] = rev;
                U.putOrderedInt(this, TRANSFERORIGIN, k = nextk);
            }
        }
        int nextn = nextTab.length;
        Node<K,V> fwd = new Node<K,V>(MOVED, nextTab, null, null);
        boolean advance = true;
        for (int i = 0, bound = 0;;) {
            int nextIndex, nextBound; Node<K,V> f; Object fk;
            while (advance) {
                if (--i >= bound)
                    advance = false;
                else if ((nextIndex = transferIndex) <= transferOrigin) {
                    i = -1;
                    advance = false;
                }
                else if (U.compareAndSwapInt
                         (this, TRANSFERINDEX, nextIndex,
                          nextBound = (nextIndex > stride ?
                                       nextIndex - stride : 0))) {
                    bound = nextBound;
                    i = nextIndex - 1;
                    advance = false;
                }
            }
            if (i < 0 || i >= n || i + n >= nextn) {
                for (int sc;;) {
                    if (U.compareAndSwapInt(this, SIZECTL, sc = sizeCtl, ++sc)) {
                        if (sc == -1) {
                            nextTable = null;
                            table = nextTab;
                            sizeCtl = (n << 1) - (n >>> 1);
                        }
                        return;
                    }
                }
            }
            else if ((f = tabAt(tab, i)) == null) {
                if (casTabAt(tab, i, null, fwd)) {
                    setTabAt(nextTab, i, null);
                    setTabAt(nextTab, i + n, null);
                    advance = true;
                }
            }
            else if (f.hash >= 0) {
                synchronized (f) {
                    if (tabAt(tab, i) == f) {
                        int runBit = f.hash & n;
                        Node<K,V> lastRun = f, lo = null, hi = null;
                        for (Node<K,V> p = f.next; p != null; p = p.next) {
                            int b = p.hash & n;
                            if (b != runBit) {
                                runBit = b;
                                lastRun = p;
                            }
                        }
                        if (runBit == 0)
                            lo = lastRun;
                        else
                            hi = lastRun;
                        for (Node<K,V> p = f; p != lastRun; p = p.next) {
                            int ph = p.hash; Object pk = p.key; V pv = p.val;
                            if ((ph & n) == 0)
                                lo = new Node<K,V>(ph, pk, pv, lo);
                            else
                                hi = new Node<K,V>(ph, pk, pv, hi);
                        }
                        setTabAt(nextTab, i, lo);
                        setTabAt(nextTab, i + n, hi);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                }
            }
            else if ((fk = f.key) instanceof TreeBin) {
                TreeBin<K,V> t = (TreeBin<K,V>)fk;
                long stamp = t.writeLock();
                try {
                    if (tabAt(tab, i) == f) {
                        TreeNode<K,V> root;
                        Node<K,V> ln = null, hn = null;
                        if ((root = t.root) != null) {
                            Node<K,V> e, p; TreeNode<K,V> lr, rr; int lh;
                            TreeBin<K,V> lt = null, ht = null;
                            for (lr = root; lr.left != null; lr = lr.left);
                            for (rr = root; rr.right != null; rr = rr.right);
                            if ((lh = lr.hash) == rr.hash) { // move entire tree
                                if ((lh & n) == 0)
                                    lt = t;
                                else
                                    ht = t;
                            }
                            else {
                                lt = new TreeBin<K,V>();
                                ht = new TreeBin<K,V>();
                                int lc = 0, hc = 0;
                                for (e = t.first; e != null; e = e.next) {
                                    int h = e.hash;
                                    Object k = e.key; V v = e.val;
                                    if ((h & n) == 0) {
                                        ++lc;
                                        lt.putTreeNode(h, k, v);
                                    }
                                    else {
                                        ++hc;
                                        ht.putTreeNode(h, k, v);
                                    }
                                }
                                if (lc < TREE_THRESHOLD) { // throw away
                                    for (p = lt.first; p != null; p = p.next)
                                        ln = new Node<K,V>(p.hash, p.key,
                                                           p.val, ln);
                                    lt = null;
                                }
                                if (hc < TREE_THRESHOLD) {
                                    for (p = ht.first; p != null; p = p.next)
                                        hn = new Node<K,V>(p.hash, p.key,
                                                           p.val, hn);
                                    ht = null;
                                }
                            }
                            if (ln == null && lt != null)
                                ln = new Node<K,V>(MOVED, lt, null, null);
                            if (hn == null && ht != null)
                                hn = new Node<K,V>(MOVED, ht, null, null);
                        }
                        setTabAt(nextTab, i, ln);
                        setTabAt(nextTab, i + n, hn);
                        setTabAt(tab, i, fwd);
                        advance = true;
                    }
                } finally {
                    t.unlockWrite(stamp);
                }
            }
            else
                advance = true; // already processed
        }
    }

    /* ---------------- Counter support -------------- */

    final long sumCount() {
        Cell[] as = counterCells; Cell a;
        long sum = baseCount;
        if (as != null) {
            for (int i = 0; i < as.length; ++i) {
                if ((a = as[i]) != null)
                    sum += a.value;
            }
        }
        return sum;
    }

    // See LongAdder version for explanation
    private final void fullAddCount(long x, boolean wasUncontended) {
        int h;
        if ((h = ThreadLocalRandom.getProbe()) == 0) {
            ThreadLocalRandom.localInit();      // force initialization
            h = ThreadLocalRandom.getProbe();
            wasUncontended = true;
        }
        boolean collide = false;                // True if last slot nonempty
        for (;;) {
            Cell[] as; Cell a; int n; long v;
            if ((as = counterCells) != null && (n = as.length) > 0) {
                if ((a = as[(n - 1) & h]) == null) {
                    if (cellsBusy == 0) {            // Try to attach new Cell
                        Cell r = new Cell(x);        // Optimistic create
                        if (cellsBusy == 0 &&
                            U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                            boolean created = false;
                            try {               // Recheck under lock
                                Cell[] rs; int m, j;
                                if ((rs = counterCells) != null &&
                                    (m = rs.length) > 0 &&
                                    rs[j = (m - 1) & h] == null) {
                                    rs[j] = r;
                                    created = true;
                                }
                            } finally {
                                cellsBusy = 0;
                            }
                            if (created)
                                break;
                            continue;           // Slot is now non-empty
                        }
                    }
                    collide = false;
                }
                else if (!wasUncontended)       // CAS already known to fail
                    wasUncontended = true;      // Continue after rehash
                else if (U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))
                    break;
                else if (counterCells != as || n >= NCPU)
                    collide = false;            // At max size or stale
                else if (!collide)
                    collide = true;
                else if (cellsBusy == 0 &&
                         U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                    try {
                        if (counterCells == as) {// Expand table unless stale
                            Cell[] rs = new Cell[n << 1];
                            for (int i = 0; i < n; ++i)
                                rs[i] = as[i];
                            counterCells = rs;
                        }
                    } finally {
                        cellsBusy = 0;
                    }
                    collide = false;
                    continue;                   // Retry with expanded table
                }
                h = ThreadLocalRandom.advanceProbe(h);
            }
            else if (cellsBusy == 0 && counterCells == as &&
                     U.compareAndSwapInt(this, CELLSBUSY, 0, 1)) {
                boolean init = false;
                try {                           // Initialize table
                    if (counterCells == as) {
                        Cell[] rs = new Cell[2];
                        rs[h & 1] = new Cell(x);
                        counterCells = rs;
                        init = true;
                    }
                } finally {
                    cellsBusy = 0;
                }
                if (init)
                    break;
            }
            else if (U.compareAndSwapLong(this, BASECOUNT, v = baseCount, v + x))
                break;                          // Fall back on using base
        }
    }
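
    /*
     * Background sketch, not part of the original source: baseCount plus the
     * counterCells array implement the same striped-counter idea that
     * java.util.concurrent.atomic.LongAdder exposes publicly. The
     * user-visible analogue:
     *
     *   import java.util.concurrent.atomic.LongAdder;
     *
     *   LongAdder adder = new LongAdder();
     *   adder.increment();          // contended CASes fall back to cells
     *   adder.add(5L);
     *   long total = adder.sum();   // like sumCount(): base + all cells
     */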
    /* ---------------- Table Traversal -------------- */

    /**
     * Encapsulates traversal for methods such as containsValue; also
     * serves as a base class for other iterators and spliterators.
     *
     * Method advance visits once each still-valid node that was
     * reachable upon iterator construction. It might miss some that
     * were added to a bin after the bin was visited, which is OK wrt
     * consistency guarantees. Maintaining this property in the face
     * of possible ongoing resizes requires a fair amount of
     * bookkeeping state that is difficult to optimize away amidst
     * volatile accesses.  Even so, traversal maintains reasonable
     * throughput.
     *
     * Normally, iteration proceeds bin-by-bin traversing lists.
     * However, if the table has been resized, then all future steps
     * must traverse both the bin at the current index as well as at
     * (index + baseSize); and so on for further resizings. To
     * paranoically cope with potential sharing by users of iterators
     * across threads, iteration terminates if a bounds check fails
     * for a table read.
     */
    static class Traverser<K,V> {
        Node<K,V>[] tab;        // current table; updated if resized
        Node<K,V> next;         // the next entry to use
        int index;              // index of bin to use next
        int baseIndex;          // current index of initial table
        int baseLimit;          // index bound for initial table
        final int baseSize;     // initial table size

        Traverser(Node<K,V>[] tab, int size, int index, int limit) {
            this.tab = tab;
            this.baseSize = size;
            this.baseIndex = this.index = index;
            this.baseLimit = limit;
            this.next = null;
        }

        /**
         * Advances if possible, returning next valid node, or null if none.
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; Object ek; // must use locals in checks
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    if ((ek = e.key) instanceof TreeBin)
                        e = ((TreeBin<K,V>)ek).first;
                    else {
                        tab = (Node<K,V>[])ek;
                        e = null;
                        continue;
                    }
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex;    // visit upper slots if present
            }
        }
    }
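
    /*
     * Illustrative usage, not part of the original source: the
     * weak-consistency contract above means iterators never throw
     * ConcurrentModificationException, even when the map is mutated
     * mid-iteration:
     *
     *   ConcurrentHashMap<Integer,String> map = new ConcurrentHashMap<>();
     *   for (int i = 0; i < 100; i++) map.put(i, "v" + i);
     *   for (Integer k : map.keySet()) {
     *       if (k % 2 == 0)
     *           map.remove(k);   // safe: no ConcurrentModificationException
     *   }
     */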

    /**
     * Base of key, value, and entry Iterators. Adds fields to
     * Traverser to support iterator.remove
     */
    static class BaseIterator<K,V> extends Traverser<K,V> {
        final ConcurrentHashMap<K,V> map;
        Node<K,V> lastReturned;
        BaseIterator(Node<K,V>[] tab, int size, int index, int limit,
                     ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            advance();
        }

        public final boolean hasNext() { return next != null; }
        public final boolean hasMoreElements() { return next != null; }

        public final void remove() {
            Node<K,V> p;
            if ((p = lastReturned) == null)
                throw new IllegalStateException();
            lastReturned = null;
            map.internalReplace((K)p.key, null, null);
        }
    }

    static final class KeyIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<K>, Enumeration<K> {
        KeyIterator(Node<K,V>[] tab, int index, int size, int limit,
                    ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final K next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = (K)p.key;
            lastReturned = p;
            advance();
            return k;
        }

        public final K nextElement() { return next(); }
    }

    static final class ValueIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<V>, Enumeration<V> {
        ValueIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final V next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            V v = p.val;
            lastReturned = p;
            advance();
            return v;
        }

        public final V nextElement() { return next(); }
    }

    static final class EntryIterator<K,V> extends BaseIterator<K,V>
        implements Iterator<Map.Entry<K,V>> {
        EntryIterator(Node<K,V>[] tab, int index, int size, int limit,
                      ConcurrentHashMap<K,V> map) {
            super(tab, index, size, limit, map);
        }

        public final Map.Entry<K,V> next() {
            Node<K,V> p;
            if ((p = next) == null)
                throw new NoSuchElementException();
            K k = (K)p.key;
            V v = p.val;
            lastReturned = p;
            advance();
            return new MapEntry<K,V>(k, v, map);
        }
    }
    static final class KeySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<K> {
        long est;               // size estimate
        KeySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                       long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public Spliterator<K> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new KeySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                        f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept((K)p.key);
        }

        public boolean tryAdvance(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept((K)p.key);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }
    static final class ValueSpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<V> {
        long est;               // size estimate
        ValueSpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est) {
            super(tab, size, index, limit);
            this.est = est;
        }

        public Spliterator<V> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new ValueSpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1);
        }

        public void forEachRemaining(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null;)
                action.accept(p.val);
        }

        public boolean tryAdvance(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(p.val);
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.CONCURRENT | Spliterator.NONNULL;
        }
    }
    static final class EntrySpliterator<K,V> extends Traverser<K,V>
        implements Spliterator<Map.Entry<K,V>> {
        final ConcurrentHashMap<K,V> map; // To export MapEntry
        long est;                         // size estimate
        EntrySpliterator(Node<K,V>[] tab, int size, int index, int limit,
                         long est, ConcurrentHashMap<K,V> map) {
            super(tab, size, index, limit);
            this.map = map;
            this.est = est;
        }

        public Spliterator<Map.Entry<K,V>> trySplit() {
            int i, f, h;
            return (h = ((i = baseIndex) + (f = baseLimit)) >>> 1) <= i ? null :
                new EntrySpliterator<K,V>(tab, baseSize, baseLimit = h,
                                          f, est >>>= 1, map);
        }

        public void forEachRemaining(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            for (Node<K,V> p; (p = advance()) != null; )
                action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
        }

        public boolean tryAdvance(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V> p;
            if ((p = advance()) == null)
                return false;
            action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
            return true;
        }

        public long estimateSize() { return est; }

        public int characteristics() {
            return Spliterator.DISTINCT | Spliterator.CONCURRENT |
                Spliterator.NONNULL;
        }
    }

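
    /*
     * Illustrative usage, not part of the original source: these spliterators
     * are what keySet().stream() and parallelStream() build on. CONCURRENT
     * means splits stay valid under concurrent updates; the size is only an
     * estimate:
     *
     *   ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
     *   m.put("a", 1); m.put("b", 2); m.put("c", 3);
     *   int sum = m.values().parallelStream()
     *              .mapToInt(Integer::intValue).sum();
     *   // sum == 6; ordering and split points are unspecified
     */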

    /* ---------------- Public operations -------------- */

    /**
     * Creates a new, empty map with the default initial table size (16).
     */
    public ConcurrentHashMap() {
    }

    /**
     * Creates a new, empty map with an initial table size
     * accommodating the specified number of elements without the need
     * to dynamically resize.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     */
    public ConcurrentHashMap(int initialCapacity) {
        if (initialCapacity < 0)
            throw new IllegalArgumentException();
        int cap = ((initialCapacity >= (MAXIMUM_CAPACITY >>> 1)) ?
                   MAXIMUM_CAPACITY :
                   tableSizeFor(initialCapacity + (initialCapacity >>> 1) + 1));
        this.sizeCtl = cap;
    }

    /**
     * Creates a new map with the same mappings as the given map.
     *
     * @param m the map
     */
    public ConcurrentHashMap(Map<? extends K, ? extends V> m) {
        this.sizeCtl = DEFAULT_CAPACITY;
        internalPutAll(m);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}) and
     * initial table density ({@code loadFactor}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative or the load factor is nonpositive
     *
     * @since 1.6
     */
    public ConcurrentHashMap(int initialCapacity, float loadFactor) {
        this(initialCapacity, loadFactor, 1);
    }

    /**
     * Creates a new, empty map with an initial table size based on
     * the given number of elements ({@code initialCapacity}), table
     * density ({@code loadFactor}), and number of concurrently
     * updating threads ({@code concurrencyLevel}).
     *
     * @param initialCapacity the initial capacity. The implementation
     * performs internal sizing to accommodate this many elements,
     * given the specified load factor.
     * @param loadFactor the load factor (table density) for
     * establishing the initial table size
     * @param concurrencyLevel the estimated number of concurrently
     * updating threads. The implementation may use this value as
     * a sizing hint.
     * @throws IllegalArgumentException if the initial capacity is
     * negative or the load factor or concurrencyLevel are
     * nonpositive
     */
    public ConcurrentHashMap(int initialCapacity,
                             float loadFactor, int concurrencyLevel) {
        if (!(loadFactor > 0.0f) || initialCapacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (initialCapacity < concurrencyLevel)   // Use at least as many bins
            initialCapacity = concurrencyLevel;   // as estimated threads
        long size = (long)(1.0 + (long)initialCapacity / loadFactor);
        int cap = (size >= (long)MAXIMUM_CAPACITY) ?
            MAXIMUM_CAPACITY : tableSizeFor((int)size);
        this.sizeCtl = cap;
    }
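
    /*
     * Worked example, not part of the original source: constructor sizing.
     * new ConcurrentHashMap<>(100, 0.75f, 16) computes
     * size = (long)(1.0 + 100/0.75) = 134, and tableSizeFor(134) = 256, so
     * the first table allocated holds 256 bins. The no-hint equivalent:
     *
     *   ConcurrentHashMap<String,Long> m =
     *       new ConcurrentHashMap<>(100);  // presizes via
     *                                      // tableSizeFor(100 + 50 + 1) = 256
     */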

    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @return the new set
     */
    public static <K> KeySetView<K,Boolean> newKeySet() {
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(), Boolean.TRUE);
    }

    /**
     * Creates a new {@link Set} backed by a ConcurrentHashMap
     * from the given type to {@code Boolean.TRUE}.
     *
     * @param initialCapacity The implementation performs internal
     * sizing to accommodate this many elements.
     * @throws IllegalArgumentException if the initial capacity of
     * elements is negative
     * @return the new set
     */
    public static <K> KeySetView<K,Boolean> newKeySet(int initialCapacity) {
        return new KeySetView<K,Boolean>
            (new ConcurrentHashMap<K,Boolean>(initialCapacity), Boolean.TRUE);
    }
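
    /*
     * Illustrative usage, not part of the original source: newKeySet() is
     * the standard way to get a concurrent Set, since there is no standalone
     * ConcurrentHashSet class:
     *
     *   Set<String> seen = ConcurrentHashMap.newKeySet();
     *   boolean firstTime = seen.add("job-42");  // false if already present
     */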

    /**
     * Returns {@code true} if this map contains no key-value mappings.
     *
     * @return {@code true} if this map contains no key-value mappings
     */
    public boolean isEmpty() {
        return sumCount() <= 0L; // ignore transient negative values
    }

    /**
     * Returns the number of key-value mappings in this map.  If the
     * map contains more than {@code Integer.MAX_VALUE} elements, returns
     * {@code Integer.MAX_VALUE}.
     *
     * @return the number of key-value mappings in this map
     */
    public int size() {
        long n = sumCount();
        return ((n < 0L) ? 0 :
                (n > (long)Integer.MAX_VALUE) ? Integer.MAX_VALUE :
                (int)n);
    }

    /**
     * Returns the number of mappings. This method should be used
     * instead of {@link #size} because a ConcurrentHashMap may
     * contain more mappings than can be represented as an int. The
     * value returned is an estimate; the actual count may differ if
     * there are concurrent insertions or removals.
     *
     * @return the number of mappings
     */
    public long mappingCount() {
        long n = sumCount();
        return (n < 0L) ? 0L : n; // ignore transient negative values
    }
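
    /*
     * Illustrative usage, not part of the original source: prefer
     * mappingCount() to size() when the map may grow very large:
     *
     *   ConcurrentHashMap<Long,Long> huge = new ConcurrentHashMap<>();
     *   long n = huge.mappingCount();  // never saturates
     *   int  s = huge.size();          // clamped to Integer.MAX_VALUE
     */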

    /**
     * Returns the value to which the specified key is mapped,
     * or {@code null} if this map contains no mapping for the key.
     *
     * <p>More formally, if this map contains a mapping from a key
     * {@code k} to a value {@code v} such that {@code key.equals(k)},
     * then this method returns {@code v}; otherwise it returns
     * {@code null}.  (There can be at most one such mapping.)
     *
     * @throws NullPointerException if the specified key is null
     */
    public V get(Object key) {
        return internalGet(key);
    }

    /**
     * Returns the value to which the specified key is mapped, or the
     * given default value if this map contains no mapping for the
     * key.
     *
     * @param key the key whose associated value is to be returned
     * @param defaultValue the value to return if this map contains
     * no mapping for the given key
     * @return the mapping for the key, if present; else the default value
     * @throws NullPointerException if the specified key is null
     */
    public V getOrDefault(Object key, V defaultValue) {
        V v;
        return (v = internalGet(key)) == null ? defaultValue : v;
    }
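
    /*
     * Illustrative usage, not part of the original source: because null
     * values are disallowed, a null return from get() unambiguously means
     * "absent", and getOrDefault() removes the null check entirely:
     *
     *   ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
     *   int hits = m.getOrDefault("page", 0);  // 0, no null handling needed
     */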

    /**
     * Tests if the specified object is a key in this table.
     *
     * @param  key possible key
     * @return {@code true} if and only if the specified object
     *         is a key in this table, as determined by the
     *         {@code equals} method; {@code false} otherwise
     * @throws NullPointerException if the specified key is null
     */
    public boolean containsKey(Object key) {
        return internalGet(key) != null;
    }

    /**
     * Returns {@code true} if this map maps one or more keys to the
     * specified value. Note: This method may require a full traversal
     * of the map, and is much slower than method {@code containsKey}.
     *
     * @param value value whose presence in this map is to be tested
     * @return {@code true} if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException if the specified value is null
     */
    public boolean containsValue(Object value) {
        if (value == null)
            throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V v;
                if ((v = p.val) == value || value.equals(v))
                    return true;
            }
        }
        return false;
    }

    /**
     * Legacy method testing if some key maps into the specified value
     * in this table.  This method is identical in functionality to
     * {@link #containsValue(Object)}, and exists solely to ensure
     * full compatibility with class {@link java.util.Hashtable},
     * which supported this method prior to introduction of the
     * Java Collections framework.
     *
     * @param  value a value to search for
     * @return {@code true} if and only if some key maps to the
     *         {@code value} argument in this table as
     *         determined by the {@code equals} method;
     *         {@code false} otherwise
     * @throws NullPointerException if the specified value is null
     */
    public boolean contains(Object value) {
        return containsValue(value);
    }

    /**
     * Maps the specified key to the specified value in this table.
     * Neither the key nor the value can be null.
     *
     * <p>The value can be retrieved by calling the {@code get} method
     * with a key that is equal to the original key.
     *
     * @param key key with which the specified value is to be associated
     * @param value value to be associated with the specified key
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key or value is null
     */
public V put(K key, V value) { |
|
17945 | 2773 |
return internalPut(key, value, false); |
2 | 2774 |
} |
2775 |
||
2776 |
/** |
|
2777 |
* {@inheritDoc} |
|
2778 |
* |
|
2779 |
* @return the previous value associated with the specified key, |
|
17717 | 2780 |
* or {@code null} if there was no mapping for the key |
2 | 2781 |
* @throws NullPointerException if the specified key or value is null |
2782 |
*/ |
|
2783 |
public V putIfAbsent(K key, V value) { |
|
17945 | 2784 |
return internalPut(key, value, true); |
2 | 2785 |
} |
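
    // Illustrative usage (not part of the original source; Session, sessions
    // and id are hypothetical): the atomic insert-if-absent keeps one
    // canonical value per key under concurrent registration.
    //
    //   Session fresh = new Session();
    //   Session prior = sessions.putIfAbsent(id, fresh);
    //   Session current = (prior != null) ? prior : fresh;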

    /**
     * Copies all of the mappings from the specified map to this one.
     * These mappings replace any mappings that this map had for any of the
     * keys currently in the specified map.
     *
     * @param m mappings to be stored in this map
     */
    public void putAll(Map<? extends K, ? extends V> m) {
        internalPutAll(m);
    }

    /**
     * If the specified key is not already associated with a value,
     * attempts to compute its value using the given mapping function
     * and enters it into this map unless {@code null}.  The entire
     * method invocation is performed atomically, so the function is
     * applied at most once per key.  Some attempted update operations
     * on this map by other threads may be blocked while computation
     * is in progress, so the computation should be short and simple,
     * and must not attempt to update any other mappings of this map.
     *
     * @param key key with which the specified value is to be associated
     * @param mappingFunction the function to compute a value
     * @return the current (existing or computed) value associated with
     *         the specified key, or null if the computed value is null
     * @throws NullPointerException if the specified key or mappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the mappingFunction does so,
     *         in which case the mapping is left unestablished
     */
    public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) {
        return internalComputeIfAbsent(key, mappingFunction);
    }
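
    // Illustrative usage (not part of the original source; `loadValue` is a
    // hypothetical expensive function): a memoizing cache.  Because the
    // invocation is atomic, the mapping function runs at most once per key
    // even under concurrent lookups.
    //
    //   ConcurrentHashMap<String,byte[]> cache = new ConcurrentHashMap<String,byte[]>();
    //   byte[] data = cache.computeIfAbsent(name, k -> loadValue(k));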

    /**
     * If the value for the specified key is present, attempts to
     * compute a new mapping given the key and its current mapped
     * value.  The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this map.
     *
     * @param key key with which a value may be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        return internalCompute(key, true, remappingFunction);
    }

    /**
     * Attempts to compute a mapping for the specified key and its
     * current mapped value (or {@code null} if there is no current
     * mapping).  The entire method invocation is performed atomically.
     * Some attempted update operations on this map by other threads
     * may be blocked while computation is in progress, so the
     * computation should be short and simple, and must not attempt to
     * update any other mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param remappingFunction the function to compute a value
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or remappingFunction
     *         is null
     * @throws IllegalStateException if the computation detectably
     *         attempts a recursive update to this map that would
     *         otherwise never complete
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) {
        return internalCompute(key, false, remappingFunction);
    }
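
    // Illustrative usage (not part of the original source; `credits` and
    // `user` are hypothetical): compute adjusts or removes a mapping in one
    // atomic step; a null result from the function removes the entry.
    //
    //   credits.compute(user, (k, v) -> {
    //       int next = (v == null ? 0 : v) - 1;
    //       return (next <= 0) ? null : next; // drop the entry at zero
    //   });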

    /**
     * If the specified key is not already associated with a
     * (non-null) value, associates it with the given value.
     * Otherwise, replaces the value with the results of the given
     * remapping function, or removes if {@code null}.  The entire
     * method invocation is performed atomically.  Some attempted
     * update operations on this map by other threads may be blocked
     * while computation is in progress, so the computation should be
     * short and simple, and must not attempt to update any other
     * mappings of this Map.
     *
     * @param key key with which the specified value is to be associated
     * @param value the value to use if absent
     * @param remappingFunction the function to recompute a value if present
     * @return the new value associated with the specified key, or null if none
     * @throws NullPointerException if the specified key or the
     *         remappingFunction is null
     * @throws RuntimeException or Error if the remappingFunction does so,
     *         in which case the mapping is unchanged
     */
    public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
        return internalMerge(key, value, remappingFunction);
    }
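
    // Illustrative usage (not part of the original source; `freq` and `word`
    // are hypothetical): merge makes concurrent counting a one-liner, since
    // the insert-or-combine step is atomic.
    //
    //   ConcurrentHashMap<String,Integer> freq = new ConcurrentHashMap<String,Integer>();
    //   freq.merge(word, 1, (a, b) -> a + b); // insert 1, or add 1 to the prior count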

    /**
     * Removes the key (and its corresponding value) from this map.
     * This method does nothing if the key is not in the map.
     *
     * @param key the key that needs to be removed
     * @return the previous value associated with {@code key}, or
     *         {@code null} if there was no mapping for {@code key}
     * @throws NullPointerException if the specified key is null
     */
    public V remove(Object key) {
        return internalReplace(key, null, null);
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if the specified key is null
     */
    public boolean remove(Object key, Object value) {
        if (key == null)
            throw new NullPointerException();
        return value != null && internalReplace(key, null, value) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @throws NullPointerException if any of the arguments are null
     */
    public boolean replace(K key, V oldValue, V newValue) {
        if (key == null || oldValue == null || newValue == null)
            throw new NullPointerException();
        return internalReplace(key, newValue, oldValue) != null;
    }

    /**
     * {@inheritDoc}
     *
     * @return the previous value associated with the specified key,
     *         or {@code null} if there was no mapping for the key
     * @throws NullPointerException if the specified key or value is null
     */
    public V replace(K key, V value) {
        if (key == null || value == null)
            throw new NullPointerException();
        return internalReplace(key, value, null);
    }

    /**
     * Removes all of the mappings from this map.
     */
    public void clear() {
        internalClear();
    }

    /**
     * Returns a {@link Set} view of the keys contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  The set supports element
     * removal, which removes the corresponding mapping from this map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.  It does not support the {@code add} or
     * {@code addAll} operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     *
     * @return the set view
     */
    public KeySetView<K,V> keySet() {
        KeySetView<K,V> ks = keySet;
        return (ks != null) ? ks : (keySet = new KeySetView<K,V>(this, null));
    }

    /**
     * Returns a {@link Set} view of the keys in this map, using the
     * given common mapped value for any additions (i.e., {@link
     * Collection#add} and {@link Collection#addAll(Collection)}).
     * This is of course only appropriate if it is acceptable to use
     * the same value for all additions from this view.
     *
     * @param mappedValue the mapped value to use for any additions
     * @return the set view
     * @throws NullPointerException if the mappedValue is null
     */
    public KeySetView<K,V> keySet(V mappedValue) {
        if (mappedValue == null)
            throw new NullPointerException();
        return new KeySetView<K,V>(this, mappedValue);
    }
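
    // Illustrative usage (not part of the original source; `backing` and
    // `seen` are hypothetical): with a fixed mapped value the key-set view
    // behaves as a concurrent Set that also accepts additions.
    //
    //   ConcurrentHashMap<String,Boolean> backing = new ConcurrentHashMap<String,Boolean>();
    //   Set<String> seen = backing.keySet(Boolean.TRUE);
    //   seen.add("first"); // stores the mapping ("first", TRUE) in backing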

    /**
     * Returns a {@link Collection} view of the values contained in this map.
     * The collection is backed by the map, so changes to the map are
     * reflected in the collection, and vice-versa.  The collection
     * supports element removal, which removes the corresponding
     * mapping from this map, via the {@code Iterator.remove},
     * {@code Collection.remove}, {@code removeAll},
     * {@code retainAll}, and {@code clear} operations.  It does not
     * support the {@code add} or {@code addAll} operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     *
     * @return the collection view
     */
    public Collection<V> values() {
        ValuesView<K,V> vs = values;
        return (vs != null) ? vs : (values = new ValuesView<K,V>(this));
    }

    /**
     * Returns a {@link Set} view of the mappings contained in this map.
     * The set is backed by the map, so changes to the map are
     * reflected in the set, and vice-versa.  The set supports element
     * removal, which removes the corresponding mapping from the map,
     * via the {@code Iterator.remove}, {@code Set.remove},
     * {@code removeAll}, {@code retainAll}, and {@code clear}
     * operations.
     *
     * <p>The view's {@code iterator} is a "weakly consistent" iterator
     * that will never throw {@link ConcurrentModificationException},
     * and guarantees to traverse elements as they existed upon
     * construction of the iterator, and may (but is not guaranteed to)
     * reflect any modifications subsequent to construction.
     *
     * @return the set view
     */
    public Set<Map.Entry<K,V>> entrySet() {
        EntrySetView<K,V> es = entrySet;
        return (es != null) ? es : (entrySet = new EntrySetView<K,V>(this));
    }

    /**
     * Returns an enumeration of the keys in this table.
     *
     * @return an enumeration of the keys in this table
     * @see #keySet()
     */
    public Enumeration<K> keys() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        return new KeyIterator<K,V>(t, f, 0, f, this);
    }

    /**
     * Returns an enumeration of the values in this table.
     *
     * @return an enumeration of the values in this table
     * @see #values()
     */
    public Enumeration<V> elements() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        return new ValueIterator<K,V>(t, f, 0, f, this);
    }

    /**
     * Returns the hash code value for this {@link Map}, i.e.,
     * the sum of, for each key-value pair in the map,
     * {@code key.hashCode() ^ value.hashCode()}.
     *
     * @return the hash code value for this map
     */
    public int hashCode() {
        int h = 0;
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; )
                h += p.key.hashCode() ^ p.val.hashCode();
        }
        return h;
    }

    /**
     * Returns a string representation of this map.  The string
     * representation consists of a list of key-value mappings (in no
     * particular order) enclosed in braces ("{@code {}}").  Adjacent
     * mappings are separated by the characters {@code ", "} (comma
     * and space).  Each key-value mapping is rendered as the key
     * followed by an equals sign ("{@code =}") followed by the
     * associated value.
     *
     * @return a string representation of this map
     */
    public String toString() {
        Node<K,V>[] t;
        int f = (t = table) == null ? 0 : t.length;
        Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
        StringBuilder sb = new StringBuilder();
        sb.append('{');
        Node<K,V> p;
        if ((p = it.advance()) != null) {
            for (;;) {
                K k = (K)p.key;
                V v = p.val;
                sb.append(k == this ? "(this Map)" : k);
                sb.append('=');
                sb.append(v == this ? "(this Map)" : v);
                if ((p = it.advance()) == null)
                    break;
                sb.append(',').append(' ');
            }
        }
        return sb.append('}').toString();
    }

    /**
     * Compares the specified object with this map for equality.
     * Returns {@code true} if the given object is a map with the same
     * mappings as this map.  This operation may return misleading
     * results if either map is concurrently modified during execution
     * of this method.
     *
     * @param o object to be compared for equality with this map
     * @return {@code true} if the specified object is equal to this map
     */
    public boolean equals(Object o) {
        if (o != this) {
            if (!(o instanceof Map))
                return false;
            Map<?,?> m = (Map<?,?>) o;
            Node<K,V>[] t;
            int f = (t = table) == null ? 0 : t.length;
            Traverser<K,V> it = new Traverser<K,V>(t, f, 0, f);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                V val = p.val;
                Object v = m.get(p.key);
                if (v == null || (v != val && !v.equals(val)))
                    return false;
            }
            for (Map.Entry<?,?> e : m.entrySet()) {
                Object mk, mv, v;
                if ((mk = e.getKey()) == null ||
                    (mv = e.getValue()) == null ||
                    (v = internalGet(mk)) == null ||
                    (mv != v && !mv.equals(v)))
                    return false;
            }
        }
        return true;
    }

    /* ---------------- Serialization Support -------------- */

    /**
     * Stripped-down version of helper class used in previous version,
     * declared for the sake of serialization compatibility
     */
    static class Segment<K,V> extends ReentrantLock implements Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        final float loadFactor;
        Segment(float lf) { this.loadFactor = lf; }
    }

    /**
     * Saves the state of the {@code ConcurrentHashMap} instance to a
     * stream (i.e., serializes it).
     * @param s the stream
     * @serialData
     * the key (Object) and value (Object)
     * for each key-value mapping, followed by a null pair.
     * The key-value mappings are emitted in no particular order.
     */
    private void writeObject(java.io.ObjectOutputStream s)
        throws java.io.IOException {
        // For serialization compatibility
        // Emulate segment calculation from previous version of this class
        int sshift = 0;
        int ssize = 1;
        while (ssize < DEFAULT_CONCURRENCY_LEVEL) {
            ++sshift;
            ssize <<= 1;
        }
        int segmentShift = 32 - sshift;
        int segmentMask = ssize - 1;
        Segment<K,V>[] segments = (Segment<K,V>[])
            new Segment<?,?>[DEFAULT_CONCURRENCY_LEVEL];
        for (int i = 0; i < segments.length; ++i)
            segments[i] = new Segment<K,V>(LOAD_FACTOR);
        s.putFields().put("segments", segments);
        s.putFields().put("segmentShift", segmentShift);
        s.putFields().put("segmentMask", segmentMask);
        s.writeFields();

        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                s.writeObject(p.key);
                s.writeObject(p.val);
            }
        }
        s.writeObject(null);
        s.writeObject(null);
        segments = null; // throw away
    }

    /**
     * Reconstitutes the instance from a stream (that is, deserializes it).
     * @param s the stream
     */
    private void readObject(java.io.ObjectInputStream s)
        throws java.io.IOException, ClassNotFoundException {
        s.defaultReadObject();

        // Create all nodes, then place in table once size is known
        long size = 0L;
        Node<K,V> p = null;
        for (;;) {
            K k = (K) s.readObject();
            V v = (V) s.readObject();
            if (k != null && v != null) {
                int h = spread(k.hashCode());
                p = new Node<K,V>(h, k, v, p);
                ++size;
            }
            else
                break;
        }
        if (p != null) {
            boolean init = false;
            int n;
            if (size >= (long)(MAXIMUM_CAPACITY >>> 1))
                n = MAXIMUM_CAPACITY;
            else {
                int sz = (int)size;
                n = tableSizeFor(sz + (sz >>> 1) + 1);
            }
            int sc = sizeCtl;
            boolean collide = false;
            if (n > sc &&
                U.compareAndSwapInt(this, SIZECTL, sc, -1)) {
                try {
                    if (table == null) {
                        init = true;
                        Node<K,V>[] tab = (Node<K,V>[])new Node[n];
                        int mask = n - 1;
                        while (p != null) {
                            int j = p.hash & mask;
                            Node<K,V> next = p.next;
                            Node<K,V> q = p.next = tabAt(tab, j);
                            setTabAt(tab, j, p);
                            if (!collide && q != null && q.hash == p.hash)
                                collide = true;
                            p = next;
                        }
                        table = tab;
                        addCount(size, -1);
                        sc = n - (n >>> 2);
                    }
                } finally {
                    sizeCtl = sc;
                }
                if (collide) { // rescan and convert to TreeBins
                    Node<K,V>[] tab = table;
                    for (int i = 0; i < tab.length; ++i) {
                        int c = 0;
                        for (Node<K,V> e = tabAt(tab, i); e != null; e = e.next) {
                            if (++c > TREE_THRESHOLD &&
                                (e.key instanceof Comparable)) {
                                replaceWithTreeBin(tab, i, e.key);
                                break;
                            }
                        }
                    }
                }
            }
            if (!init) { // Can only happen if unsafely published.
                while (p != null) {
                    internalPut((K)p.key, p.val, false);
                    p = p.next;
                }
            }
        }
    }

    // -------------------------------------------------------

    // Overrides of other default Map methods

    public void forEach(BiConsumer<? super K, ? super V> action) {
        if (action == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                action.accept((K)p.key, p.val);
            }
        }
    }

    public void replaceAll(BiFunction<? super K, ? super V, ? extends V> function) {
        if (function == null) throw new NullPointerException();
        Node<K,V>[] t;
        if ((t = table) != null) {
            Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
            for (Node<K,V> p; (p = it.advance()) != null; ) {
                K k = (K)p.key;
                internalPut(k, function.apply(k, p.val), false);
            }
        }
    }

    // -------------------------------------------------------

    // Parallel bulk operations

    /**
     * Computes initial batch value for bulk tasks. The returned value
     * is approximately exp2 of the number of times (minus one) to
     * split task by two before executing leaf action. This value is
     * faster to compute and more convenient to use as a guide to
     * splitting than is the depth, since it is used while dividing by
     * two anyway.
     */
    final int batchFor(long b) {
        long n;
        if (b == Long.MAX_VALUE || (n = sumCount()) <= 1L || n < b)
            return 0;
        int sp = ForkJoinPool.getCommonPoolParallelism() << 2; // slack of 4
        return (b <= 0L || (n /= b) >= sp) ? sp : (int)n;
    }
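
    // Worked example (added here for illustration; not in the original
    // source): on a host where ForkJoinPool.getCommonPoolParallelism() is 8,
    // sp is 32.  For n = 1,000,000 mappings and threshold b = 10,000,
    // n / b = 100 >= 32, so batchFor returns the slack bound 32; for
    // b = 100,000 it returns n / b = 10; and for b = Long.MAX_VALUE (or any
    // b larger than n) it returns 0, i.e. no splitting at all.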

    /**
     * Performs the given action for each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     */
    public void forEach(long parallelismThreshold,
                        BiConsumer<? super K,? super V> action) {
        if (action == null) throw new NullPointerException();
        new ForEachMappingTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }
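
    // Illustrative usage (not part of the original source; `map` is a
    // hypothetical ConcurrentHashMap<String,Integer>): a threshold of
    // Long.MAX_VALUE makes batchFor return 0, so the traversal runs without
    // splitting; a threshold of 1 permits maximal parallelism in the common
    // pool.
    //
    //   map.forEach(1, (k, v) -> System.out.println(k + " -> " + v));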

    /**
     * Performs the given action for each non-null transformation
     * of each (key, value).
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     */
    public <U> void forEach(long parallelismThreshold,
                            BiFunction<? super K, ? super V, ? extends U> transformer,
                            Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedMappingTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each (key, value), or null if none.  Upon
     * success, further element processing is suppressed and the
     * results of any other parallel invocations of the search
     * function are ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @return a non-null result from applying the given search
     * function on each (key, value), or null if none
     */
    public <U> U search(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }
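
    // Illustrative usage (not part of the original source; `map` is a
    // hypothetical ConcurrentHashMap<String,Integer>): once any worker
    // produces a non-null result, further element processing is suppressed.
    //
    //   String hit = map.search(100, (k, v) -> v > 999 ? k : null);
    //   // some key whose value exceeds 999, or null if none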

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public <U> U reduce(long parallelismThreshold,
                        BiFunction<? super K, ? super V, ? extends U> transformer,
                        BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }
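
    // Illustrative usage (not part of the original source; `map` is a
    // hypothetical ConcurrentHashMap<String,Integer>): transform each pair
    // to a "key=value" string and keep the longest.
    //
    //   String longest = map.reduce(500,
    //       (k, v) -> k + "=" + v,
    //       (a, b) -> a.length() >= b.length() ? a : b);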

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public double reduceToDouble(long parallelismThreshold,
                                 ToDoubleBiFunction<? super K, ? super V> transformer,
                                 double basis,
                                 DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public long reduceToLong(long parallelismThreshold,
                             ToLongBiFunction<? super K, ? super V> transformer,
                             long basis,
                             LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all (key, value) pairs using the given reducer to
     * combine values, and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all (key, value) pairs
     */
    public int reduceToInt(long parallelismThreshold,
                           ToIntBiFunction<? super K, ? super V> transformer,
                           int basis,
                           IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceMappingsToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     */
    public void forEachKey(long parallelismThreshold,
                           Consumer<? super K> action) {
        if (action == null) throw new NullPointerException();
        new ForEachKeyTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each key.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     */
    public <U> void forEachKey(long parallelismThreshold,
                               Function<? super K, ? extends U> transformer,
                               Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedKeyTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each key, or null if none.  Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @return a non-null result from applying the given search
     * function on each key, or null if none
     */
    public <U> U searchKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all keys using the given
     * reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all keys using the given
     * reducer to combine values, or null if none
     */
    public K reduceKeys(long parallelismThreshold,
                        BiFunction<? super K, ? super K, ? extends K> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceKeysTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public <U> U reduceKeys(long parallelismThreshold,
                            Function<? super K, ? extends U> transformer,
                            BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public double reduceKeysToDouble(long parallelismThreshold,
                                     ToDoubleFunction<? super K> transformer,
                                     double basis,
                                     DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public long reduceKeysToLong(long parallelismThreshold,
                                 ToLongFunction<? super K> transformer,
                                 long basis,
                                 LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all keys using the given reducer to combine values, and
     * the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all keys
     */
    public int reduceKeysToInt(long parallelismThreshold,
                               ToIntFunction<? super K> transformer,
                               int basis,
                               IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceKeysToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     */
    public void forEachValue(long parallelismThreshold,
                             Consumer<? super V> action) {
        if (action == null)
            throw new NullPointerException();
        new ForEachValueTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     */
    public <U> void forEachValue(long parallelismThreshold,
                                 Function<? super V, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedValueTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each value, or null if none.  Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @return a non-null result from applying the given search
     * function on each value, or null if none
     */
    public <U> U searchValues(long parallelismThreshold,
                              Function<? super V, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all values using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all values
     */
    public V reduceValues(long parallelismThreshold,
                          BiFunction<? super V, ? super V, ? extends V> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceValuesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values, or
     * null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public <U> U reduceValues(long parallelismThreshold,
                              Function<? super V, ? extends U> transformer,
                              BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public double reduceValuesToDouble(long parallelismThreshold,
                                       ToDoubleFunction<? super V> transformer,
                                       double basis,
                                       DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public long reduceValuesToLong(long parallelismThreshold,
                                   ToLongFunction<? super V> transformer,
                                   long basis,
                                   LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all values using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all values
     */
    public int reduceValuesToInt(long parallelismThreshold,
                                 ToIntFunction<? super V> transformer,
                                 int basis,
                                 IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceValuesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Performs the given action for each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param action the action
     */
    public void forEachEntry(long parallelismThreshold,
                             Consumer<? super Map.Entry<K,V>> action) {
        if (action == null) throw new NullPointerException();
        new ForEachEntryTask<K,V>(null, batchFor(parallelismThreshold), 0, 0, table,
                                  action).invoke();
    }

    /**
     * Performs the given action for each non-null transformation
     * of each entry.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case the action is not applied)
     * @param action the action
     */
    public <U> void forEachEntry(long parallelismThreshold,
                                 Function<Map.Entry<K,V>, ? extends U> transformer,
                                 Consumer<? super U> action) {
        if (transformer == null || action == null)
            throw new NullPointerException();
        new ForEachTransformedEntryTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             transformer, action).invoke();
    }

    /**
     * Returns a non-null result from applying the given search
     * function on each entry, or null if none.  Upon success,
     * further element processing is suppressed and the results of
     * any other parallel invocations of the search function are
     * ignored.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param searchFunction a function returning a non-null
     * result on success, else null
     * @return a non-null result from applying the given search
     * function on each entry, or null if none
     */
    public <U> U searchEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ? extends U> searchFunction) {
        if (searchFunction == null) throw new NullPointerException();
        return new SearchEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             searchFunction, new AtomicReference<U>()).invoke();
    }

    /**
     * Returns the result of accumulating all entries using the
     * given reducer to combine values, or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param reducer a commutative associative combining function
     * @return the result of accumulating all entries
     */
    public Map.Entry<K,V> reduceEntries(long parallelismThreshold,
                                        BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
        if (reducer == null) throw new NullPointerException();
        return new ReduceEntriesTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * or null if none.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element, or null if there is no transformation (in
     * which case it is not combined)
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     */
    public <U> U reduceEntries(long parallelismThreshold,
                               Function<Map.Entry<K,V>, ? extends U> transformer,
                               BiFunction<? super U, ? super U, ? extends U> reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesTask<K,V,U>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     */
    public double reduceEntriesToDouble(long parallelismThreshold,
                                        ToDoubleFunction<Map.Entry<K,V>> transformer,
                                        double basis,
                                        DoubleBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToDoubleTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     */
    public long reduceEntriesToLong(long parallelismThreshold,
                                    ToLongFunction<Map.Entry<K,V>> transformer,
                                    long basis,
                                    LongBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToLongTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /**
     * Returns the result of accumulating the given transformation
     * of all entries using the given reducer to combine values,
     * and the given basis as an identity value.
     *
     * @param parallelismThreshold the (estimated) number of elements
     * needed for this operation to be executed in parallel
     * @param transformer a function returning the transformation
     * for an element
     * @param basis the identity (initial default value) for the reduction
     * @param reducer a commutative associative combining function
     * @return the result of accumulating the given transformation
     * of all entries
     */
    public int reduceEntriesToInt(long parallelismThreshold,
                                  ToIntFunction<Map.Entry<K,V>> transformer,
                                  int basis,
                                  IntBinaryOperator reducer) {
        if (transformer == null || reducer == null)
            throw new NullPointerException();
        return new MapReduceEntriesToIntTask<K,V>
            (null, batchFor(parallelismThreshold), 0, 0, table,
             null, transformer, basis, reducer).invoke();
    }

    /* ----------------Views -------------- */

    /**
     * Base class for views.
     */
    abstract static class CollectionView<K,V,E>
        implements Collection<E>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        final ConcurrentHashMap<K,V> map;
        CollectionView(ConcurrentHashMap<K,V> map) { this.map = map; }

        /**
         * Returns the map backing this view.
         *
         * @return the map backing this view
         */
        public ConcurrentHashMap<K,V> getMap() { return map; }

        /**
         * Removes all of the elements from this view, by removing all
         * the mappings from the map backing this view.
         */
        public final void clear()      { map.clear(); }
        public final int size()        { return map.size(); }
        public final boolean isEmpty() { return map.isEmpty(); }

        // implementations below rely on concrete classes supplying these
        // abstract methods
        /**
         * Returns a "weakly consistent" iterator that will never
         * throw {@link ConcurrentModificationException}, and
         * guarantees to traverse elements as they existed upon
         * construction of the iterator, and may (but is not
         * guaranteed to) reflect any modifications subsequent to
         * construction.
         */
        public abstract Iterator<E> iterator();
        public abstract boolean contains(Object o);
        public abstract boolean remove(Object o);

        private static final String oomeMsg = "Required array size too large";

        public final Object[] toArray() {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int n = (int)sz;
            Object[] r = new Object[n];
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = e;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        public final <T> T[] toArray(T[] a) {
            long sz = map.mappingCount();
            if (sz > MAX_ARRAY_SIZE)
                throw new OutOfMemoryError(oomeMsg);
            int m = (int)sz;
            T[] r = (a.length >= m) ? a :
                (T[])java.lang.reflect.Array
                .newInstance(a.getClass().getComponentType(), m);
            int n = r.length;
            int i = 0;
            for (E e : this) {
                if (i == n) {
                    if (n >= MAX_ARRAY_SIZE)
                        throw new OutOfMemoryError(oomeMsg);
                    if (n >= MAX_ARRAY_SIZE - (MAX_ARRAY_SIZE >>> 1) - 1)
                        n = MAX_ARRAY_SIZE;
                    else
                        n += (n >>> 1) + 1;
                    r = Arrays.copyOf(r, n);
                }
                r[i++] = (T)e;
            }
            if (a == r && i < n) {
                r[i] = null; // null-terminate
                return r;
            }
            return (i == n) ? r : Arrays.copyOf(r, i);
        }

        /**
         * Returns a string representation of this collection.
         * The string representation consists of the string representations
         * of the collection's elements in the order they are returned by
         * its iterator, enclosed in square brackets ({@code "[]"}).
         * Adjacent elements are separated by the characters {@code ", "}
         * (comma and space).  Elements are converted to strings as by
         * {@link String#valueOf(Object)}.
         *
         * @return a string representation of this collection
         */
        public final String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append('[');
            Iterator<E> it = iterator();
            if (it.hasNext()) {
                for (;;) {
                    Object e = it.next();
                    sb.append(e == this ? "(this Collection)" : e);
                    if (!it.hasNext())
                        break;
                    sb.append(',').append(' ');
                }
            }
            return sb.append(']').toString();
        }

        public final boolean containsAll(Collection<?> c) {
            if (c != this) {
                for (Object e : c) {
                    if (e == null || !contains(e))
                        return false;
                }
            }
            return true;
        }

        public final boolean removeAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

        public final boolean retainAll(Collection<?> c) {
            boolean modified = false;
            for (Iterator<E> it = iterator(); it.hasNext();) {
                if (!c.contains(it.next())) {
                    it.remove();
                    modified = true;
                }
            }
            return modified;
        }

    }
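
    // Illustrative note (assumed types, not part of this class): every
    // view writes through to the backing map, so mutating a view mutates
    // the map:
    //
    //   ConcurrentHashMap<String,Integer> m = new ConcurrentHashMap<>();
    //   m.put("k", 1);
    //   m.keySet().clear();       // removes the mapping from m as well
    //   assert m.isEmpty();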

    /**
     * A view of a ConcurrentHashMap as a {@link Set} of keys, in
     * which additions may optionally be enabled by mapping to a
     * common value.  This class cannot be directly instantiated.
     * See {@link #keySet() keySet()},
     * {@link #keySet(Object) keySet(V)},
     * {@link #newKeySet() newKeySet()},
     * {@link #newKeySet(int) newKeySet(int)}.
     */
    public static class KeySetView<K,V> extends CollectionView<K,V,K>
        implements Set<K>, java.io.Serializable {
        private static final long serialVersionUID = 7249069246763182397L;
        private final V value;
        KeySetView(ConcurrentHashMap<K,V> map, V value) {  // non-public
            super(map);
            this.value = value;
        }

        /**
         * Returns the default mapped value for additions,
         * or {@code null} if additions are not supported.
         *
         * @return the default mapped value for additions, or {@code null}
         * if not supported
         */
        public V getMappedValue() { return value; }

        /**
         * {@inheritDoc}
         * @throws NullPointerException if the specified key is null
         */
        public boolean contains(Object o) { return map.containsKey(o); }

        /**
         * Removes the key from this map view, by removing the key (and its
         * corresponding value) from the backing map.  This method does
         * nothing if the key is not in the map.
         *
         * @param  o the key to be removed from the backing map
         * @return {@code true} if the backing map contained the specified key
         * @throws NullPointerException if the specified key is null
         */
        public boolean remove(Object o) { return map.remove(o) != null; }

        /**
         * @return an iterator over the keys of the backing map
         */
        public Iterator<K> iterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeyIterator<K,V>(t, f, 0, f, m);
        }

        /**
         * Adds the specified key to this set view by mapping the key to
         * the default mapped value in the backing map, if defined.
         *
         * @param e key to be added
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the specified key is null
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean add(K e) {
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            return map.internalPut(e, v, true) == null;
        }

        /**
         * Adds all of the elements in the specified collection to this set,
         * as if by calling {@link #add} on each one.
         *
         * @param c the elements to be inserted into this set
         * @return {@code true} if this set changed as a result of the call
         * @throws NullPointerException if the collection or any of its
         * elements are {@code null}
         * @throws UnsupportedOperationException if no default mapped value
         * for additions was provided
         */
        public boolean addAll(Collection<? extends K> c) {
            boolean added = false;
            V v;
            if ((v = value) == null)
                throw new UnsupportedOperationException();
            for (K e : c) {
                if (map.internalPut(e, v, true) == null)
                    added = true;
            }
            return added;
        }

        public int hashCode() {
            int h = 0;
            for (K e : this)
                h += e.hashCode();
            return h;
        }

        public boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<K> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new KeySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super K> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept((K)p.key);
            }
        }
    }
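
    // Illustrative usage (assumed element types): the public factories
    // newKeySet() and keySet(mappedValue) return this view; additions are
    // enabled only when a default mapped value was supplied:
    //
    //   ConcurrentHashMap.KeySetView<String,Boolean> s =
    //       ConcurrentHashMap.newKeySet();
    //   s.add("a");              // maps "a" to Boolean.TRUE internally
    //
    //   ConcurrentHashMap<String,Long> m = new ConcurrentHashMap<>();
    //   m.keySet().add("b");     // throws UnsupportedOperationException
    //   m.keySet(1L).add("b");   // maps "b" to 1L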

    /**
     * A view of a ConcurrentHashMap as a {@link Collection} of
     * values, in which additions are disabled. This class cannot be
     * directly instantiated. See {@link #values()}.
     */
    static final class ValuesView<K,V> extends CollectionView<K,V,V>
        implements Collection<V>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        ValuesView(ConcurrentHashMap<K,V> map) { super(map); }
        public final boolean contains(Object o) {
            return map.containsValue(o);
        }

        public final boolean remove(Object o) {
            if (o != null) {
                for (Iterator<V> it = iterator(); it.hasNext();) {
                    if (o.equals(it.next())) {
                        it.remove();
                        return true;
                    }
                }
            }
            return false;
        }

        public final Iterator<V> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueIterator<K,V>(t, f, 0, f, m);
        }

        public final boolean add(V e) {
            throw new UnsupportedOperationException();
        }
        public final boolean addAll(Collection<? extends V> c) {
            throw new UnsupportedOperationException();
        }

        public Spliterator<V> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new ValueSpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n);
        }

        public void forEach(Consumer<? super V> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(p.val);
            }
        }
    }
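
    // Illustrative note (assumed types): the values view supports removal
    // but not addition; remove(Object) deletes one matching mapping:
    //
    //   m.values().remove(1);    // removes some entry whose value is 1
    //   m.values().add(2);       // throws UnsupportedOperationException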

    /**
     * A view of a ConcurrentHashMap as a {@link Set} of (key, value)
     * entries.  This class cannot be directly instantiated. See
     * {@link #entrySet()}.
     */
    static final class EntrySetView<K,V> extends CollectionView<K,V,Map.Entry<K,V>>
        implements Set<Map.Entry<K,V>>, java.io.Serializable {
        private static final long serialVersionUID = 2249069246763182397L;
        EntrySetView(ConcurrentHashMap<K,V> map) { super(map); }

        public boolean contains(Object o) {
            Object k, v, r; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (r = map.get(k)) != null &&
                    (v = e.getValue()) != null &&
                    (v == r || v.equals(r)));
        }

        public boolean remove(Object o) {
            Object k, v; Map.Entry<?,?> e;
            return ((o instanceof Map.Entry) &&
                    (k = (e = (Map.Entry<?,?>)o).getKey()) != null &&
                    (v = e.getValue()) != null &&
                    map.remove(k, v));
        }

        /**
         * @return an iterator over the entries of the backing map
         */
        public Iterator<Map.Entry<K,V>> iterator() {
            ConcurrentHashMap<K,V> m = map;
            Node<K,V>[] t;
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntryIterator<K,V>(t, f, 0, f, m);
        }

        public boolean add(Entry<K,V> e) {
            return map.internalPut(e.getKey(), e.getValue(), false) == null;
        }

        public boolean addAll(Collection<? extends Entry<K,V>> c) {
            boolean added = false;
            for (Entry<K,V> e : c) {
                if (add(e))
                    added = true;
            }
            return added;
        }

        public final int hashCode() {
            int h = 0;
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; ) {
                    h += p.hashCode();
                }
            }
            return h;
        }

        public final boolean equals(Object o) {
            Set<?> c;
            return ((o instanceof Set) &&
                    ((c = (Set<?>)o) == this ||
                     (containsAll(c) && c.containsAll(this))));
        }

        public Spliterator<Map.Entry<K,V>> spliterator() {
            Node<K,V>[] t;
            ConcurrentHashMap<K,V> m = map;
            long n = m.sumCount();
            int f = (t = m.table) == null ? 0 : t.length;
            return new EntrySpliterator<K,V>(t, f, 0, f, n < 0L ? 0L : n, m);
        }

        public void forEach(Consumer<? super Map.Entry<K,V>> action) {
            if (action == null) throw new NullPointerException();
            Node<K,V>[] t;
            if ((t = map.table) != null) {
                Traverser<K,V> it = new Traverser<K,V>(t, t.length, 0, t.length);
                for (Node<K,V> p; (p = it.advance()) != null; )
                    action.accept(new MapEntry<K,V>((K)p.key, p.val, map));
            }
        }

    }
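
    // Illustrative note (assumed types): entries handed out by this view's
    // iterator support setValue, which writes through to the backing map:
    //
    //   for (Map.Entry<String,Integer> e : m.entrySet())
    //       e.setValue(e.getValue() + 1);   // updates the mapping in m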

    // -------------------------------------------------------

    /**
     * Base class for bulk tasks. Repeats some fields and code from
     * class Traverser, because we need to subclass CountedCompleter.
     */
    abstract static class BulkTask<K,V,R> extends CountedCompleter<R> {
        Node<K,V>[] tab;        // same as Traverser
        Node<K,V> next;
        int index;
        int baseIndex;
        int baseLimit;
        final int baseSize;
        int batch;              // split control

        BulkTask(BulkTask<K,V,?> par, int b, int i, int f, Node<K,V>[] t) {
            super(par);
            this.batch = b;
            this.index = this.baseIndex = i;
            if ((this.tab = t) == null)
                this.baseSize = this.baseLimit = 0;
            else if (par == null)
                this.baseSize = this.baseLimit = t.length;
            else {
                this.baseLimit = f;
                this.baseSize = par.baseSize;
            }
        }

        /**
         * Same as Traverser version
         */
        final Node<K,V> advance() {
            Node<K,V> e;
            if ((e = next) != null)
                e = e.next;
            for (;;) {
                Node<K,V>[] t; int i, n; Object ek;
                if (e != null)
                    return next = e;
                if (baseIndex >= baseLimit || (t = tab) == null ||
                    (n = t.length) <= (i = index) || i < 0)
                    return next = null;
                if ((e = tabAt(t, index)) != null && e.hash < 0) {
                    if ((ek = e.key) instanceof TreeBin)
                        e = ((TreeBin<K,V>)ek).first;
                    else {
                        tab = (Node<K,V>[])ek;
                        e = null;
                        continue;
                    }
                }
                if ((index += baseSize) >= n)
                    index = ++baseIndex;
            }
        }
    }
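
    // Split arithmetic (illustrative sketch, assumed values): each
    // compute() loop iteration below halves the batch and forks the upper
    // half of the remaining index range. For baseIndex=0, baseLimit=16,
    // batch=4, the forked subtasks cover [8,16), [4,8) and [2,4), leaving
    // [0,2) for the current task:
    //
    //   int i = 0, f = 16, batch = 4;
    //   while (batch > 0) {
    //       int h = (f + i) >>> 1;   // midpoint of remaining range
    //       if (h <= i) break;
    //       // fork subtask over [h, f)
    //       batch >>>= 1;
    //       f = h;                   // keep [i, h) for this task
    //   }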

    /*
     * Task classes. Coded in a regular but ugly format/style to
     * simplify checks that each variant differs in the right way from
     * others. The null screenings exist because compilers cannot tell
     * that we've already null-checked task arguments, so we force
     * simplest hoisted bypass to help avoid convoluted traps.
     */

    static final class ForEachKeyTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super K> action;
        ForEachKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super K> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super K> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachKeyTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept((K)p.key);
                propagateCompletion();
            }
        }
    }
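
    // Illustrative usage via the public API (defined elsewhere in this
    // file), assuming a ConcurrentHashMap<String,Integer> named "m":
    //
    //   m.forEachKey(1L, System.out::println);   // print keys in parallel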

    static final class ForEachValueTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super V> action;
        ForEachValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super V> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachValueTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null;)
                    action.accept(p.val);
                propagateCompletion();
            }
        }
    }

    static final class ForEachEntryTask<K,V>
        extends BulkTask<K,V,Void> {
        final Consumer<? super Entry<K,V>> action;
        ForEachEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Consumer<? super Entry<K,V>> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final Consumer<? super Entry<K,V>> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachEntryTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept(p);
                propagateCompletion();
            }
        }
    }

    static final class ForEachMappingTask<K,V>
        extends BulkTask<K,V,Void> {
        final BiConsumer<? super K, ? super V> action;
        ForEachMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiConsumer<? super K,? super V> action) {
            super(p, b, i, f, t);
            this.action = action;
        }
        public final void compute() {
            final BiConsumer<? super K, ? super V> action;
            if ((action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachMappingTask<K,V>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    action.accept((K)p.key, p.val);
                propagateCompletion();
            }
        }
    }

    static final class ForEachTransformedKeyTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super K, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedKeyTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedKeyTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply((K)p.key)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    static final class ForEachTransformedValueTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedValueTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedValueTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    static final class ForEachTransformedEntryTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedEntryTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Map.Entry<K,V>, ? extends U> transformer, Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedEntryTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }

    static final class ForEachTransformedMappingTask<K,V,U>
        extends BulkTask<K,V,Void> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final Consumer<? super U> action;
        ForEachTransformedMappingTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             Consumer<? super U> action) {
            super(p, b, i, f, t);
            this.transformer = transformer; this.action = action;
        }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final Consumer<? super U> action;
            if ((transformer = this.transformer) != null &&
                (action = this.action) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    new ForEachTransformedMappingTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         transformer, action).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply((K)p.key, p.val)) != null)
                        action.accept(u);
                }
                propagateCompletion();
            }
        }
    }
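
    // Illustrative usage via the public API (defined elsewhere in this
    // file): transform each mapping and consume the non-null results,
    // assuming a ConcurrentHashMap<String,Integer> named "m":
    //
    //   m.forEach(1L,
    //             (k, v) -> v > 0 ? k + "=" + v : null,   // transformer
    //             System.out::println);                   // action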

    static final class SearchKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super K, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super K, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchKeysTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply((K)p.key)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }
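
    // Illustrative usage via the public API (defined elsewhere in this
    // file): return any key longer than 8 characters, or null if none:
    //
    //   String hit = m.searchKeys(1L, k -> k.length() > 8 ? k : null);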

    static final class SearchValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchValuesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    static final class SearchEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Entry<K,V>, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             Function<Entry<K,V>, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final Function<Entry<K,V>, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchEntriesTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply(p)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        return;
                    }
                }
            }
        }
    }

    static final class SearchMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> searchFunction;
        final AtomicReference<U> result;
        SearchMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             BiFunction<? super K, ? super V, ? extends U> searchFunction,
             AtomicReference<U> result) {
            super(p, b, i, f, t);
            this.searchFunction = searchFunction; this.result = result;
        }
        public final U getRawResult() { return result.get(); }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> searchFunction;
            final AtomicReference<U> result;
            if ((searchFunction = this.searchFunction) != null &&
                (result = this.result) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    if (result.get() != null)
                        return;
                    addToPendingCount(1);
                    new SearchMappingsTask<K,V,U>
                        (this, batch >>>= 1, baseLimit = h, f, tab,
                         searchFunction, result).fork();
                }
                while (result.get() == null) {
                    U u;
                    Node<K,V> p;
                    if ((p = advance()) == null) {
                        propagateCompletion();
                        break;
                    }
                    if ((u = searchFunction.apply((K)p.key, p.val)) != null) {
                        if (result.compareAndSet(null, u))
                            quietlyCompleteRoot();
                        break;
                    }
                }
            }
        }
    }

    static final class ReduceKeysTask<K,V>
        extends BulkTask<K,V,K> {
        final BiFunction<? super K, ? super K, ? extends K> reducer;
        K result;
        ReduceKeysTask<K,V> rights, nextRight;
        ReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceKeysTask<K,V> nextRight,
             BiFunction<? super K, ? super K, ? extends K> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final K getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super K, ? extends K> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceKeysTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                K r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    K u = (K)p.key;
                    r = (r == null) ? u : u == null ? r : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    ReduceKeysTask<K,V>
                        t = (ReduceKeysTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        K tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
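
    // Illustrative usage via the public API (defined elsewhere in this
    // file): fold all keys with a reducer; null means the map was empty:
    //
    //   String joined = m.reduceKeys(1L, (a, b) -> a + "," + b);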

    static final class ReduceValuesTask<K,V>
        extends BulkTask<K,V,V> {
        final BiFunction<? super V, ? super V, ? extends V> reducer;
        V result;
        ReduceValuesTask<K,V> rights, nextRight;
        ReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceValuesTask<K,V> nextRight,
             BiFunction<? super V, ? super V, ? extends V> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final V getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super V, ? super V, ? extends V> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceValuesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                V r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    V v = p.val;
                    r = (r == null) ? v : reducer.apply(r, v);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    ReduceValuesTask<K,V>
                        t = (ReduceValuesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        V tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class ReduceEntriesTask<K,V>
        extends BulkTask<K,V,Map.Entry<K,V>> {
        final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
        Map.Entry<K,V> result;
        ReduceEntriesTask<K,V> rights, nextRight;
        ReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             ReduceEntriesTask<K,V> nextRight,
             BiFunction<Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.reducer = reducer;
        }
        public final Map.Entry<K,V> getRawResult() { return result; }
        public final void compute() {
            final BiFunction<Map.Entry<K,V>, Map.Entry<K,V>, ? extends Map.Entry<K,V>> reducer;
            if ((reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new ReduceEntriesTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, reducer)).fork();
                }
                Map.Entry<K,V> r = null;
                for (Node<K,V> p; (p = advance()) != null; )
                    r = (r == null) ? p : reducer.apply(r, p);
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    ReduceEntriesTask<K,V>
                        t = (ReduceEntriesTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        Map.Entry<K,V> tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceKeysTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super K, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceKeysTask<K,V,U> rights, nextRight;
        MapReduceKeysTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysTask<K,V,U> nextRight,
             Function<? super K, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super K, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply((K)p.key)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceKeysTask<K,V,U>
                        t = (MapReduceKeysTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceValuesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceValuesTask<K,V,U> rights, nextRight;
        MapReduceValuesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesTask<K,V,U> nextRight,
             Function<? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceValuesTask<K,V,U>
                        t = (MapReduceValuesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceEntriesTask<K,V,U>
        extends BulkTask<K,V,U> {
        final Function<Map.Entry<K,V>, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceEntriesTask<K,V,U> rights, nextRight;
        MapReduceEntriesTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesTask<K,V,U> nextRight,
             Function<Map.Entry<K,V>, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final Function<Map.Entry<K,V>, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply(p)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceEntriesTask<K,V,U>
                        t = (MapReduceEntriesTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceMappingsTask<K,V,U>
        extends BulkTask<K,V,U> {
        final BiFunction<? super K, ? super V, ? extends U> transformer;
        final BiFunction<? super U, ? super U, ? extends U> reducer;
        U result;
        MapReduceMappingsTask<K,V,U> rights, nextRight;
        MapReduceMappingsTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsTask<K,V,U> nextRight,
             BiFunction<? super K, ? super V, ? extends U> transformer,
             BiFunction<? super U, ? super U, ? extends U> reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.reducer = reducer;
        }
        public final U getRawResult() { return result; }
        public final void compute() {
            final BiFunction<? super K, ? super V, ? extends U> transformer;
            final BiFunction<? super U, ? super U, ? extends U> reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsTask<K,V,U>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, reducer)).fork();
                }
                U r = null;
                for (Node<K,V> p; (p = advance()) != null; ) {
                    U u;
                    if ((u = transformer.apply((K)p.key, p.val)) != null)
                        r = (r == null) ? u : reducer.apply(r, u);
                }
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceMappingsTask<K,V,U>
                        t = (MapReduceMappingsTask<K,V,U>)c,
                        s = t.rights;
                    while (s != null) {
                        U tr, sr;
                        if ((sr = s.result) != null)
                            t.result = (((tr = t.result) == null) ? sr :
                                        reducer.apply(tr, sr));
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }
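
    // Illustrative usage via the public API (defined elsewhere in this
    // file): transform each (key, value) pair and reduce the non-null
    // results, assuming a ConcurrentHashMap<String,Integer> named "m":
    //
    //   String longest = m.reduce(1L,
    //       (k, v) -> k + ":" + v,                        // transformer
    //       (a, b) -> a.length() >= b.length() ? a : b);  // reducer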
|
5251 |
||
5252 |
static final class MapReduceKeysToDoubleTask<K,V> |
|
5253 |
extends BulkTask<K,V,Double> { |
|
5254 |
final ToDoubleFunction<? super K> transformer; |
|
5255 |
final DoubleBinaryOperator reducer; |
|
5256 |
final double basis; |
|
5257 |
double result; |
|
5258 |
MapReduceKeysToDoubleTask<K,V> rights, nextRight; |
|
5259 |
MapReduceKeysToDoubleTask |
|
5260 |
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, |
|
5261 |
MapReduceKeysToDoubleTask<K,V> nextRight, |
|
5262 |
ToDoubleFunction<? super K> transformer, |
|
5263 |
double basis, |
|
5264 |
DoubleBinaryOperator reducer) { |
|
5265 |
super(p, b, i, f, t); this.nextRight = nextRight; |
|
5266 |
this.transformer = transformer; |
|
5267 |
this.basis = basis; this.reducer = reducer; |
|
5268 |
} |
|
5269 |
public final Double getRawResult() { return result; } |
|
5270 |
public final void compute() { |
|
5271 |
final ToDoubleFunction<? super K> transformer; |
|
5272 |
final DoubleBinaryOperator reducer; |
|
5273 |
if ((transformer = this.transformer) != null && |
|
5274 |
(reducer = this.reducer) != null) { |
|
5275 |
double r = this.basis; |
|
5276 |
for (int i = baseIndex, f, h; batch > 0 && |
|
5277 |
(h = ((f = baseLimit) + i) >>> 1) > i;) { |
|
5278 |
addToPendingCount(1); |
|
5279 |
(rights = new MapReduceKeysToDoubleTask<K,V> |
|
5280 |
(this, batch >>>= 1, baseLimit = h, f, tab, |
|
5281 |
rights, transformer, r, reducer)).fork(); |
|
5282 |
} |
|
5283 |
for (Node<K,V> p; (p = advance()) != null; ) |
|
5284 |
r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key)); |
|
5285 |
result = r; |
|
5286 |
CountedCompleter<?> c; |
|
5287 |
for (c = firstComplete(); c != null; c = c.nextComplete()) { |
|
5288 |
MapReduceKeysToDoubleTask<K,V> |
|
5289 |
t = (MapReduceKeysToDoubleTask<K,V>)c, |
|
5290 |
s = t.rights; |
|
5291 |
while (s != null) { |
|
5292 |
t.result = reducer.applyAsDouble(t.result, s.result); |
|
5293 |
s = t.rights = s.nextRight; |
|
5294 |
} |
|
5295 |
} |
|
5296 |
} |
|
5297 |
} |
|
5298 |
} |
|
5299 |
||
5300 |
static final class MapReduceValuesToDoubleTask<K,V> |
|
5301 |
extends BulkTask<K,V,Double> { |
|
5302 |
final ToDoubleFunction<? super V> transformer; |
|
5303 |
final DoubleBinaryOperator reducer; |
|
5304 |
final double basis; |
|
5305 |
double result; |
|
5306 |
MapReduceValuesToDoubleTask<K,V> rights, nextRight; |
|
5307 |
MapReduceValuesToDoubleTask |
|
5308 |
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, |
|
5309 |
MapReduceValuesToDoubleTask<K,V> nextRight, |
|
5310 |
ToDoubleFunction<? super V> transformer, |
|
5311 |
double basis, |
|
5312 |
DoubleBinaryOperator reducer) { |
|
5313 |
super(p, b, i, f, t); this.nextRight = nextRight; |
|
5314 |
this.transformer = transformer; |
|
5315 |
this.basis = basis; this.reducer = reducer; |
|
5316 |
} |
|
5317 |
public final Double getRawResult() { return result; } |
|
5318 |
public final void compute() { |
|
5319 |
final ToDoubleFunction<? super V> transformer; |
|
5320 |
final DoubleBinaryOperator reducer; |
|
5321 |
if ((transformer = this.transformer) != null && |
|
5322 |
(reducer = this.reducer) != null) { |
|
5323 |
double r = this.basis; |
|
5324 |
for (int i = baseIndex, f, h; batch > 0 && |
|
5325 |
(h = ((f = baseLimit) + i) >>> 1) > i;) { |
|
5326 |
addToPendingCount(1); |
|
5327 |
(rights = new MapReduceValuesToDoubleTask<K,V> |
|
5328 |
(this, batch >>>= 1, baseLimit = h, f, tab, |
|
5329 |
rights, transformer, r, reducer)).fork(); |
|
5330 |
} |
|
5331 |
for (Node<K,V> p; (p = advance()) != null; ) |
|
5332 |
r = reducer.applyAsDouble(r, transformer.applyAsDouble(p.val)); |
|
5333 |
result = r; |
|
5334 |
CountedCompleter<?> c; |
|
5335 |
for (c = firstComplete(); c != null; c = c.nextComplete()) { |
|
5336 |
MapReduceValuesToDoubleTask<K,V> |
|
5337 |
t = (MapReduceValuesToDoubleTask<K,V>)c, |
|
5338 |
s = t.rights; |
|
5339 |
while (s != null) { |
|
5340 |
t.result = reducer.applyAsDouble(t.result, s.result); |
|
5341 |
s = t.rights = s.nextRight; |
|
5342 |
} |
|
5343 |
} |
|
5344 |
} |
|
5345 |
} |
|
5346 |
} |
|
5347 |
||
5348 |
static final class MapReduceEntriesToDoubleTask<K,V> |
|
5349 |
extends BulkTask<K,V,Double> { |
|
5350 |
final ToDoubleFunction<Map.Entry<K,V>> transformer; |
|
5351 |
final DoubleBinaryOperator reducer; |
|
5352 |
final double basis; |
|
5353 |
double result; |
|
5354 |
MapReduceEntriesToDoubleTask<K,V> rights, nextRight; |
|
5355 |
MapReduceEntriesToDoubleTask |
|
5356 |
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, |
|
5357 |
MapReduceEntriesToDoubleTask<K,V> nextRight, |
|
5358 |
ToDoubleFunction<Map.Entry<K,V>> transformer, |
|
5359 |
double basis, |
|
5360 |
DoubleBinaryOperator reducer) { |
|
5361 |
super(p, b, i, f, t); this.nextRight = nextRight; |
|
5362 |
this.transformer = transformer; |
|
5363 |
this.basis = basis; this.reducer = reducer; |
|
5364 |
} |
|
5365 |
public final Double getRawResult() { return result; } |
|
5366 |
public final void compute() { |
|
5367 |
final ToDoubleFunction<Map.Entry<K,V>> transformer; |
|
5368 |
final DoubleBinaryOperator reducer; |
|
5369 |
if ((transformer = this.transformer) != null && |
|
5370 |
(reducer = this.reducer) != null) { |
|
5371 |
double r = this.basis; |
|
5372 |
for (int i = baseIndex, f, h; batch > 0 && |
|
5373 |
(h = ((f = baseLimit) + i) >>> 1) > i;) { |
|
5374 |
addToPendingCount(1); |
|
5375 |
(rights = new MapReduceEntriesToDoubleTask<K,V> |
|
5376 |
(this, batch >>>= 1, baseLimit = h, f, tab, |
|
5377 |
rights, transformer, r, reducer)).fork(); |
|
5378 |
} |
|
5379 |
for (Node<K,V> p; (p = advance()) != null; ) |
|
5380 |
r = reducer.applyAsDouble(r, transformer.applyAsDouble(p)); |
|
5381 |
result = r; |
|
5382 |
CountedCompleter<?> c; |
|
5383 |
for (c = firstComplete(); c != null; c = c.nextComplete()) { |
|
5384 |
MapReduceEntriesToDoubleTask<K,V> |
|
5385 |
t = (MapReduceEntriesToDoubleTask<K,V>)c, |
|
5386 |
s = t.rights; |
|
5387 |
while (s != null) { |
|
5388 |
t.result = reducer.applyAsDouble(t.result, s.result); |
|
5389 |
s = t.rights = s.nextRight; |
|
5390 |
} |
|
5391 |
} |
|
5392 |
} |
|
5393 |
} |
|
5394 |
} |
|
5395 |
||
5396 |
static final class MapReduceMappingsToDoubleTask<K,V> |
|
5397 |
extends BulkTask<K,V,Double> { |
|
5398 |
final ToDoubleBiFunction<? super K, ? super V> transformer; |
|
5399 |
final DoubleBinaryOperator reducer; |
|
5400 |
final double basis; |
|
5401 |
double result; |
|
5402 |
MapReduceMappingsToDoubleTask<K,V> rights, nextRight; |
|
5403 |
MapReduceMappingsToDoubleTask |
|
5404 |
(BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t, |
|
5405 |
MapReduceMappingsToDoubleTask<K,V> nextRight, |
|
5406 |
ToDoubleBiFunction<? super K, ? super V> transformer, |
|
5407 |
double basis, |
|
5408 |
DoubleBinaryOperator reducer) { |
|
5409 |
super(p, b, i, f, t); this.nextRight = nextRight; |
|
5410 |
this.transformer = transformer; |
|
5411 |
this.basis = basis; this.reducer = reducer; |
|
5412 |
} |
|
5413 |
public final Double getRawResult() { return result; } |
|
5414 |
public final void compute() { |
|
5415 |
final ToDoubleBiFunction<? super K, ? super V> transformer; |
|
5416 |
final DoubleBinaryOperator reducer; |
|
5417 |
if ((transformer = this.transformer) != null && |
|
5418 |
(reducer = this.reducer) != null) { |
|
5419 |
double r = this.basis; |
|
5420 |
for (int i = baseIndex, f, h; batch > 0 && |
|
5421 |
(h = ((f = baseLimit) + i) >>> 1) > i;) { |
|
5422 |
addToPendingCount(1); |
|
5423 |
(rights = new MapReduceMappingsToDoubleTask<K,V> |
|
5424 |
(this, batch >>>= 1, baseLimit = h, f, tab, |
|
5425 |
rights, transformer, r, reducer)).fork(); |
|
5426 |
} |
|
5427 |
for (Node<K,V> p; (p = advance()) != null; ) |
|
5428 |
r = reducer.applyAsDouble(r, transformer.applyAsDouble((K)p.key, p.val)); |
|
5429 |
result = r; |
|
5430 |
CountedCompleter<?> c; |
|
5431 |
for (c = firstComplete(); c != null; c = c.nextComplete()) { |
|
5432 |
MapReduceMappingsToDoubleTask<K,V> |
|
5433 |
t = (MapReduceMappingsToDoubleTask<K,V>)c, |
|
5434 |
s = t.rights; |
|
5435 |
while (s != null) { |
|
5436 |
t.result = reducer.applyAsDouble(t.result, s.result); |
|
5437 |
s = t.rights = s.nextRight; |
|
5438 |
} |
|
5439 |
} |
|
5440 |
} |
|
5441 |
} |
|
5442 |
} |
|
5443 |
||
5444 |
    static final class MapReduceKeysToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super K> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceKeysToLongTask<K,V> rights, nextRight;
        MapReduceKeysToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToLongTask<K,V> nextRight,
             ToLongFunction<? super K> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super K> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceKeysToLongTask<K,V>
                        t = (MapReduceKeysToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

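    /*
     * Note on the merge loop in these tasks: firstComplete() returns
     * this task when its pending count is zero, otherwise decrements
     * the count and returns null; nextComplete() walks upward through
     * completers the same way. At each completed task t, the inner
     * while loop drains t's chain of forked right-hand subtasks,
     * combining each s.result into t.result with the reducer, so a
     * task's result is final before its own parent completes.
     */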
    static final class MapReduceValuesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceValuesToLongTask<K,V> rights, nextRight;
        MapReduceValuesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToLongTask<K,V> nextRight,
             ToLongFunction<? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceValuesToLongTask<K,V>
                        t = (MapReduceValuesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

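    /*
     * Leaf traversal in these tasks relies on BulkTask's advance(),
     * which iterates the bins in [baseIndex, baseLimit) and follows
     * forwarding nodes into the next table when a resize is in
     * progress, so bins that are moved mid-traversal are still visited.
     */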
    static final class MapReduceEntriesToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongFunction<Map.Entry<K,V>> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceEntriesToLongTask<K,V> rights, nextRight;
        MapReduceEntriesToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToLongTask<K,V> nextRight,
             ToLongFunction<Map.Entry<K,V>> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongFunction<Map.Entry<K,V>> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong(p));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceEntriesToLongTask<K,V>
                        t = (MapReduceEntriesToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceMappingsToLongTask<K,V>
        extends BulkTask<K,V,Long> {
        final ToLongBiFunction<? super K, ? super V> transformer;
        final LongBinaryOperator reducer;
        final long basis;
        long result;
        MapReduceMappingsToLongTask<K,V> rights, nextRight;
        MapReduceMappingsToLongTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToLongTask<K,V> nextRight,
             ToLongBiFunction<? super K, ? super V> transformer,
             long basis,
             LongBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Long getRawResult() { return result; }
        public final void compute() {
            final ToLongBiFunction<? super K, ? super V> transformer;
            final LongBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                long r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToLongTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsLong(r, transformer.applyAsLong((K)p.key, p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceMappingsToLongTask<K,V>
                        t = (MapReduceMappingsToLongTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsLong(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

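    /*
     * Worked example of the split arithmetic in the loops above: with
     * baseIndex i == 0 and baseLimit f == 16, h == (16 + 0) >>> 1 == 8,
     * so this task keeps bins [0, 8) (by setting baseLimit = h) while
     * the forked subtask is constructed with i = h and f = 16 and takes
     * bins [8, 16); batch is halved (batch >>>= 1) on every split.
     */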
    static final class MapReduceKeysToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super K> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceKeysToIntTask<K,V> rights, nextRight;
        MapReduceKeysToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceKeysToIntTask<K,V> nextRight,
             ToIntFunction<? super K> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super K> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceKeysToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceKeysToIntTask<K,V>
                        t = (MapReduceKeysToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceValuesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceValuesToIntTask<K,V> rights, nextRight;
        MapReduceValuesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceValuesToIntTask<K,V> nextRight,
             ToIntFunction<? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceValuesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceValuesToIntTask<K,V>
                        t = (MapReduceValuesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceEntriesToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntFunction<Map.Entry<K,V>> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceEntriesToIntTask<K,V> rights, nextRight;
        MapReduceEntriesToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceEntriesToIntTask<K,V> nextRight,
             ToIntFunction<Map.Entry<K,V>> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntFunction<Map.Entry<K,V>> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceEntriesToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt(p));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceEntriesToIntTask<K,V>
                        t = (MapReduceEntriesToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    static final class MapReduceMappingsToIntTask<K,V>
        extends BulkTask<K,V,Integer> {
        final ToIntBiFunction<? super K, ? super V> transformer;
        final IntBinaryOperator reducer;
        final int basis;
        int result;
        MapReduceMappingsToIntTask<K,V> rights, nextRight;
        MapReduceMappingsToIntTask
            (BulkTask<K,V,?> p, int b, int i, int f, Node<K,V>[] t,
             MapReduceMappingsToIntTask<K,V> nextRight,
             ToIntBiFunction<? super K, ? super V> transformer,
             int basis,
             IntBinaryOperator reducer) {
            super(p, b, i, f, t); this.nextRight = nextRight;
            this.transformer = transformer;
            this.basis = basis; this.reducer = reducer;
        }
        public final Integer getRawResult() { return result; }
        public final void compute() {
            final ToIntBiFunction<? super K, ? super V> transformer;
            final IntBinaryOperator reducer;
            if ((transformer = this.transformer) != null &&
                (reducer = this.reducer) != null) {
                int r = this.basis;
                for (int i = baseIndex, f, h; batch > 0 &&
                         (h = ((f = baseLimit) + i) >>> 1) > i;) {
                    addToPendingCount(1);
                    (rights = new MapReduceMappingsToIntTask<K,V>
                     (this, batch >>>= 1, baseLimit = h, f, tab,
                      rights, transformer, r, reducer)).fork();
                }
                for (Node<K,V> p; (p = advance()) != null; )
                    r = reducer.applyAsInt(r, transformer.applyAsInt((K)p.key, p.val));
                result = r;
                CountedCompleter<?> c;
                for (c = firstComplete(); c != null; c = c.nextComplete()) {
                    MapReduceMappingsToIntTask<K,V>
                        t = (MapReduceMappingsToIntTask<K,V>)c,
                        s = t.rights;
                    while (s != null) {
                        t.result = reducer.applyAsInt(t.result, s.result);
                        s = t.rights = s.nextRight;
                    }
                }
            }
        }
    }

    // Unsafe mechanics
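    /*
     * The offsets below are cached once and used for the volatile reads
     * and CAS updates performed throughout this class (sizeCtl,
     * transferIndex, baseCount, counter cells, and table slots). Note
     * that sun.misc.Unsafe.getUnsafe() throws SecurityException unless
     * the caller is loaded by the bootstrap class loader, which is why
     * this direct lookup works here but not in ordinary application code.
     */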
    private static final sun.misc.Unsafe U;
    private static final long SIZECTL;
    private static final long TRANSFERINDEX;
    private static final long TRANSFERORIGIN;
    private static final long BASECOUNT;
    private static final long CELLSBUSY;
    private static final long CELLVALUE;
    private static final long ABASE;
    private static final int ASHIFT;

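    /*
     * The static initializer below resolves these offsets. ABASE and
     * ASHIFT support array element addressing in tabAt/casTabAt: element
     * i of a Node[] lives at byte offset ((long)i << ASHIFT) + ABASE,
     * which is why the index scale must be a power of two; ASHIFT is its
     * log2, computed as 31 - Integer.numberOfLeadingZeros(scale).
     */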
    static {
        try {
            U = sun.misc.Unsafe.getUnsafe();
            Class<?> k = ConcurrentHashMap.class;
            SIZECTL = U.objectFieldOffset
                (k.getDeclaredField("sizeCtl"));
            TRANSFERINDEX = U.objectFieldOffset
                (k.getDeclaredField("transferIndex"));
            TRANSFERORIGIN = U.objectFieldOffset
                (k.getDeclaredField("transferOrigin"));
            BASECOUNT = U.objectFieldOffset
                (k.getDeclaredField("baseCount"));
            CELLSBUSY = U.objectFieldOffset
                (k.getDeclaredField("cellsBusy"));
            Class<?> ck = Cell.class;
            CELLVALUE = U.objectFieldOffset
                (ck.getDeclaredField("value"));
            Class<?> sc = Node[].class;
            ABASE = U.arrayBaseOffset(sc);
            int scale = U.arrayIndexScale(sc);
            if ((scale & (scale - 1)) != 0)
                throw new Error("data type scale not a power of two");
            ASHIFT = 31 - Integer.numberOfLeadingZeros(scale);
        } catch (Exception e) {
            throw new Error(e);
        }
    }
}