author | bchristi |
Wed, 12 Jun 2013 11:11:59 -0700 | |
changeset 18166 | a24e00a7c5ae |
parent 17939 | bd750ec19d82 |
child 18280 | 6c3c0ff49eb5 |
permissions | -rw-r--r-- |
2 | 1 |
/* |
16867 | 2 |
* Copyright (c) 1994, 2013, Oracle and/or its affiliates. All rights reserved. |
2 | 3 |
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 |
* |
|
5 |
* This code is free software; you can redistribute it and/or modify it |
|
6 |
* under the terms of the GNU General Public License version 2 only, as |
|
5506 | 7 |
* published by the Free Software Foundation. Oracle designates this |
2 | 8 |
* particular file as subject to the "Classpath" exception as provided |
5506 | 9 |
* by Oracle in the LICENSE file that accompanied this code. |
2 | 10 |
* |
11 |
* This code is distributed in the hope that it will be useful, but WITHOUT |
|
12 |
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
13 |
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
14 |
* version 2 for more details (a copy is included in the LICENSE file that |
|
15 |
* accompanied this code). |
|
16 |
* |
|
17 |
* You should have received a copy of the GNU General Public License version |
|
18 |
* 2 along with this work; if not, write to the Free Software Foundation, |
|
19 |
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
20 |
* |
|
5506 | 21 |
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
22 |
* or visit www.oracle.com if you need additional information or have any |
|
23 |
* questions. |
|
2 | 24 |
*/ |
25 |
||
26 |
package java.util; |
|
16867 | 27 |
|
2 | 28 |
import java.io.*; |
18166
a24e00a7c5ae
8010325: Remove hash32() method and hash32 int field from java.lang.String
bchristi
parents:
17939
diff
changeset
|
29 |
import java.util.concurrent.ThreadLocalRandom; |
16867 | 30 |
import java.util.function.BiConsumer; |
31 |
import java.util.function.Function; |
|
32 |
import java.util.function.BiFunction; |
|
2 | 33 |
|
34 |
/** |
|
56
48451b4616e8
5080227: (coll spec) Bug in documentation for WeakHashMap
martin
parents:
2
diff
changeset
|
35 |
* This class implements a hash table, which maps keys to values. Any |
2 | 36 |
* non-<code>null</code> object can be used as a key or as a value. <p> |
37 |
* |
|
38 |
* To successfully store and retrieve objects from a hashtable, the |
|
39 |
* objects used as keys must implement the <code>hashCode</code> |
|
40 |
* method and the <code>equals</code> method. <p> |
|
41 |
* |
|
42 |
* An instance of <code>Hashtable</code> has two parameters that affect its |
|
43 |
* performance: <i>initial capacity</i> and <i>load factor</i>. The |
|
44 |
* <i>capacity</i> is the number of <i>buckets</i> in the hash table, and the |
|
45 |
* <i>initial capacity</i> is simply the capacity at the time the hash table |
|
46 |
* is created. Note that the hash table is <i>open</i>: in the case of a "hash |
|
47 |
* collision", a single bucket stores multiple entries, which must be searched |
|
48 |
* sequentially. The <i>load factor</i> is a measure of how full the hash |
|
49 |
* table is allowed to get before its capacity is automatically increased. |
|
50 |
* The initial capacity and load factor parameters are merely hints to |
|
51 |
* the implementation. The exact details as to when and whether the rehash |
|
52 |
* method is invoked are implementation-dependent.<p> |
|
53 |
* |
|
54 |
* Generally, the default load factor (.75) offers a good tradeoff between |
|
55 |
* time and space costs. Higher values decrease the space overhead but |
|
56 |
* increase the time cost to look up an entry (which is reflected in most |
|
57 |
* <tt>Hashtable</tt> operations, including <tt>get</tt> and <tt>put</tt>).<p> |
|
58 |
* |
|
59 |
* The initial capacity controls a tradeoff between wasted space and the |
|
60 |
* need for <code>rehash</code> operations, which are time-consuming. |
|
61 |
* No <code>rehash</code> operations will <i>ever</i> occur if the initial |
|
62 |
* capacity is greater than the maximum number of entries the |
|
63 |
* <tt>Hashtable</tt> will contain divided by its load factor. However, |
|
64 |
* setting the initial capacity too high can waste space.<p> |
|
65 |
* |
|
66 |
* If many entries are to be made into a <code>Hashtable</code>, |
|
67 |
* creating it with a sufficiently large capacity may allow the |
|
68 |
* entries to be inserted more efficiently than letting it perform |
|
69 |
* automatic rehashing as needed to grow the table. <p> |
|
70 |
* |
|
71 |
* This example creates a hashtable of numbers. It uses the names of |
|
72 |
* the numbers as keys: |
|
73 |
* <pre> {@code |
|
74 |
* Hashtable<String, Integer> numbers |
|
75 |
* = new Hashtable<String, Integer>(); |
|
76 |
* numbers.put("one", 1); |
|
77 |
* numbers.put("two", 2); |
|
78 |
* numbers.put("three", 3);}</pre> |
|
79 |
* |
|
80 |
* <p>To retrieve a number, use the following code: |
|
81 |
* <pre> {@code |
|
82 |
* Integer n = numbers.get("two"); |
|
83 |
* if (n != null) { |
|
84 |
* System.out.println("two = " + n); |
|
85 |
* }}</pre> |
|
86 |
* |
|
87 |
* <p>The iterators returned by the <tt>iterator</tt> method of the collections |
|
88 |
* returned by all of this class's "collection view methods" are |
|
89 |
* <em>fail-fast</em>: if the Hashtable is structurally modified at any time |
|
90 |
* after the iterator is created, in any way except through the iterator's own |
|
91 |
* <tt>remove</tt> method, the iterator will throw a {@link |
|
92 |
* ConcurrentModificationException}. Thus, in the face of concurrent |
|
93 |
* modification, the iterator fails quickly and cleanly, rather than risking |
|
94 |
* arbitrary, non-deterministic behavior at an undetermined time in the future. |
|
95 |
* The Enumerations returned by Hashtable's keys and elements methods are |
|
96 |
* <em>not</em> fail-fast. |
|
97 |
* |
|
98 |
* <p>Note that the fail-fast behavior of an iterator cannot be guaranteed |
|
99 |
* as it is, generally speaking, impossible to make any hard guarantees in the |
|
100 |
* presence of unsynchronized concurrent modification. Fail-fast iterators |
|
101 |
* throw <tt>ConcurrentModificationException</tt> on a best-effort basis. |
|
102 |
* Therefore, it would be wrong to write a program that depended on this |
|
103 |
* exception for its correctness: <i>the fail-fast behavior of iterators |
|
104 |
* should be used only to detect bugs.</i> |
|
105 |
* |
|
106 |
* <p>As of the Java 2 platform v1.2, this class was retrofitted to |
|
107 |
* implement the {@link Map} interface, making it a member of the |
|
64
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
108 |
* <a href="{@docRoot}/../technotes/guides/collections/index.html"> |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
109 |
* |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
110 |
* Java Collections Framework</a>. Unlike the new collection |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
111 |
* implementations, {@code Hashtable} is synchronized. If a |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
112 |
* thread-safe implementation is not needed, it is recommended to use |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
113 |
* {@link HashMap} in place of {@code Hashtable}. If a thread-safe |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
114 |
* highly-concurrent implementation is desired, then it is recommended |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
115 |
* to use {@link java.util.concurrent.ConcurrentHashMap} in place of |
3244b8bab101
6583872: (coll) Direct uninformed users away from Vector/Hashtable
martin
parents:
56
diff
changeset
|
116 |
* {@code Hashtable}. |
2 | 117 |
* |
118 |
* @author Arthur van Hoff |
|
119 |
* @author Josh Bloch |
|
120 |
* @author Neal Gafter |
|
121 |
* @see Object#equals(java.lang.Object) |
|
122 |
* @see Object#hashCode() |
|
123 |
* @see Hashtable#rehash() |
|
124 |
* @see Collection |
|
125 |
* @see Map |
|
126 |
* @see HashMap |
|
127 |
* @see TreeMap |
|
128 |
* @since JDK1.0 |
|
129 |
*/ |
|
130 |
public class Hashtable<K,V> |
|
131 |
extends Dictionary<K,V> |
|
132 |
implements Map<K,V>, Cloneable, java.io.Serializable { |
|
133 |
||
134 |
/** |
|
135 |
* The hash table data. |
|
136 |
*/ |
|
12448 | 137 |
private transient Entry<?,?>[] table; |
2 | 138 |
|
139 |
/** |
|
140 |
* The total number of entries in the hash table. |
|
141 |
*/ |
|
142 |
private transient int count; |
|
143 |
||
144 |
/** |
|
145 |
* The table is rehashed when its size exceeds this threshold. (The |
|
146 |
* value of this field is (int)(capacity * loadFactor).) |
|
147 |
* |
|
148 |
* @serial |
|
149 |
*/ |
|
150 |
private int threshold; |
|
151 |
||
152 |
/** |
|
153 |
* The load factor for the hashtable. |
|
154 |
* |
|
155 |
* @serial |
|
156 |
*/ |
|
157 |
private float loadFactor; |
|
158 |
||
159 |
/** |
|
160 |
* The number of times this Hashtable has been structurally modified |
|
161 |
* Structural modifications are those that change the number of entries in |
|
162 |
* the Hashtable or otherwise modify its internal structure (e.g., |
|
163 |
* rehash). This field is used to make iterators on Collection-views of |
|
164 |
* the Hashtable fail-fast. (See ConcurrentModificationException). |
|
165 |
*/ |
|
166 |
private transient int modCount = 0; |
|
167 |
||
168 |
/** use serialVersionUID from JDK 1.0.2 for interoperability */ |
|
169 |
private static final long serialVersionUID = 1421746759512286392L; |
|
170 |
||
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
171 |
private static class Holder { |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
172 |
// Unsafe mechanics |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
173 |
/** |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
174 |
* |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
175 |
*/ |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
176 |
static final sun.misc.Unsafe UNSAFE; |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
177 |
|
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
178 |
/** |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
179 |
* Offset of "final" hashSeed field we must set in |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
180 |
* readObject() method. |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
181 |
*/ |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
182 |
static final long HASHSEED_OFFSET; |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
183 |
|
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
184 |
static final boolean USE_HASHSEED; |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
185 |
|
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
186 |
static { |
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
187 |
String hashSeedProp = java.security.AccessController.doPrivileged( |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
188 |
new sun.security.action.GetPropertyAction( |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
189 |
"jdk.map.useRandomSeed")); |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
190 |
boolean localBool = (null != hashSeedProp) |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
191 |
? Boolean.parseBoolean(hashSeedProp) : false; |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
192 |
USE_HASHSEED = localBool; |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
193 |
|
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
194 |
if (USE_HASHSEED) { |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
195 |
try { |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
196 |
UNSAFE = sun.misc.Unsafe.getUnsafe(); |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
197 |
HASHSEED_OFFSET = UNSAFE.objectFieldOffset( |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
198 |
Hashtable.class.getDeclaredField("hashSeed")); |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
199 |
} catch (NoSuchFieldException | SecurityException e) { |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
200 |
throw new InternalError("Failed to record hashSeed offset", e); |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
201 |
} |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
202 |
} else { |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
203 |
UNSAFE = null; |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
204 |
HASHSEED_OFFSET = 0; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
205 |
} |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
206 |
} |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
207 |
} |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
208 |
|
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
209 |
    /**
     * A randomizing value associated with this instance that is applied to
     * hash code of keys to make hash collisions harder to find.
     *
     * Declared final, but when random seeding is enabled it is written
     * reflectively (via Holder.UNSAFE) during deserialization — be sure it
     * is never set more than once.  A value of 0 means seeding is disabled.
     */
    transient final int hashSeed;
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
216 |
|
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
217 |
/** |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
218 |
* Return an initial value for the hashSeed, or 0 if the random seed is not |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
219 |
* enabled. |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
220 |
*/ |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
221 |
final int initHashSeed() { |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
222 |
if (sun.misc.VM.isBooted() && Holder.USE_HASHSEED) { |
18166
a24e00a7c5ae
8010325: Remove hash32() method and hash32 int field from java.lang.String
bchristi
parents:
17939
diff
changeset
|
223 |
int seed = ThreadLocalRandom.current().nextInt(); |
a24e00a7c5ae
8010325: Remove hash32() method and hash32 int field from java.lang.String
bchristi
parents:
17939
diff
changeset
|
224 |
return (seed != 0) ? seed : 1; |
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
225 |
} |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
226 |
return 0; |
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
227 |
} |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
228 |
|
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
229 |
private int hash(Object k) { |
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
230 |
return hashSeed ^ k.hashCode(); |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
231 |
} |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
232 |
|
2 | 233 |
/** |
234 |
* Constructs a new, empty hashtable with the specified initial |
|
235 |
* capacity and the specified load factor. |
|
236 |
* |
|
237 |
* @param initialCapacity the initial capacity of the hashtable. |
|
238 |
* @param loadFactor the load factor of the hashtable. |
|
239 |
* @exception IllegalArgumentException if the initial capacity is less |
|
240 |
* than zero, or if the load factor is nonpositive. |
|
241 |
*/ |
|
242 |
public Hashtable(int initialCapacity, float loadFactor) { |
|
243 |
if (initialCapacity < 0) |
|
244 |
throw new IllegalArgumentException("Illegal Capacity: "+ |
|
245 |
initialCapacity); |
|
246 |
if (loadFactor <= 0 || Float.isNaN(loadFactor)) |
|
247 |
throw new IllegalArgumentException("Illegal Load: "+loadFactor); |
|
248 |
||
249 |
if (initialCapacity==0) |
|
250 |
initialCapacity = 1; |
|
251 |
this.loadFactor = loadFactor; |
|
12448 | 252 |
table = new Entry<?,?>[initialCapacity]; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
253 |
threshold = (int)Math.min(initialCapacity * loadFactor, MAX_ARRAY_SIZE + 1); |
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
254 |
hashSeed = initHashSeed(); |
2 | 255 |
} |
256 |
||
257 |
    /**
     * Constructs a new, empty hashtable with the specified initial capacity
     * and default load factor (0.75).
     *
     * @param     initialCapacity   the initial capacity of the hashtable.
     * @exception IllegalArgumentException if the initial capacity is less
     *              than zero.
     */
    public Hashtable(int initialCapacity) {
        // Delegates to the two-argument constructor with the default factor.
        this(initialCapacity, 0.75f);
    }
|
268 |
||
269 |
    /**
     * Constructs a new, empty hashtable with a default initial capacity (11)
     * and load factor (0.75).
     */
    public Hashtable() {
        this(11, 0.75f);
    }
|
276 |
||
277 |
    /**
     * Constructs a new hashtable with the same mappings as the given
     * Map.  The hashtable is created with an initial capacity sufficient to
     * hold the mappings in the given Map and a default load factor (0.75).
     *
     * @param t the map whose mappings are to be placed in this map.
     * @throws NullPointerException if the specified map is null.
     * @since   1.2
     */
    public Hashtable(Map<? extends K, ? extends V> t) {
        // 2*size keeps occupancy below the 0.75 threshold so putAll does not
        // trigger an immediate rehash; 11 matches the default capacity.
        this(Math.max(2*t.size(), 11), 0.75f);
        putAll(t);
    }
|
290 |
||
291 |
    /**
     * Returns the number of keys in this hashtable.
     *
     * @return  the number of keys in this hashtable.
     */
    public synchronized int size() {
        return count;
    }
|
299 |
||
300 |
    /**
     * Tests if this hashtable maps no keys to values.
     *
     * @return  <code>true</code> if this hashtable maps no keys to values;
     *          <code>false</code> otherwise.
     */
    public synchronized boolean isEmpty() {
        return count == 0;
    }
|
309 |
||
310 |
    /**
     * Returns an enumeration of the keys in this hashtable.
     *
     * Note: the returned Enumeration is <em>not</em> fail-fast (unlike the
     * iterators of the collection views).
     *
     * @return  an enumeration of the keys in this hashtable.
     * @see     Enumeration
     * @see     #elements()
     * @see     #keySet()
     * @see     Map
     */
    public synchronized Enumeration<K> keys() {
        return this.<K>getEnumeration(KEYS);
    }
|
322 |
||
323 |
    /**
     * Returns an enumeration of the values in this hashtable.
     * Use the Enumeration methods on the returned object to fetch the elements
     * sequentially.
     *
     * @return  an enumeration of the values in this hashtable.
     * @see     java.util.Enumeration
     * @see     #keys()
     * @see     #values()
     * @see     Map
     */
    public synchronized Enumeration<V> elements() {
        return this.<V>getEnumeration(VALUES);
    }
|
337 |
||
338 |
/** |
|
339 |
* Tests if some key maps into the specified value in this hashtable. |
|
340 |
* This operation is more expensive than the {@link #containsKey |
|
341 |
* containsKey} method. |
|
342 |
* |
|
343 |
* <p>Note that this method is identical in functionality to |
|
344 |
* {@link #containsValue containsValue}, (which is part of the |
|
345 |
* {@link Map} interface in the collections framework). |
|
346 |
* |
|
347 |
* @param value a value to search for |
|
348 |
* @return <code>true</code> if and only if some key maps to the |
|
349 |
* <code>value</code> argument in this hashtable as |
|
350 |
* determined by the <tt>equals</tt> method; |
|
351 |
* <code>false</code> otherwise. |
|
352 |
* @exception NullPointerException if the value is <code>null</code> |
|
353 |
*/ |
|
354 |
public synchronized boolean contains(Object value) { |
|
355 |
if (value == null) { |
|
356 |
throw new NullPointerException(); |
|
357 |
} |
|
358 |
||
12448 | 359 |
Entry<?,?> tab[] = table; |
2 | 360 |
for (int i = tab.length ; i-- > 0 ;) { |
12448 | 361 |
for (Entry<?,?> e = tab[i] ; e != null ; e = e.next) { |
2 | 362 |
if (e.value.equals(value)) { |
363 |
return true; |
|
364 |
} |
|
365 |
} |
|
366 |
} |
|
367 |
return false; |
|
368 |
} |
|
369 |
||
370 |
    /**
     * Returns true if this hashtable maps one or more keys to this value.
     *
     * <p>Note that this method is identical in functionality to {@link
     * #contains contains} (which predates the {@link Map} interface).
     * Synchronization is inherited from the delegated call.
     *
     * @param value value whose presence in this hashtable is to be tested
     * @return <tt>true</tt> if this map maps one or more keys to the
     *         specified value
     * @throws NullPointerException  if the value is <code>null</code>
     * @since 1.2
     */
    public boolean containsValue(Object value) {
        return contains(value);
    }
|
385 |
||
386 |
/** |
|
387 |
* Tests if the specified object is a key in this hashtable. |
|
388 |
* |
|
389 |
* @param key possible key |
|
390 |
* @return <code>true</code> if and only if the specified object |
|
391 |
* is a key in this hashtable, as determined by the |
|
392 |
* <tt>equals</tt> method; <code>false</code> otherwise. |
|
393 |
* @throws NullPointerException if the key is <code>null</code> |
|
394 |
* @see #contains(Object) |
|
395 |
*/ |
|
396 |
public synchronized boolean containsKey(Object key) { |
|
12448 | 397 |
Entry<?,?> tab[] = table; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
398 |
int hash = hash(key); |
2 | 399 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
12448 | 400 |
for (Entry<?,?> e = tab[index] ; e != null ; e = e.next) { |
2 | 401 |
if ((e.hash == hash) && e.key.equals(key)) { |
402 |
return true; |
|
403 |
} |
|
404 |
} |
|
405 |
return false; |
|
406 |
} |
|
407 |
||
408 |
/** |
|
409 |
* Returns the value to which the specified key is mapped, |
|
410 |
* or {@code null} if this map contains no mapping for the key. |
|
411 |
* |
|
412 |
* <p>More formally, if this map contains a mapping from a key |
|
413 |
* {@code k} to a value {@code v} such that {@code (key.equals(k))}, |
|
414 |
* then this method returns {@code v}; otherwise it returns |
|
415 |
* {@code null}. (There can be at most one such mapping.) |
|
416 |
* |
|
417 |
* @param key the key whose associated value is to be returned |
|
418 |
* @return the value to which the specified key is mapped, or |
|
419 |
* {@code null} if this map contains no mapping for the key |
|
420 |
* @throws NullPointerException if the specified key is null |
|
421 |
* @see #put(Object, Object) |
|
422 |
*/ |
|
12448 | 423 |
@SuppressWarnings("unchecked") |
2 | 424 |
public synchronized V get(Object key) { |
12448 | 425 |
Entry<?,?> tab[] = table; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
426 |
int hash = hash(key); |
2 | 427 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
12448 | 428 |
for (Entry<?,?> e = tab[index] ; e != null ; e = e.next) { |
2 | 429 |
if ((e.hash == hash) && e.key.equals(key)) { |
12448 | 430 |
return (V)e.value; |
2 | 431 |
} |
432 |
} |
|
433 |
return null; |
|
434 |
} |
|
435 |
||
436 |
    /**
     * The maximum size of array to allocate.
     * Some VMs reserve some header words in an array.
     * Attempts to allocate larger arrays may result in
     * OutOfMemoryError: Requested array size exceeds VM limit
     */
    private static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
f130bb07764b
6933217: Huge arrays handled poorly in core libraries
martin
parents:
715
diff
changeset
|
443 |
|
f130bb07764b
6933217: Huge arrays handled poorly in core libraries
martin
parents:
715
diff
changeset
|
444 |
    /**
     * Increases the capacity of and internally reorganizes this
     * hashtable, in order to accommodate and access its entries more
     * efficiently.  This method is called automatically when the
     * number of keys in the hashtable exceeds this hashtable's capacity
     * and load factor.
     */
    @SuppressWarnings("unchecked")
    protected void rehash() {
        int oldCapacity = table.length;
        Entry<?,?>[] oldMap = table;

        // overflow-conscious code: the subtraction form avoids wrapping when
        // (oldCapacity << 1) + 1 overflows int.
        int newCapacity = (oldCapacity << 1) + 1;
        if (newCapacity - MAX_ARRAY_SIZE > 0) {
            if (oldCapacity == MAX_ARRAY_SIZE)
                // Keep running with MAX_ARRAY_SIZE buckets
                return;
            newCapacity = MAX_ARRAY_SIZE;
        }
        Entry<?,?>[] newMap = new Entry<?,?>[newCapacity];

        // Structural modification: invalidates fail-fast iterators.
        modCount++;
        threshold = (int)Math.min(newCapacity * loadFactor, MAX_ARRAY_SIZE + 1);
        table = newMap;

        // Relink every entry into the new table.  Entries are inserted at
        // the head of their new bucket, so each chain's order is reversed.
        for (int i = oldCapacity ; i-- > 0 ;) {
            for (Entry<K,V> old = (Entry<K,V>)oldMap[i] ; old != null ; ) {
                Entry<K,V> e = old;
                old = old.next;   // advance before e.next is overwritten

                int index = (e.hash & 0x7FFFFFFF) % newCapacity;
                e.next = (Entry<K,V>)newMap[index];
                newMap[index] = e;
            }
        }
    }
|
481 |
||
16867 | 482 |
    /**
     * Inserts a new entry for a key known to be absent, growing the table
     * first if the load-factor threshold has been reached.  Caller must hold
     * the table's monitor (all callers are synchronized methods).
     */
    private void addEntry(int hash, K key, V value, int index) {
        // Structural modification: invalidates fail-fast iterators.
        modCount++;

        Entry<?,?> tab[] = table;
        if (count >= threshold) {
            // Rehash the table if the threshold is exceeded
            rehash();

            tab = table;
            // Recompute the bucket index for the enlarged table; the hash is
            // recomputed as well (presumably the same value, since hash() is
            // hashSeed ^ key.hashCode() — holds when hashCode is stable).
            hash = hash(key);
            index = (hash & 0x7FFFFFFF) % tab.length;
        }

        // Creates the new entry at the head of the bucket's chain.
        @SuppressWarnings("unchecked")
        Entry<K,V> e = (Entry<K,V>) tab[index];
        tab[index] = new Entry<>(hash, key, value, e);
        count++;
    }
|
501 |
||
2 | 502 |
/** |
503 |
* Maps the specified <code>key</code> to the specified |
|
504 |
* <code>value</code> in this hashtable. Neither the key nor the |
|
505 |
* value can be <code>null</code>. <p> |
|
506 |
* |
|
507 |
* The value can be retrieved by calling the <code>get</code> method |
|
508 |
* with a key that is equal to the original key. |
|
509 |
* |
|
510 |
* @param key the hashtable key |
|
511 |
* @param value the value |
|
512 |
* @return the previous value of the specified key in this hashtable, |
|
513 |
* or <code>null</code> if it did not have one |
|
514 |
* @exception NullPointerException if the key or value is |
|
515 |
* <code>null</code> |
|
516 |
* @see Object#equals(Object) |
|
517 |
* @see #get(Object) |
|
518 |
*/ |
|
519 |
public synchronized V put(K key, V value) { |
|
520 |
// Make sure the value is not null |
|
521 |
if (value == null) { |
|
522 |
throw new NullPointerException(); |
|
523 |
} |
|
524 |
||
525 |
// Makes sure the key is not already in the hashtable. |
|
12448 | 526 |
Entry<?,?> tab[] = table; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
527 |
int hash = hash(key); |
2 | 528 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
12448 | 529 |
@SuppressWarnings("unchecked") |
530 |
Entry<K,V> entry = (Entry<K,V>)tab[index]; |
|
531 |
for(; entry != null ; entry = entry.next) { |
|
532 |
if ((entry.hash == hash) && entry.key.equals(key)) { |
|
533 |
V old = entry.value; |
|
534 |
entry.value = value; |
|
2 | 535 |
return old; |
536 |
} |
|
537 |
} |
|
538 |
||
16867 | 539 |
addEntry(hash, key, value, index); |
2 | 540 |
return null; |
541 |
} |
|
542 |
||
543 |
/** |
|
544 |
* Removes the key (and its corresponding value) from this |
|
545 |
* hashtable. This method does nothing if the key is not in the hashtable. |
|
546 |
* |
|
547 |
* @param key the key that needs to be removed |
|
548 |
* @return the value to which the key had been mapped in this hashtable, |
|
549 |
* or <code>null</code> if the key did not have a mapping |
|
550 |
* @throws NullPointerException if the key is <code>null</code> |
|
551 |
*/ |
|
552 |
public synchronized V remove(Object key) { |
|
12448 | 553 |
Entry<?,?> tab[] = table; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
554 |
int hash = hash(key); |
2 | 555 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
12448 | 556 |
@SuppressWarnings("unchecked") |
557 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
558 |
for(Entry<K,V> prev = null ; e != null ; prev = e, e = e.next) { |
|
2 | 559 |
if ((e.hash == hash) && e.key.equals(key)) { |
560 |
modCount++; |
|
561 |
if (prev != null) { |
|
562 |
prev.next = e.next; |
|
563 |
} else { |
|
564 |
tab[index] = e.next; |
|
565 |
} |
|
566 |
count--; |
|
567 |
V oldValue = e.value; |
|
568 |
e.value = null; |
|
569 |
return oldValue; |
|
570 |
} |
|
571 |
} |
|
572 |
return null; |
|
573 |
} |
|
574 |
||
575 |
/** |
|
576 |
* Copies all of the mappings from the specified map to this hashtable. |
|
577 |
* These mappings will replace any mappings that this hashtable had for any |
|
578 |
* of the keys currently in the specified map. |
|
579 |
* |
|
580 |
* @param t mappings to be stored in this map |
|
581 |
* @throws NullPointerException if the specified map is null |
|
582 |
* @since 1.2 |
|
583 |
*/ |
|
584 |
public synchronized void putAll(Map<? extends K, ? extends V> t) { |
|
585 |
for (Map.Entry<? extends K, ? extends V> e : t.entrySet()) |
|
586 |
put(e.getKey(), e.getValue()); |
|
587 |
} |
|
588 |
||
589 |
/** |
|
590 |
* Clears this hashtable so that it contains no keys. |
|
591 |
*/ |
|
592 |
public synchronized void clear() { |
|
12448 | 593 |
Entry<?,?> tab[] = table; |
2 | 594 |
modCount++; |
595 |
for (int index = tab.length; --index >= 0; ) |
|
596 |
tab[index] = null; |
|
597 |
count = 0; |
|
598 |
} |
|
599 |
||
600 |
/** |
|
601 |
* Creates a shallow copy of this hashtable. All the structure of the |
|
602 |
* hashtable itself is copied, but the keys and values are not cloned. |
|
603 |
* This is a relatively expensive operation. |
|
604 |
* |
|
605 |
* @return a clone of the hashtable |
|
606 |
*/ |
|
607 |
public synchronized Object clone() { |
|
608 |
try { |
|
12448 | 609 |
Hashtable<?,?> t = (Hashtable<?,?>)super.clone(); |
610 |
t.table = new Entry<?,?>[table.length]; |
|
2 | 611 |
for (int i = table.length ; i-- > 0 ; ) { |
612 |
t.table[i] = (table[i] != null) |
|
12448 | 613 |
? (Entry<?,?>) table[i].clone() : null; |
2 | 614 |
} |
615 |
t.keySet = null; |
|
616 |
t.entrySet = null; |
|
617 |
t.values = null; |
|
618 |
t.modCount = 0; |
|
619 |
return t; |
|
620 |
} catch (CloneNotSupportedException e) { |
|
621 |
// this shouldn't happen, since we are Cloneable |
|
10419
12c063b39232
7084245: Update usages of InternalError to use exception chaining
sherman
parents:
9035
diff
changeset
|
622 |
throw new InternalError(e); |
2 | 623 |
} |
624 |
} |
|
625 |
||
626 |
/** |
|
627 |
* Returns a string representation of this <tt>Hashtable</tt> object |
|
628 |
* in the form of a set of entries, enclosed in braces and separated |
|
629 |
* by the ASCII characters "<tt>, </tt>" (comma and space). Each |
|
630 |
* entry is rendered as the key, an equals sign <tt>=</tt>, and the |
|
631 |
* associated element, where the <tt>toString</tt> method is used to |
|
632 |
* convert the key and element to strings. |
|
633 |
* |
|
634 |
* @return a string representation of this hashtable |
|
635 |
*/ |
|
636 |
public synchronized String toString() { |
|
637 |
int max = size() - 1; |
|
638 |
if (max == -1) |
|
639 |
return "{}"; |
|
640 |
||
641 |
StringBuilder sb = new StringBuilder(); |
|
642 |
Iterator<Map.Entry<K,V>> it = entrySet().iterator(); |
|
643 |
||
644 |
sb.append('{'); |
|
645 |
for (int i = 0; ; i++) { |
|
646 |
Map.Entry<K,V> e = it.next(); |
|
647 |
K key = e.getKey(); |
|
648 |
V value = e.getValue(); |
|
649 |
sb.append(key == this ? "(this Map)" : key.toString()); |
|
650 |
sb.append('='); |
|
651 |
sb.append(value == this ? "(this Map)" : value.toString()); |
|
652 |
||
653 |
if (i == max) |
|
654 |
return sb.append('}').toString(); |
|
655 |
sb.append(", "); |
|
656 |
} |
|
657 |
} |
|
658 |
||
659 |
||
660 |
private <T> Enumeration<T> getEnumeration(int type) { |
|
661 |
if (count == 0) { |
|
662 |
return Collections.emptyEnumeration(); |
|
663 |
} else { |
|
7803
56bc97d69d93
6880112: Project Coin: Port JDK core library code to use diamond operator
smarks
parents:
5506
diff
changeset
|
664 |
return new Enumerator<>(type, false); |
2 | 665 |
} |
666 |
} |
|
667 |
||
668 |
private <T> Iterator<T> getIterator(int type) { |
|
669 |
if (count == 0) { |
|
670 |
return Collections.emptyIterator(); |
|
671 |
} else { |
|
7803
56bc97d69d93
6880112: Project Coin: Port JDK core library code to use diamond operator
smarks
parents:
5506
diff
changeset
|
672 |
return new Enumerator<>(type, true); |
2 | 673 |
} |
674 |
} |
|
675 |
||
676 |
// Views |
|
677 |
||
678 |
/** |
|
679 |
* Each of these fields are initialized to contain an instance of the |
|
680 |
* appropriate view the first time this view is requested. The views are |
|
681 |
* stateless, so there's no reason to create more than one of each. |
|
682 |
*/ |
|
683 |
private transient volatile Set<K> keySet = null; |
|
684 |
private transient volatile Set<Map.Entry<K,V>> entrySet = null; |
|
685 |
private transient volatile Collection<V> values = null; |
|
686 |
||
687 |
/** |
|
688 |
* Returns a {@link Set} view of the keys contained in this map. |
|
689 |
* The set is backed by the map, so changes to the map are |
|
690 |
* reflected in the set, and vice-versa. If the map is modified |
|
691 |
* while an iteration over the set is in progress (except through |
|
692 |
* the iterator's own <tt>remove</tt> operation), the results of |
|
693 |
* the iteration are undefined. The set supports element removal, |
|
694 |
* which removes the corresponding mapping from the map, via the |
|
695 |
* <tt>Iterator.remove</tt>, <tt>Set.remove</tt>, |
|
696 |
* <tt>removeAll</tt>, <tt>retainAll</tt>, and <tt>clear</tt> |
|
697 |
* operations. It does not support the <tt>add</tt> or <tt>addAll</tt> |
|
698 |
* operations. |
|
699 |
* |
|
700 |
* @since 1.2 |
|
701 |
*/ |
|
702 |
public Set<K> keySet() { |
|
703 |
if (keySet == null) |
|
704 |
keySet = Collections.synchronizedSet(new KeySet(), this); |
|
705 |
return keySet; |
|
706 |
} |
|
707 |
||
708 |
private class KeySet extends AbstractSet<K> { |
|
709 |
public Iterator<K> iterator() { |
|
710 |
return getIterator(KEYS); |
|
711 |
} |
|
712 |
public int size() { |
|
713 |
return count; |
|
714 |
} |
|
715 |
public boolean contains(Object o) { |
|
716 |
return containsKey(o); |
|
717 |
} |
|
718 |
public boolean remove(Object o) { |
|
719 |
return Hashtable.this.remove(o) != null; |
|
720 |
} |
|
721 |
public void clear() { |
|
722 |
Hashtable.this.clear(); |
|
723 |
} |
|
724 |
} |
|
725 |
||
726 |
/** |
|
727 |
* Returns a {@link Set} view of the mappings contained in this map. |
|
728 |
* The set is backed by the map, so changes to the map are |
|
729 |
* reflected in the set, and vice-versa. If the map is modified |
|
730 |
* while an iteration over the set is in progress (except through |
|
731 |
* the iterator's own <tt>remove</tt> operation, or through the |
|
732 |
* <tt>setValue</tt> operation on a map entry returned by the |
|
733 |
* iterator) the results of the iteration are undefined. The set |
|
734 |
* supports element removal, which removes the corresponding |
|
735 |
* mapping from the map, via the <tt>Iterator.remove</tt>, |
|
736 |
* <tt>Set.remove</tt>, <tt>removeAll</tt>, <tt>retainAll</tt> and |
|
737 |
* <tt>clear</tt> operations. It does not support the |
|
738 |
* <tt>add</tt> or <tt>addAll</tt> operations. |
|
739 |
* |
|
740 |
* @since 1.2 |
|
741 |
*/ |
|
742 |
public Set<Map.Entry<K,V>> entrySet() { |
|
743 |
if (entrySet==null) |
|
744 |
entrySet = Collections.synchronizedSet(new EntrySet(), this); |
|
745 |
return entrySet; |
|
746 |
} |
|
747 |
||
748 |
private class EntrySet extends AbstractSet<Map.Entry<K,V>> { |
|
749 |
public Iterator<Map.Entry<K,V>> iterator() { |
|
750 |
return getIterator(ENTRIES); |
|
751 |
} |
|
752 |
||
753 |
public boolean add(Map.Entry<K,V> o) { |
|
754 |
return super.add(o); |
|
755 |
} |
|
756 |
||
757 |
public boolean contains(Object o) { |
|
758 |
if (!(o instanceof Map.Entry)) |
|
759 |
return false; |
|
12448 | 760 |
Map.Entry<?,?> entry = (Map.Entry<?,?>)o; |
2 | 761 |
Object key = entry.getKey(); |
12448 | 762 |
Entry<?,?>[] tab = table; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
763 |
int hash = hash(key); |
2 | 764 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
765 |
||
12448 | 766 |
for (Entry<?,?> e = tab[index]; e != null; e = e.next) |
2 | 767 |
if (e.hash==hash && e.equals(entry)) |
768 |
return true; |
|
769 |
return false; |
|
770 |
} |
|
771 |
||
772 |
public boolean remove(Object o) { |
|
773 |
if (!(o instanceof Map.Entry)) |
|
774 |
return false; |
|
12448 | 775 |
Map.Entry<?,?> entry = (Map.Entry<?,?>) o; |
776 |
Object key = entry.getKey(); |
|
777 |
Entry<?,?>[] tab = table; |
|
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
778 |
int hash = hash(key); |
2 | 779 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
780 |
||
12448 | 781 |
@SuppressWarnings("unchecked") |
782 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
783 |
for(Entry<K,V> prev = null; e != null; prev = e, e = e.next) { |
|
2 | 784 |
if (e.hash==hash && e.equals(entry)) { |
785 |
modCount++; |
|
786 |
if (prev != null) |
|
787 |
prev.next = e.next; |
|
788 |
else |
|
789 |
tab[index] = e.next; |
|
790 |
||
791 |
count--; |
|
792 |
e.value = null; |
|
793 |
return true; |
|
794 |
} |
|
795 |
} |
|
796 |
return false; |
|
797 |
} |
|
798 |
||
799 |
public int size() { |
|
800 |
return count; |
|
801 |
} |
|
802 |
||
803 |
public void clear() { |
|
804 |
Hashtable.this.clear(); |
|
805 |
} |
|
806 |
} |
|
807 |
||
808 |
/** |
|
809 |
* Returns a {@link Collection} view of the values contained in this map. |
|
810 |
* The collection is backed by the map, so changes to the map are |
|
811 |
* reflected in the collection, and vice-versa. If the map is |
|
812 |
* modified while an iteration over the collection is in progress |
|
813 |
* (except through the iterator's own <tt>remove</tt> operation), |
|
814 |
* the results of the iteration are undefined. The collection |
|
815 |
* supports element removal, which removes the corresponding |
|
816 |
* mapping from the map, via the <tt>Iterator.remove</tt>, |
|
817 |
* <tt>Collection.remove</tt>, <tt>removeAll</tt>, |
|
818 |
* <tt>retainAll</tt> and <tt>clear</tt> operations. It does not |
|
819 |
* support the <tt>add</tt> or <tt>addAll</tt> operations. |
|
820 |
* |
|
821 |
* @since 1.2 |
|
822 |
*/ |
|
823 |
public Collection<V> values() { |
|
824 |
if (values==null) |
|
825 |
values = Collections.synchronizedCollection(new ValueCollection(), |
|
826 |
this); |
|
827 |
return values; |
|
828 |
} |
|
829 |
||
830 |
private class ValueCollection extends AbstractCollection<V> { |
|
831 |
public Iterator<V> iterator() { |
|
832 |
return getIterator(VALUES); |
|
833 |
} |
|
834 |
public int size() { |
|
835 |
return count; |
|
836 |
} |
|
837 |
public boolean contains(Object o) { |
|
838 |
return containsValue(o); |
|
839 |
} |
|
840 |
public void clear() { |
|
841 |
Hashtable.this.clear(); |
|
842 |
} |
|
843 |
} |
|
844 |
||
845 |
// Comparison and hashing |
|
846 |
||
847 |
/** |
|
848 |
* Compares the specified Object with this Map for equality, |
|
849 |
* as per the definition in the Map interface. |
|
850 |
* |
|
851 |
* @param o object to be compared for equality with this hashtable |
|
852 |
* @return true if the specified Object is equal to this Map |
|
853 |
* @see Map#equals(Object) |
|
854 |
* @since 1.2 |
|
855 |
*/ |
|
856 |
public synchronized boolean equals(Object o) { |
|
857 |
if (o == this) |
|
858 |
return true; |
|
859 |
||
860 |
if (!(o instanceof Map)) |
|
861 |
return false; |
|
12448 | 862 |
Map<?,?> t = (Map<?,?>) o; |
2 | 863 |
if (t.size() != size()) |
864 |
return false; |
|
865 |
||
866 |
try { |
|
867 |
Iterator<Map.Entry<K,V>> i = entrySet().iterator(); |
|
868 |
while (i.hasNext()) { |
|
869 |
Map.Entry<K,V> e = i.next(); |
|
870 |
K key = e.getKey(); |
|
871 |
V value = e.getValue(); |
|
872 |
if (value == null) { |
|
873 |
if (!(t.get(key)==null && t.containsKey(key))) |
|
874 |
return false; |
|
875 |
} else { |
|
876 |
if (!value.equals(t.get(key))) |
|
877 |
return false; |
|
878 |
} |
|
879 |
} |
|
880 |
} catch (ClassCastException unused) { |
|
881 |
return false; |
|
882 |
} catch (NullPointerException unused) { |
|
883 |
return false; |
|
884 |
} |
|
885 |
||
886 |
return true; |
|
887 |
} |
|
888 |
||
889 |
/** |
|
890 |
* Returns the hash code value for this Map as per the definition in the |
|
891 |
* Map interface. |
|
892 |
* |
|
893 |
* @see Map#hashCode() |
|
894 |
* @since 1.2 |
|
895 |
*/ |
|
896 |
public synchronized int hashCode() { |
|
897 |
/* |
|
898 |
* This code detects the recursion caused by computing the hash code |
|
899 |
* of a self-referential hash table and prevents the stack overflow |
|
900 |
* that would otherwise result. This allows certain 1.1-era |
|
901 |
* applets with self-referential hash tables to work. This code |
|
902 |
* abuses the loadFactor field to do double-duty as a hashCode |
|
903 |
* in progress flag, so as not to worsen the space performance. |
|
904 |
* A negative load factor indicates that hash code computation is |
|
905 |
* in progress. |
|
906 |
*/ |
|
907 |
int h = 0; |
|
908 |
if (count == 0 || loadFactor < 0) |
|
909 |
return h; // Returns zero |
|
910 |
||
911 |
loadFactor = -loadFactor; // Mark hashCode computation in progress |
|
12448 | 912 |
Entry<?,?>[] tab = table; |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
913 |
for (Entry<?,?> entry : tab) { |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
914 |
while (entry != null) { |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
915 |
h += entry.hashCode(); |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
916 |
entry = entry.next; |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
917 |
} |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
918 |
} |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
919 |
|
2 | 920 |
loadFactor = -loadFactor; // Mark hashCode computation complete |
921 |
||
922 |
return h; |
|
923 |
} |
|
924 |
||
16867 | 925 |
@Override |
926 |
public synchronized V getOrDefault(Object key, V defaultValue) { |
|
927 |
V result = get(key); |
|
928 |
return (null == result) ? defaultValue : result; |
|
929 |
} |
|
930 |
||
931 |
@Override |
|
932 |
public synchronized void forEach(BiConsumer<? super K, ? super V> action) { |
|
933 |
Objects.requireNonNull(action); // explicit check required in case |
|
934 |
// table is empty. |
|
935 |
Entry<?,?>[] tab = table; |
|
936 |
for (Entry<?,?> entry : tab) { |
|
937 |
while (entry != null) { |
|
938 |
action.accept((K)entry.key, (V)entry.value); |
|
939 |
entry = entry.next; |
|
940 |
} |
|
941 |
} |
|
942 |
} |
|
943 |
||
944 |
@Override |
|
945 |
public synchronized void replaceAll( |
|
946 |
BiFunction<? super K, ? super V, ? extends V> function) { |
|
947 |
Map.super.replaceAll(function); |
|
948 |
} |
|
949 |
||
950 |
@Override |
|
951 |
public synchronized V putIfAbsent(K key, V value) { |
|
952 |
Objects.requireNonNull(value); |
|
953 |
||
954 |
// Makes sure the key is not already in the hashtable. |
|
955 |
Entry<?,?> tab[] = table; |
|
956 |
int hash = hash(key); |
|
957 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
958 |
@SuppressWarnings("unchecked") |
|
959 |
Entry<K,V> entry = (Entry<K,V>)tab[index]; |
|
960 |
for (; entry != null; entry = entry.next) { |
|
961 |
if ((entry.hash == hash) && entry.key.equals(key)) { |
|
962 |
V old = entry.value; |
|
963 |
if (old == null) { |
|
964 |
entry.value = value; |
|
965 |
} |
|
966 |
return old; |
|
967 |
} |
|
968 |
} |
|
969 |
||
970 |
addEntry(hash, key, value, index); |
|
971 |
return null; |
|
972 |
} |
|
973 |
||
974 |
@Override |
|
975 |
public synchronized boolean remove(Object key, Object value) { |
|
976 |
Objects.requireNonNull(value); |
|
977 |
||
978 |
Entry<?,?> tab[] = table; |
|
979 |
int hash = hash(key); |
|
980 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
981 |
@SuppressWarnings("unchecked") |
|
982 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
983 |
for (Entry<K,V> prev = null; e != null; prev = e, e = e.next) { |
|
984 |
if ((e.hash == hash) && e.key.equals(key) && e.value.equals(value)) { |
|
985 |
modCount++; |
|
986 |
if (prev != null) { |
|
987 |
prev.next = e.next; |
|
988 |
} else { |
|
989 |
tab[index] = e.next; |
|
990 |
} |
|
991 |
count--; |
|
992 |
e.value = null; |
|
993 |
return true; |
|
994 |
} |
|
995 |
} |
|
996 |
return false; |
|
997 |
} |
|
998 |
||
999 |
@Override |
|
1000 |
public synchronized boolean replace(K key, V oldValue, V newValue) { |
|
1001 |
Entry<?,?> tab[] = table; |
|
1002 |
int hash = hash(key); |
|
1003 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
1004 |
@SuppressWarnings("unchecked") |
|
1005 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
1006 |
for (; e != null; e = e.next) { |
|
1007 |
if ((e.hash == hash) && e.key.equals(key)) { |
|
1008 |
if (e.value.equals(oldValue)) { |
|
1009 |
e.value = newValue; |
|
1010 |
return true; |
|
1011 |
} else { |
|
1012 |
return false; |
|
1013 |
} |
|
1014 |
} |
|
1015 |
} |
|
1016 |
return false; |
|
1017 |
} |
|
1018 |
||
1019 |
@Override |
|
1020 |
public synchronized V replace(K key, V value) { |
|
1021 |
Entry<?,?> tab[] = table; |
|
1022 |
int hash = hash(key); |
|
1023 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
1024 |
@SuppressWarnings("unchecked") |
|
1025 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
1026 |
for (; e != null; e = e.next) { |
|
1027 |
if ((e.hash == hash) && e.key.equals(key)) { |
|
1028 |
V oldValue = e.value; |
|
1029 |
e.value = value; |
|
1030 |
return oldValue; |
|
1031 |
} |
|
1032 |
} |
|
1033 |
return null; |
|
1034 |
} |
|
1035 |
||
1036 |
@Override |
|
1037 |
public synchronized V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { |
|
1038 |
Objects.requireNonNull(mappingFunction); |
|
1039 |
||
1040 |
Entry<?,?> tab[] = table; |
|
1041 |
int hash = hash(key); |
|
1042 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
1043 |
@SuppressWarnings("unchecked") |
|
1044 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
1045 |
for (; e != null; e = e.next) { |
|
1046 |
if (e.hash == hash && e.key.equals(key)) { |
|
1047 |
// Hashtable not accept null value |
|
1048 |
return e.value; |
|
1049 |
} |
|
1050 |
} |
|
1051 |
||
1052 |
V newValue = mappingFunction.apply(key); |
|
1053 |
if (newValue != null) { |
|
1054 |
addEntry(hash, key, newValue, index); |
|
1055 |
} |
|
1056 |
||
1057 |
return newValue; |
|
1058 |
} |
|
1059 |
||
1060 |
@Override |
|
1061 |
public V computeIfPresent(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { |
|
1062 |
Objects.requireNonNull(remappingFunction); |
|
1063 |
||
1064 |
Entry<?,?> tab[] = table; |
|
1065 |
int hash = hash(key); |
|
1066 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
1067 |
@SuppressWarnings("unchecked") |
|
1068 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
1069 |
for (Entry<K,V> prev = null; e != null; prev = e, e = e.next) { |
|
1070 |
if (e.hash == hash && e.key.equals(key)) { |
|
1071 |
V newValue = remappingFunction.apply(key, e.value); |
|
1072 |
if (newValue == null) { |
|
1073 |
modCount++; |
|
1074 |
if (prev != null) { |
|
1075 |
prev.next = e.next; |
|
1076 |
} else { |
|
1077 |
tab[index] = e.next; |
|
1078 |
} |
|
1079 |
count--; |
|
1080 |
} else { |
|
1081 |
e.value = newValue; |
|
1082 |
} |
|
1083 |
return newValue; |
|
1084 |
} |
|
1085 |
} |
|
1086 |
return null; |
|
1087 |
} |
|
1088 |
||
1089 |
@Override |
|
1090 |
public V compute(K key, BiFunction<? super K, ? super V, ? extends V> remappingFunction) { |
|
1091 |
Objects.requireNonNull(remappingFunction); |
|
1092 |
||
1093 |
Entry<?,?> tab[] = table; |
|
1094 |
int hash = hash(key); |
|
1095 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
1096 |
@SuppressWarnings("unchecked") |
|
1097 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
1098 |
for (Entry<K,V> prev = null; e != null; prev = e, e = e.next) { |
|
1099 |
if (e.hash == hash && Objects.equals(e.key, key)) { |
|
1100 |
V newValue = remappingFunction.apply(key, e.value); |
|
1101 |
if (newValue == null) { |
|
1102 |
modCount++; |
|
1103 |
if (prev != null) { |
|
1104 |
prev.next = e.next; |
|
1105 |
} else { |
|
1106 |
tab[index] = e.next; |
|
1107 |
} |
|
1108 |
count--; |
|
1109 |
} else { |
|
1110 |
e.value = newValue; |
|
1111 |
} |
|
1112 |
return newValue; |
|
1113 |
} |
|
1114 |
} |
|
1115 |
||
1116 |
V newValue = remappingFunction.apply(key, null); |
|
1117 |
if (newValue != null) { |
|
1118 |
addEntry(hash, key, newValue, index); |
|
1119 |
} |
|
1120 |
||
1121 |
return newValue; |
|
1122 |
} |
|
1123 |
||
1124 |
@Override |
|
1125 |
public V merge(K key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) { |
|
1126 |
Objects.requireNonNull(remappingFunction); |
|
1127 |
||
1128 |
Entry<?,?> tab[] = table; |
|
1129 |
int hash = hash(key); |
|
1130 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
|
1131 |
@SuppressWarnings("unchecked") |
|
1132 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
1133 |
for (Entry<K,V> prev = null; e != null; prev = e, e = e.next) { |
|
1134 |
if (e.hash == hash && e.key.equals(key)) { |
|
1135 |
V newValue = remappingFunction.apply(e.value, value); |
|
1136 |
if (newValue == null) { |
|
1137 |
modCount++; |
|
1138 |
if (prev != null) { |
|
1139 |
prev.next = e.next; |
|
1140 |
} else { |
|
1141 |
tab[index] = e.next; |
|
1142 |
} |
|
1143 |
count--; |
|
1144 |
} else { |
|
1145 |
e.value = newValue; |
|
1146 |
} |
|
1147 |
return newValue; |
|
1148 |
} |
|
1149 |
} |
|
1150 |
||
1151 |
if (value != null) { |
|
1152 |
addEntry(hash, key, value, index); |
|
1153 |
} |
|
1154 |
||
1155 |
return value; |
|
1156 |
} |
|
1157 |
||
2 | 1158 |
/** |
1159 |
* Save the state of the Hashtable to a stream (i.e., serialize it). |
|
1160 |
* |
|
1161 |
* @serialData The <i>capacity</i> of the Hashtable (the length of the |
|
1162 |
* bucket array) is emitted (int), followed by the |
|
1163 |
* <i>size</i> of the Hashtable (the number of key-value |
|
1164 |
* mappings), followed by the key (Object) and value (Object) |
|
1165 |
* for each key-value mapping represented by the Hashtable |
|
1166 |
* The key-value mappings are emitted in no particular order. |
|
1167 |
*/ |
|
8394 | 1168 |
private void writeObject(java.io.ObjectOutputStream s) |
1169 |
throws IOException { |
|
1170 |
Entry<Object, Object> entryStack = null; |
|
1171 |
||
1172 |
synchronized (this) { |
|
1173 |
// Write out the length, threshold, loadfactor |
|
1174 |
s.defaultWriteObject(); |
|
1175 |
||
1176 |
// Write out length, count of elements |
|
1177 |
s.writeInt(table.length); |
|
1178 |
s.writeInt(count); |
|
2 | 1179 |
|
8394 | 1180 |
// Stack copies of the entries in the table |
1181 |
for (int index = 0; index < table.length; index++) { |
|
12448 | 1182 |
Entry<?,?> entry = table[index]; |
2 | 1183 |
|
8394 | 1184 |
while (entry != null) { |
1185 |
entryStack = |
|
1186 |
new Entry<>(0, entry.key, entry.value, entryStack); |
|
1187 |
entry = entry.next; |
|
1188 |
} |
|
2 | 1189 |
} |
1190 |
} |
|
8394 | 1191 |
|
1192 |
// Write out the key/value objects from the stacked entries |
|
1193 |
while (entryStack != null) { |
|
1194 |
s.writeObject(entryStack.key); |
|
1195 |
s.writeObject(entryStack.value); |
|
1196 |
entryStack = entryStack.next; |
|
1197 |
} |
|
2 | 1198 |
} |
1199 |
||
1200 |
/** |
|
1201 |
* Reconstitute the Hashtable from a stream (i.e., deserialize it). |
|
1202 |
*/ |
|
1203 |
private void readObject(java.io.ObjectInputStream s) |
|
1204 |
throws IOException, ClassNotFoundException |
|
1205 |
{ |
|
1206 |
// Read in the length, threshold, and loadfactor |
|
1207 |
s.defaultReadObject(); |
|
1208 |
||
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1209 |
// set hashMask |
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
1210 |
if (Holder.USE_HASHSEED) { |
18166
a24e00a7c5ae
8010325: Remove hash32() method and hash32 int field from java.lang.String
bchristi
parents:
17939
diff
changeset
|
1211 |
int seed = ThreadLocalRandom.current().nextInt(); |
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
1212 |
Holder.UNSAFE.putIntVolatile(this, Holder.HASHSEED_OFFSET, |
18166
a24e00a7c5ae
8010325: Remove hash32() method and hash32 int field from java.lang.String
bchristi
parents:
17939
diff
changeset
|
1213 |
(seed != 0) ? seed : 1); |
17939
bd750ec19d82
8005698: Handle Frequent HashMap Collisions with Balanced Trees
bchristi
parents:
16867
diff
changeset
|
1214 |
} |
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1215 |
|
2 | 1216 |
// Read the original length of the array and number of elements |
1217 |
int origlength = s.readInt(); |
|
1218 |
int elements = s.readInt(); |
|
1219 |
||
1220 |
// Compute new size with a bit of room 5% to grow but |
|
1221 |
// no larger than the original size. Make the length |
|
1222 |
// odd if it's large enough, this helps distribute the entries. |
|
1223 |
// Guard against the length ending up zero, that's not valid. |
|
1224 |
int length = (int)(elements * loadFactor) + (elements / 20) + 3; |
|
1225 |
if (length > elements && (length & 1) == 0) |
|
1226 |
length--; |
|
1227 |
if (origlength > 0 && length > origlength) |
|
1228 |
length = origlength; |
|
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1229 |
table = new Entry<?,?>[length]; |
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1230 |
threshold = (int)Math.min(length * loadFactor, MAX_ARRAY_SIZE + 1); |
2 | 1231 |
count = 0; |
1232 |
||
1233 |
// Read the number of elements and then all the key/value objects |
|
1234 |
for (; elements > 0; elements--) { |
|
12448 | 1235 |
@SuppressWarnings("unchecked") |
1236 |
K key = (K)s.readObject(); |
|
1237 |
@SuppressWarnings("unchecked") |
|
1238 |
V value = (V)s.readObject(); |
|
2 | 1239 |
// synch could be eliminated for performance |
1240 |
reconstitutionPut(table, key, value); |
|
1241 |
} |
|
1242 |
} |
|
1243 |
||
1244 |
/** |
|
1245 |
* The put method used by readObject. This is provided because put |
|
1246 |
* is overridable and should not be called in readObject since the |
|
1247 |
* subclass will not yet be initialized. |
|
1248 |
* |
|
1249 |
* <p>This differs from the regular put method in several ways. No |
|
1250 |
* checking for rehashing is necessary since the number of elements |
|
1251 |
* initially in the table is known. The modCount is not incremented |
|
1252 |
* because we are creating a new instance. Also, no return value |
|
1253 |
* is needed. |
|
1254 |
*/ |
|
12448 | 1255 |
private void reconstitutionPut(Entry<?,?>[] tab, K key, V value) |
2 | 1256 |
throws StreamCorruptedException |
1257 |
{ |
|
1258 |
if (value == null) { |
|
1259 |
throw new java.io.StreamCorruptedException(); |
|
1260 |
} |
|
1261 |
// Makes sure the key is not already in the hashtable. |
|
1262 |
// This should not happen in deserialized version. |
|
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1263 |
int hash = hash(key); |
2 | 1264 |
int index = (hash & 0x7FFFFFFF) % tab.length; |
12448 | 1265 |
for (Entry<?,?> e = tab[index] ; e != null ; e = e.next) { |
2 | 1266 |
if ((e.hash == hash) && e.key.equals(key)) { |
1267 |
throw new java.io.StreamCorruptedException(); |
|
1268 |
} |
|
1269 |
} |
|
1270 |
// Creates the new entry. |
|
12448 | 1271 |
@SuppressWarnings("unchecked") |
1272 |
Entry<K,V> e = (Entry<K,V>)tab[index]; |
|
7803
56bc97d69d93
6880112: Project Coin: Port JDK core library code to use diamond operator
smarks
parents:
5506
diff
changeset
|
1273 |
tab[index] = new Entry<>(hash, key, value, e); |
2 | 1274 |
count++; |
1275 |
} |
|
1276 |
||
1277 |
/** |
|
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1278 |
* Hashtable bucket collision list entry |
2 | 1279 |
*/ |
1280 |
private static class Entry<K,V> implements Map.Entry<K,V> { |
|
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1281 |
final int hash; |
13018 | 1282 |
final K key; |
2 | 1283 |
V value; |
1284 |
Entry<K,V> next; |
|
1285 |
||
1286 |
protected Entry(int hash, K key, V value, Entry<K,V> next) { |
|
1287 |
this.hash = hash; |
|
12859
c44b88bb9b5e
7126277: Alternative String hashing implementation
mduigou
parents:
12448
diff
changeset
|
1288 |
this.key = key; |
2 | 1289 |
this.value = value; |
1290 |
this.next = next; |
|
1291 |
} |
|
1292 |
||
12448 | 1293 |
@SuppressWarnings("unchecked") |
2 | 1294 |
protected Object clone() { |
7803
56bc97d69d93
6880112: Project Coin: Port JDK core library code to use diamond operator
smarks
parents:
5506
diff
changeset
|
1295 |
return new Entry<>(hash, key, value, |
2 | 1296 |
(next==null ? null : (Entry<K,V>) next.clone())); |
1297 |
} |
|
1298 |
||
1299 |
// Map.Entry Ops |
|
1300 |
||
1301 |
public K getKey() { |
|
1302 |
return key; |
|
1303 |
} |
|
1304 |
||
1305 |
public V getValue() { |
|
1306 |
return value; |
|
1307 |
} |
|
1308 |
||
1309 |
public V setValue(V value) { |
|
1310 |
if (value == null) |
|
1311 |
throw new NullPointerException(); |
|
1312 |
||
1313 |
V oldValue = this.value; |
|
1314 |
this.value = value; |
|
1315 |
return oldValue; |
|
1316 |
} |
|
1317 |
||
1318 |
public boolean equals(Object o) { |
|
1319 |
if (!(o instanceof Map.Entry)) |
|
1320 |
return false; |
|
12448 | 1321 |
Map.Entry<?,?> e = (Map.Entry<?,?>)o; |
2 | 1322 |
|
1323 |
return (key==null ? e.getKey()==null : key.equals(e.getKey())) && |
|
1324 |
(value==null ? e.getValue()==null : value.equals(e.getValue())); |
|
1325 |
} |
|
1326 |
||
1327 |
public int hashCode() { |
|
14188
82c029bd5f70
8000955: Hashtable.Entry.hashCode() does not conform to Map.Entry.hashCode() defined behaviour
ngmr
parents:
13018
diff
changeset
|
1328 |
return (Objects.hashCode(key) ^ Objects.hashCode(value)); |
2 | 1329 |
} |
1330 |
||
1331 |
public String toString() { |
|
1332 |
return key.toString()+"="+value.toString(); |
|
1333 |
} |
|
1334 |
} |
|
1335 |
||
1336 |
    // Types of Enumerations/Iterations
    private static final int KEYS = 0;      // enumerate/iterate over keys
    private static final int VALUES = 1;    // enumerate/iterate over values
    private static final int ENTRIES = 2;   // enumerate/iterate over Map.Entry objects
|
1340 |
||
1341 |
    /**
     * A hashtable enumerator class.  This class implements both the
     * Enumeration and Iterator interfaces, but individual instances
     * can be created with the Iterator methods disabled.  This is necessary
     * to avoid unintentionally increasing the capabilities granted a user
     * by passing an Enumeration.
     */
    private class Enumerator<T> implements Enumeration<T>, Iterator<T> {
        // Reference to the backing bucket array captured at creation;
        // structural changes are detected via modCount, not this reference.
        Entry<?,?>[] table = Hashtable.this.table;
        // Next bucket index to examine; buckets are scanned from the
        // end of the array downward.
        int index = table.length;
        // Next entry to hand out, or null when the current chain is spent.
        Entry<?,?> entry = null;
        // Entry most recently returned by nextElement(); used by remove().
        Entry<?,?> lastReturned = null;
        // One of KEYS, VALUES, or ENTRIES -- what nextElement() yields.
        int type;

        /**
         * Indicates whether this Enumerator is serving as an Iterator
         * or an Enumeration. (true -> Iterator).
         */
        boolean iterator;

        /**
         * The modCount value that the iterator believes that the backing
         * Hashtable should have.  If this expectation is violated, the iterator
         * has detected concurrent modification.
         */
        protected int expectedModCount = modCount;

        Enumerator(int type, boolean iterator) {
            this.type = type;
            this.iterator = iterator;
        }

        public boolean hasMoreElements() {
            Entry<?,?> e = entry;
            int i = index;
            Entry<?,?>[] t = table;
            /* Use locals for faster loop iteration */
            // Advance to the next non-empty bucket, remembering progress
            // in entry/index so nextElement() resumes where we stopped.
            while (e == null && i > 0) {
                e = t[--i];
            }
            entry = e;
            index = i;
            return e != null;
        }

        @SuppressWarnings("unchecked")
        public T nextElement() {
            Entry<?,?> et = entry;
            int i = index;
            Entry<?,?>[] t = table;
            /* Use locals for faster loop iteration */
            while (et == null && i > 0) {
                et = t[--i];
            }
            entry = et;
            index = i;
            if (et != null) {
                // Record the returned entry (for remove()) and step to the
                // next link in the collision chain.
                Entry<?,?> e = lastReturned = entry;
                entry = e.next;
                // Cast selected by 'type'; unchecked but safe because the
                // table only ever holds Entry<K,V>.
                return type == KEYS ? (T)e.key : (type == VALUES ? (T)e.value : (T)e);
            }
            throw new NoSuchElementException("Hashtable Enumerator");
        }

        // Iterator methods
        public boolean hasNext() {
            return hasMoreElements();
        }

        public T next() {
            // Fail-fast check only on the Iterator path; plain Enumeration
            // use via nextElement() deliberately does not check modCount.
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();
            return nextElement();
        }

        public void remove() {
            if (!iterator)
                throw new UnsupportedOperationException();
            if (lastReturned == null)
                throw new IllegalStateException("Hashtable Enumerator");
            if (modCount != expectedModCount)
                throw new ConcurrentModificationException();

            // Re-locate lastReturned in its bucket chain under the table
            // lock and unlink it.
            synchronized(Hashtable.this) {
                Entry<?,?>[] tab = Hashtable.this.table;
                int index = (lastReturned.hash & 0x7FFFFFFF) % tab.length;

                @SuppressWarnings("unchecked")
                Entry<K,V> e = (Entry<K,V>)tab[index];
                for(Entry<K,V> prev = null; e != null; prev = e, e = e.next) {
                    if (e == lastReturned) {
                        // Increment both counts so this iterator remains
                        // valid while any other live iterators fail fast.
                        modCount++;
                        expectedModCount++;
                        if (prev == null)
                            tab[index] = e.next;
                        else
                            prev.next = e.next;
                        count--;
                        lastReturned = null;
                        return;
                    }
                }
                // lastReturned no longer on its chain: the table was
                // modified out from under us.
                throw new ConcurrentModificationException();
            }
        }
    }
|
1447 |
} |