/*
 * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
...

    this, hashValue, (uintptr_t) obj, entry);
#endif /* USDT2 */
  return entry;
}

|
// Check to see if the hashtable is unbalanced.  The caller sets a flag to
// rehash at the next safepoint.  If this bucket is 60 times longer than the
// expected average bucket length, the hashtable is considered unbalanced.
// This is a somewhat arbitrary heuristic, but if one bucket gets to
// rehash_count, which is currently 100, there's probably something wrong.

bool BasicHashtable::check_rehash_table(int count) {
  assert(table_size() != 0, "underflow");
  if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
    // Set a flag for the next safepoint, which should be at some guaranteed
    // safepoint interval.
    return true;
  }
  return false;
}
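
// Worked example of the heuristic above (illustrative numbers only, not taken
// from this changeset): with number_of_entries() == 20000 and
// table_size() == 20011, the expected average bucket length is about 1, so
// rehash_multiple (60) puts the threshold near 60 entries; a bucket chain of,
// say, 100 entries then makes check_rehash_table() return true and the caller
// flags the table for rehashing at the next safepoint.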
|
// Create a new table and, using the alternate hash code, populate the new
// table with the existing elements.  This can be used to change the hash
// code and could in the future change the size of the table.

template <class T> void Hashtable<T>::move_to(Hashtable<T>* new_table) {
  int saved_entry_count = number_of_entries();

  // Iterate through the table and move each existing entry into the new table
  for (int i = 0; i < new_table->table_size(); ++i) {
    for (HashtableEntry<T>* p = bucket(i); p != NULL; ) {
      HashtableEntry<T>* next = p->next();
      T string = p->literal();
      // Use the alternate hashing algorithm on the symbol in the first table
      unsigned int hashValue = new_hash(string);
      // Get a new index relative to the new table (can also change size)
      int index = new_table->hash_to_index(hashValue);
      p->set_hash(hashValue);
      unlink_entry(p);
      new_table->add_entry(index, p);
      p = next;
    }
  }
  // Give the new table the free list as well.
  new_table->copy_freelist(this);
  assert(new_table->number_of_entries() == saved_entry_count, "lost entry on dictionary copy?");

  // Destroy the memory used by the buckets in the old hashtable.  The memory
  // for the entries is now used by the new table and is not destroyed.
  // Reusing this memory helps resizing the SystemDictionary avoid a memory
  // allocation spike at a safepoint.
  free_buckets();
}
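
// Illustrative sketch (not part of this file) of how a caller could combine
// check_rehash_table() and move_to(); the names SymbolTable, the_table() and
// _needs_rehashing below are stand-ins for whatever the real call sites use:
//
//   // While walking a bucket, notice an overlong chain and request a rehash.
//   if (!_needs_rehashing) {
//     _needs_rehashing = check_rehash_table(bucket_chain_length);
//   }
//
//   // Later, at a safepoint: build a replacement table, then migrate every
//   // entry (rehashed with the alternate hash code) plus the free list.
//   SymbolTable* new_table = new SymbolTable();
//   the_table()->move_to(new_table);
//   // ... install new_table as the live table ...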

// Reverse the order of elements in the hash buckets.

void BasicHashtable::reverse() {
