#include "memory/sharedHeap.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/java.hpp"
#include "services/management.hpp"
#include "utilities/copy.hpp"
#include "utilities/workgroup.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

SharedHeap* SharedHeap::_sh;

// The set of potentially parallel tasks in root scanning.
enum SH_process_roots_tasks {
  SH_PS_Universe_oops_do,
  SH_PS_JNIHandles_oops_do,
  SH_PS_ObjectSynchronizer_oops_do,
  SH_PS_FlatProfiler_oops_do,
  SH_PS_Management_oops_do,
  SH_PS_SystemDictionary_oops_do,
  SH_PS_ClassLoaderDataGraph_oops_do,
  SH_PS_jvmti_oops_do,
  SH_PS_CodeCache_oops_do,
  // Leave this one last.
  SH_PS_NumElements
};

SharedHeap::SharedHeap(CollectorPolicy* policy_) :
  CollectedHeap(),
  _collector_policy(policy_),
  _strong_roots_scope(NULL),
  _strong_roots_parity(0),
  _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
  _workers(NULL)
{
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  _sh = this;  // _sh is static, should be set only once.
  if (UseConcMarkSweepGC || UseG1GC) {
    _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
                                    /* are_GC_task_threads */true,
                                    /* are_ConcurrentGC_threads */false);
    if (_workers == NULL) {
      vm_exit_during_initialization("Failed necessary allocation.");
    } else {
      _workers->initialize_workers();
    }
  }
}

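// Accessors for the number of threads participating in the
// _process_strong_tasks termination protocol (see SubTasksDone).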
int SharedHeap::n_termination() {
  return _process_strong_tasks->n_threads();
}

void SharedHeap::set_n_termination(int t) {
  _process_strong_tasks->set_n_threads(t);
}

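// True if the current thread owns the Heap_lock, or if it is a GC task or
// VM thread and the lock is held on its behalf (_thread_holds_heap_lock_for_gc).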
bool SharedHeap::heap_lock_held_for_gc() {
  Thread* t = Thread::current();
  return Heap_lock->owned_by_self()
         || ((t->is_GC_task_thread() || t->is_VM_thread())
             && _thread_holds_heap_lock_for_gc);
}

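// Record the number of parallel GC threads and size the strong-roots
// task set to match.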
void SharedHeap::set_par_threads(uint t) {
  assert(t == 0 || !UseSerialGC, "Cannot have parallel threads");
  _n_par_threads = t;
  _process_strong_tasks->set_n_threads(t);
}

#ifdef ASSERT
class AssertNonScavengableClosure: public OopClosure {
public:
  virtual void do_oop(oop* p) {
    assert(!Universe::heap()->is_in_partial_collection(*p),
           "Referent should not be scavengable.");
  }
  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
};
static AssertNonScavengableClosure assert_is_non_scavengable_closure;
#endif

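// Bookkeeping for the single StrongRootsScope that may be active at a time.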
SharedHeap::StrongRootsScope* SharedHeap::active_strong_roots_scope() const {
  return _strong_roots_scope;
}
void SharedHeap::register_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == NULL, "Should only have one StrongRootsScope active");
  assert(scope != NULL, "Illegal argument");
  _strong_roots_scope = scope;
}
void SharedHeap::unregister_strong_roots_scope(SharedHeap::StrongRootsScope* scope) {
  assert(_strong_roots_scope == scope, "Wrong scope unregistered");
  _strong_roots_scope = NULL;
}

void SharedHeap::change_strong_roots_parity() {
  // Also set the new collection parity.
  assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
         "Not in range.");
  _strong_roots_parity++;
  if (_strong_roots_parity == 3) _strong_roots_parity = 1;
  assert(_strong_roots_parity >= 1 && _strong_roots_parity <= 2,
         "Not in range.");
}

SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* heap, bool activate)
  : MarkScope(activate), _sh(heap), _n_workers_done_with_threads(0)
{
  if (_active) {
    _sh->register_strong_roots_scope(this);
    _sh->change_strong_roots_parity();
    // Zero the claimed high water mark in the StringTable
    StringTable::clear_parallel_claimed_index();
  }
}

SharedHeap::StrongRootsScope::~StrongRootsScope() {
  if (_active) {
    _sh->unregister_strong_roots_scope(this);
  }
}

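// Monitor backing the worker barrier used by mark_worker_done_with_threads()
// and wait_until_all_workers_done_with_threads() below.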
Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false, Monitor::_safepoint_check_never);

void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
  // The Thread work barrier is only needed by G1 Class Unloading.
  // No need to use the barrier if this is single-threaded code.
  if (UseG1GC && ClassUnloadingWithConcurrentMark && n_workers > 0) {
    uint new_value = (uint)Atomic::add(1, &_n_workers_done_with_threads);
    if (new_value == n_workers) {
      // This thread is last. Notify the others.
      MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
      _lock->notify_all();
    }
  }
}

void SharedHeap::StrongRootsScope::wait_until_all_workers_done_with_threads(uint n_workers) {
  assert(UseG1GC, "Currently only used by G1");
  assert(ClassUnloadingWithConcurrentMark, "Currently only needed when doing G1 Class Unloading");

  // No need to use the barrier if this is single-threaded code.
  if (n_workers > 0 && (uint)_n_workers_done_with_threads != n_workers) {
    MonitorLockerEx ml(_lock, Mutex::_no_safepoint_check_flag);
    while ((uint)_n_workers_done_with_threads != n_workers) {
      _lock->wait(Mutex::_no_safepoint_check_flag, 0, false);
    }
  }
}

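// Scan the root set. Strong roots are always applied to strong_roots and
// strong_cld_closure; weak_roots and weak_cld_closure may be NULL when the
// caller only wants the strong roots (see process_strong_roots() below).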
void SharedHeap::process_roots(bool activate_scope,
                               ScanningOption so,
                               OopClosure* strong_roots,
                               OopClosure* weak_roots,
                               CLDClosure* strong_cld_closure,
                               CLDClosure* weak_cld_closure,
                               CodeBlobClosure* code_roots) {
  StrongRootsScope srs(this, activate_scope);

  // General roots.
  assert(_strong_roots_parity != 0, "must have called prologue code");
  assert(code_roots != NULL, "code root closure should always be set");
  // _n_termination for _process_strong_tasks should be set upstream,
  // in a method not running in a GC worker. Otherwise the GC worker
  // could be trying to change the termination condition while the task
  // is executing in another GC worker.

  // Iterating over the CLDG and the Threads is done early to allow G1 to
  // first process the strong CLDs and nmethods and then, after a barrier,
  // let the thread process the weak CLDs and nmethods.

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ClassLoaderDataGraph_oops_do)) {
    ClassLoaderDataGraph::roots_cld_do(strong_cld_closure, weak_cld_closure);
  }

  // Some CLDs contained in the thread frames should be considered strong.
  // Don't process them if they will be processed during the ClassLoaderDataGraph phase.
  CLDClosure* roots_from_clds_p = (strong_cld_closure != weak_cld_closure) ? strong_cld_closure : NULL;
  // Only process code roots from thread stacks if we aren't visiting the entire CodeCache anyway
  CodeBlobClosure* roots_from_code_p = (so & SO_AllCodeCache) ? NULL : code_roots;

  Threads::possibly_parallel_oops_do(strong_roots, roots_from_clds_p, roots_from_code_p);

  // This is the point where this worker thread will not find more strong CLDs/nmethods.
  // Report this so G1 can synchronize the strong and weak CLDs/nmethods processing.
  active_strong_roots_scope()->mark_worker_done_with_threads(n_par_threads());

  if (!_process_strong_tasks->is_task_claimed(SH_PS_Universe_oops_do)) {
    Universe::oops_do(strong_roots);
  }
  // Global (strong) JNI handles
  if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
    JNIHandles::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
    ObjectSynchronizer::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
    FlatProfiler::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_Management_oops_do))
    Management::oops_do(strong_roots);
  if (!_process_strong_tasks->is_task_claimed(SH_PS_jvmti_oops_do))
    JvmtiExport::oops_do(strong_roots);

  if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
    SystemDictionary::roots_oops_do(strong_roots, weak_roots);
  }

  // All threads execute the following. A specific chunk of buckets
  // from the StringTable are the individual tasks.
  if (weak_roots != NULL) {
    if (CollectedHeap::use_parallel_gc_threads()) {
      StringTable::possibly_parallel_oops_do(weak_roots);
    } else {
      StringTable::oops_do(weak_roots);
    }
  }

  if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
    if (so & SO_ScavengeCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // We only visit parts of the CodeCache when scavenging.
      CodeCache::scavenge_root_nmethods_do(code_roots);
    }
    if (so & SO_AllCodeCache) {
      assert(code_roots != NULL, "must supply closure for code cache");

      // CMSCollector uses this to do intermediate-strength collections.
      // We scan the entire code cache, since CodeCache::do_unloading is not called.
      CodeCache::blobs_do(code_roots);
    }
    // Verify that the code cache contents are not subject to
    // movement by a scavenging collection.
    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, !CodeBlobToOopClosure::FixRelocations));
    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
  }

  _process_strong_tasks->all_tasks_completed();
}

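// process_all_roots() and process_strong_roots() are thin wrappers around
// process_roots(): the former passes the same closures for the strong and
// weak variants, the latter passes NULL for the weak closures so that only
// the strong roots are visited.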
void SharedHeap::process_all_roots(bool activate_scope,
                                   ScanningOption so,
                                   OopClosure* roots,
                                   CLDClosure* cld_closure,
                                   CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, roots,
                cld_closure, cld_closure,
                code_closure);
}

void SharedHeap::process_strong_roots(bool activate_scope,
                                      ScanningOption so,
                                      OopClosure* roots,
                                      CLDClosure* cld_closure,
                                      CodeBlobClosure* code_closure) {
  process_roots(activate_scope, so,
                roots, NULL,
                cld_closure, NULL,
                code_closure);
}

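// Claims that every object is live. Passing it to JNIHandles::weak_oops_do
// below means every weak JNI handle is visited and none are cleared here.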
class AlwaysTrueClosure: public BoolObjectClosure {
public:
  bool do_object_b(oop p) { return true; }
};
static AlwaysTrueClosure always_true;

void SharedHeap::process_weak_roots(OopClosure* root_closure) {
  // Global (weak) JNI handles
  JNIHandles::weak_oops_do(&always_true, root_closure);
}

void SharedHeap::set_barrier_set(BarrierSet* bs) {
  _barrier_set = bs;
  // Cached barrier set for fast access in oops