/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/vmCMSOperations.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/isGCActiveMark.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
#include "utilities/dtrace.hpp"

//////////////////////////////////////////////////////////
// Methods in abstract class VM_CMS_Operation
//////////////////////////////////////////////////////////
void VM_CMS_Operation::acquire_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  SurrogateLockerThread* slt = ConcurrentMarkSweepThread::slt();
  if (slt != NULL) {
    slt->manipulatePLL(SurrogateLockerThread::acquirePLL);
  } else {
    SurrogateLockerThread::report_missing_slt();
  }
}

void VM_CMS_Operation::release_and_notify_pending_list_lock() {
  // The caller may block while communicating
  // with the SLT thread in order to acquire/release the PLL.
  ConcurrentMarkSweepThread::slt()->
    manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
}

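// Heap verification around a CMS pause: runs only when the corresponding
// VerifyBeforeGC/VerifyAfterGC flag is set and at least VerifyGCStartAt
// collections have occurred, holding the free list and bitmap locks.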
void VM_CMS_Operation::verify_before_gc() {
  if (VerifyBeforeGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    GenCollectedHeap::heap()->prepare_for_verify();
    Universe::verify();
  }
}

void VM_CMS_Operation::verify_after_gc() {
  if (VerifyAfterGC &&
      GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm, _collector->_gc_tracer_cm->gc_id());
    HandleMark hm;
    FreelistLocker x(_collector);
    MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
    Universe::verify();
  }
}

bool VM_CMS_Operation::lost_race() const {
  if (CMSCollector::abstract_state() == CMSCollector::Idling) {
    // We lost a race to a foreground collection
    // -- there's nothing to do
    return true;
  }
  assert(CMSCollector::abstract_state() == legal_state(),
         "Inconsistent collector state?");
  return false;
}

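// Acquire the pending list lock (when the operation needs it) followed by
// the Heap_lock; if a foreground collection won the race in the meantime,
// release them again and report that the prologue failed.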
bool VM_CMS_Operation::doit_prologue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  if (needs_pll()) {
    acquire_pending_list_lock();
  }
  // Get the Heap_lock after the pending_list_lock.
  Heap_lock->lock();
  if (lost_race()) {
    assert(_prologue_succeeded == false, "Initialized in c'tor");
    Heap_lock->unlock();
    if (needs_pll()) {
      release_and_notify_pending_list_lock();
    }
  } else {
    _prologue_succeeded = true;
  }
  return _prologue_succeeded;
}

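// Undo the prologue: release the Heap_lock and, if the pending list lock
// was taken, release and notify it.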
void VM_CMS_Operation::doit_epilogue() {
  assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
  assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
  assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
         "Possible deadlock");

  // Release the Heap_lock first.
  Heap_lock->unlock();
  if (needs_pll()) {
    release_and_notify_pending_list_lock();
  }
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Initial_Mark
//////////////////////////////////////////////////////////
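// Stop-the-world initial mark pause: mark the objects directly reachable
// from the roots (CMS_op_checkpointRootsInitial).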
void VM_CMS_Initial_Mark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_PRIVATE_CMS_INITMARK_BEGIN();

  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->_gc_timer_cm->register_gc_pause_end();

  HS_PRIVATE_CMS_INITMARK_END();
}

//////////////////////////////////////////////////////////
// Methods in class VM_CMS_Final_Remark_Operation
//////////////////////////////////////////////////////////
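// Stop-the-world final remark pause: complete the marking of live objects
// (CMS_op_checkpointRootsFinal) and record the heap summary.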
void VM_CMS_Final_Remark::doit() {
  if (lost_race()) {
    // Nothing to do.
    return;
  }
  HS_PRIVATE_CMS_REMARK_BEGIN();

  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, GCCause::_cms_final_remark);

  VM_CMS_Operation::verify_before_gc();

  IsGCActiveMark x; // stop-world GC active
  _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());

  VM_CMS_Operation::verify_after_gc();

  _collector->save_heap_summary();
  _collector->_gc_timer_cm->register_gc_pause_end();

  HS_PRIVATE_CMS_REMARK_END();
}

// VM operation to invoke a concurrent collection of a
// GenCollectedHeap heap.
void VM_GenCollectFullConcurrent::doit() {
  assert(Thread::current()->is_VM_thread(), "Should be VM thread");
  assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_count_before == gch->total_collections()) {
    // The "full" in the do_full_collection call below "forces"
    // a collection; the second arg, 0, below ensures that
    // only the young gen is collected. XXX In the future,
    // we'll probably need to have something in this interface
    // to say do this only if we are sure we will not bail
    // out to a full collection in this attempt, but that's
    // for the future.
    assert(SafepointSynchronize::is_at_safepoint(),
           "We can only be executing this arm of the if at a safepoint");
    GCCauseSetter gccs(gch, _gc_cause);
    gch->do_full_collection(gch->must_clear_all_soft_refs(),
                            0 /* collect only youngest gen */);
  } // Else no need for a foreground young gc
  assert((_gc_count_before < gch->total_collections()) ||
         (GC_locker::is_active() /* gc may have been skipped */
          && (_gc_count_before == gch->total_collections())),
         "total_collections() should be monotonically increasing");

  MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
  if (gch->total_full_collections() == _full_gc_count_before) {
    // Nudge the CMS thread to start a concurrent collection.
    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
  } else {
    assert(_full_gc_count_before < gch->total_full_collections(), "Error");
    FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
  }
}

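// Returns true if this operation still needs to be evaluated at a safepoint.
// When called on the VM thread and a collection has already occurred since
// the request was recorded, the foreground young gc (and hence the safepoint
// work) can be skipped; doit() will merely nudge the CMS thread.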
bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
  Thread* thr = Thread::current();
  assert(thr != NULL, "Unexpected tid");
  if (!thr->is_Java_thread()) {
    assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    if (_gc_count_before != gch->total_collections()) {
      // No need to do a young gc, we'll just nudge the CMS thread
      // in the doit() method above, to be executed soon.
      assert(_gc_count_before < gch->total_collections(),
             "total_collections() should be monotonically increasing");
      return false;  // no need for foreground young gc
    }
  }
  return true;  // may still need foreground young gc
}


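// Executed by the requesting Java thread after the VM operation: release the
// Heap_lock and the pending list lock, then, for a System.gc()-induced request
// with ExplicitGCInvokesConcurrent, wait in native mode until a full collection
// completes that witnesses our request.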
void VM_GenCollectFullConcurrent::doit_epilogue() {
  Thread* thr = Thread::current();
  assert(thr->is_Java_thread(), "just checking");
  JavaThread* jt = (JavaThread*)thr;
  // Release the Heap_lock first.
  Heap_lock->unlock();
  release_and_notify_pending_list_lock();

  // It is fine to test whether completed collections has
  // exceeded our request count without locking because
  // the completion count is monotonically increasing;
  // this will break for very long-running apps when the
  // count overflows and wraps around. XXX fix me !!!
  // e.g. at the rate of 1 full gc per ms, this could
  // overflow in about 1000 years.
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  if (_gc_cause != GCCause::_gc_locker &&
      gch->total_full_collections_completed() <= _full_gc_count_before) {
    // maybe we should change the condition to test _gc_cause ==
    // GCCause::_java_lang_system_gc, instead of
    // _gc_cause != GCCause::_gc_locker
    assert(_gc_cause == GCCause::_java_lang_system_gc,
           "the only way to get here is if this was a System.gc()-induced GC");
    assert(ExplicitGCInvokesConcurrent, "Error");
    // Now, wait for a witnessing concurrent gc cycle to complete,
    // but do so in native mode, because we want to lock the
    // FullGCCount_lock, which may be needed by the VM thread
    // or by the CMS thread, so we do not want to be suspended
    // while holding that lock.
    ThreadToNativeFromVM native(jt);
    MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Either a concurrent or a stop-world full gc is sufficient
    // witness to our request.
    while (gch->total_full_collections_completed() <= _full_gc_count_before) {
      FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
    }
  }
}