@@ -38,48 +38,74 @@
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   GCCauseSetter x(g1h, _gc_cause);
   _gc_succeeded = g1h->do_full_collection(true /* explicit_gc */, false /* clear_all_soft_refs */);
 }
 
+VM_G1TryInitiateConcMark::VM_G1TryInitiateConcMark(uint gc_count_before,
+                                                   GCCause::Cause gc_cause,
+                                                   double target_pause_time_ms) :
+  VM_GC_Operation(gc_count_before, gc_cause),
+  _target_pause_time_ms(target_pause_time_ms),
+  _transient_failure(false),
+  _cycle_already_in_progress(false),
+  _gc_succeeded(false)
+{}
+
+bool VM_G1TryInitiateConcMark::doit_prologue() {
+  bool result = VM_GC_Operation::doit_prologue();
+  // The prologue can fail for a couple of reasons. The first is that another GC
+  // got scheduled and prevented the scheduling of the initial mark GC. The
+  // second is that the GC locker may be active and the heap can't be expanded.
+  // In both cases we want to retry the GC so that the initial mark pause is
+  // actually scheduled. In the second case, however, we should stall until
+  // the GC locker is no longer active and then retry the initial mark GC.
+  if (!result) _transient_failure = true;
+  return result;
+}
+
+void VM_G1TryInitiateConcMark::doit() {
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  GCCauseSetter x(g1h, _gc_cause);
+  if (!g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause)) {
+    // Failure to force the next GC pause to be an initial mark indicates
+    // there is already a concurrent marking cycle in progress. Set flag
+    // to notify the caller and return immediately.
+    _cycle_already_in_progress = true;
+  } else if (!g1h->do_collection_pause_at_safepoint(_target_pause_time_ms)) {
+    // Failure to perform the collection at all occurs because GCLocker is
+    // active, and we have the bad luck to be the collection request that
+    // makes a later _gc_locker collection needed. (Else we would have hit
+    // the GCLocker check in the prologue.)
+    _transient_failure = true;
+  } else if (g1h->should_upgrade_to_full_gc(_gc_cause)) {
+    // GC ran, but we're still in trouble and need a full GC.
+    log_info(gc, ergo)("Attempting maximally compacting collection");
+    _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */
+                                            true /* clear_all_soft_refs */);
+    guarantee(_gc_succeeded, "Elevated collections during the safepoint must always succeed");
+  } else {
+    _gc_succeeded = true;
+  }
+}
+
 VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
                                                      uint gc_count_before,
                                                      GCCause::Cause gc_cause,
-                                                     bool should_initiate_conc_mark,
                                                      double target_pause_time_ms) :
   VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
   _gc_succeeded(false),
-  _should_initiate_conc_mark(should_initiate_conc_mark),
-  _should_retry_gc(false),
-  _target_pause_time_ms(target_pause_time_ms),
-  _old_marking_cycles_completed_before(0) {
+  _target_pause_time_ms(target_pause_time_ms) {
 
   guarantee(target_pause_time_ms > 0.0,
             "target_pause_time_ms = %1.6lf should be positive",
             target_pause_time_ms);
   _gc_cause = gc_cause;
 }
 
-bool VM_G1CollectForAllocation::doit_prologue() {
-  bool res = VM_CollectForAllocation::doit_prologue();
-  if (!res) {
-    if (_should_initiate_conc_mark) {
-      // The prologue can fail for a couple of reasons. The first is that another GC
-      // got scheduled and prevented the scheduling of the initial mark GC. The
-      // second is that the GC locker may be active and the heap can't be expanded.
-      // In both cases we want to retry the GC so that the initial mark pause is
-      // actually scheduled. In the second case, however, we should stall until
-      // until the GC locker is no longer active and then retry the initial mark GC.
-      _should_retry_gc = true;
-    }
-  }
-  return res;
-}
-
 void VM_G1CollectForAllocation::doit() {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
-         "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
 
   if (_word_size > 0) {
     // An allocation has been requested. So, try to do that first.
     _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                                    false /* expect_null_cur_alloc_region */);
@@ -90,116 +116,27 @@
       return;
     }
   }
 
   GCCauseSetter x(g1h, _gc_cause);
-  if (_should_initiate_conc_mark) {
-    // It's safer to read old_marking_cycles_completed() here, given
-    // that noone else will be updating it concurrently. Since we'll
-    // only need it if we're initiating a marking cycle, no point in
-    // setting it earlier.
-    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
-
-    // At this point we are supposed to start a concurrent cycle. We
-    // will do so if one is not already in progress.
-    bool res = g1h->policy()->force_initial_mark_if_outside_cycle(_gc_cause);
-
-    // The above routine returns true if we were able to force the
-    // next GC pause to be an initial mark; it returns false if a
-    // marking cycle is already in progress.
-    //
-    // If a marking cycle is already in progress just return and skip the
-    // pause below - if the reason for requesting this initial mark pause
-    // was due to a System.gc() then the requesting thread should block in
-    // doit_epilogue() until the marking cycle is complete.
-    //
-    // If this initial mark pause was requested as part of a humongous
-    // allocation then we know that the marking cycle must just have
-    // been started by another thread (possibly also allocating a humongous
-    // object) as there was no active marking cycle when the requesting
-    // thread checked before calling collect() in
-    // attempt_allocation_humongous(). Retrying the GC, in this case,
-    // will cause the requesting thread to spin inside collect() until the
-    // just started marking cycle is complete - which may be a while. So
-    // we do NOT retry the GC.
-    if (!res) {
-      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
-      if (_gc_cause != GCCause::_g1_humongous_allocation) {
-        _should_retry_gc = true;
-      }
-      return;
-    }
-  }
-
   // Try a partial collection of some kind.
   _gc_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
 
   if (_gc_succeeded) {
     if (_word_size > 0) {
       // An allocation had been requested. Do it, eventually trying a stronger
       // kind of GC.
       _result = g1h->satisfy_failed_allocation(_word_size, &_gc_succeeded);
-    } else {
-      bool should_upgrade_to_full = g1h->should_upgrade_to_full_gc(_gc_cause);
-
-      if (should_upgrade_to_full) {
-        // There has been a request to perform a GC to free some space. We have no
-        // information on how much memory has been asked for. In case there are
-        // absolutely no regions left to allocate into, do a maximally compacting full GC.
-        log_info(gc, ergo)("Attempting maximally compacting collection");
-        _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */
-                                                true /* clear_all_soft_refs */);
-      }
+    } else if (g1h->should_upgrade_to_full_gc(_gc_cause)) {
+      // There has been a request to perform a GC to free some space. We have no
+      // information on how much memory has been asked for. In case there are
+      // absolutely no regions left to allocate into, do a maximally compacting full GC.
+      log_info(gc, ergo)("Attempting maximally compacting collection");
+      _gc_succeeded = g1h->do_full_collection(false, /* explicit gc */
+                                              true /* clear_all_soft_refs */);
     }
     guarantee(_gc_succeeded, "Elevated collections during the safepoint must always succeed.");
-  } else {
-    assert(_result == NULL, "invariant");
-    // The only reason for the pause to not be successful is that, the GC locker is
-    // active (or has become active since the prologue was executed). In this case
-    // we should retry the pause after waiting for the GC locker to become inactive.
-    _should_retry_gc = true;
-  }
-}
-
-void VM_G1CollectForAllocation::doit_epilogue() {
-  VM_CollectForAllocation::doit_epilogue();
-
-  // If the pause was initiated by a System.gc() and
-  // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
-  // that just started (or maybe one that was already in progress) to
-  // finish.
-  if (GCCause::is_user_requested_gc(_gc_cause) &&
-      _should_initiate_conc_mark) {
-    assert(ExplicitGCInvokesConcurrent,
-           "the only way to be here is if ExplicitGCInvokesConcurrent is set");
-
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-    // In the doit() method we saved g1h->old_marking_cycles_completed()
-    // in the _old_marking_cycles_completed_before field. We have to
-    // wait until we observe that g1h->old_marking_cycles_completed()
-    // has increased by at least one. This can happen if a) we started
-    // a cycle and it completes, b) a cycle already in progress
-    // completes, or c) a Full GC happens.
-
-    // If the condition has already been reached, there's no point in
-    // actually taking the lock and doing the wait.
-    if (g1h->old_marking_cycles_completed() <=
-        _old_marking_cycles_completed_before) {
-      // The following is largely copied from CMS
-
-      Thread* thr = Thread::current();
-      assert(thr->is_Java_thread(), "invariant");
-      JavaThread* jt = (JavaThread*)thr;
-      ThreadToNativeFromVM native(jt);
-
-      MonitorLocker ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
-      while (g1h->old_marking_cycles_completed() <=
-             _old_marking_cycles_completed_before) {
-        ml.wait();
-      }
-    }
   }
 }
 
 void VM_G1Concurrent::doit() {
   GCIdMark gc_id_mark(_gc_id);