--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/DefNewGeneration.java Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/DefNewGeneration.java Fri Oct 31 09:10:51 2014 +0100
@@ -64,8 +64,8 @@
}
// Accessing spaces
- public EdenSpace eden() {
- return (EdenSpace) VMObjectFactory.newObject(EdenSpace.class, edenSpaceField.getValue(addr));
+ public ContiguousSpace eden() {
+ return (ContiguousSpace) VMObjectFactory.newObject(ContiguousSpace.class, edenSpaceField.getValue(addr));
}
public ContiguousSpace from() {
--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/EdenSpace.java Thu Oct 30 12:45:22 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,40 +0,0 @@
-/*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-
-/** <P> Class EdenSpace describes eden-space in new
- generation. (Currently it does not add any significant
- functionality beyond ContiguousSpace.) */
-
-public class EdenSpace extends ContiguousSpace {
- public EdenSpace(Address addr) {
- super(addr);
- }
-}
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -3513,7 +3513,7 @@
Rtags = R3_ARG1,
Rindex = R5_ARG3;
- const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
// --------------------------------------------------------------------------
// Check if fast case is possible.
--- a/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/cpu/sparc/vm/macroAssembler_sparc.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -3196,7 +3196,7 @@
assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
ba(slow_case);
delayed()->nop();
@@ -3331,7 +3331,7 @@
assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
Label do_refill, discard_tlab;
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
ba(slow_case);
delayed()->nop();
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -3309,7 +3309,7 @@
// (creates a new TLAB, etc.)
const bool allow_shared_alloc =
- Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ Universe::heap()->supports_inline_contig_alloc();
if(UseTLAB) {
Register RoldTopValue = RallocatedObject;
--- a/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/cpu/x86/vm/macroAssembler_x86.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -2964,7 +2964,7 @@
Label& slow_case) {
assert(obj == rax, "obj must be in rax, for cmpxchg");
assert_different_registers(obj, var_size_in_bytes, t1);
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
jmp(slow_case);
} else {
Register end = t1;
@@ -4437,7 +4437,7 @@
assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
Label do_refill, discard_tlab;
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
+ if (!Universe::heap()->supports_inline_contig_alloc()) {
// No allocation in the shared eden.
jmp(slow_case);
}
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -3214,7 +3214,7 @@
// (creates a new TLAB, etc.)
const bool allow_shared_alloc =
- Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ Universe::heap()->supports_inline_contig_alloc();
const Register thread = rcx;
if (UseTLAB || allow_shared_alloc) {
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -3269,7 +3269,7 @@
// (creates a new TLAB, etc.)
const bool allow_shared_alloc =
- Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
+ Universe::heap()->supports_inline_contig_alloc();
if (UseTLAB) {
__ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
--- a/hotspot/src/share/vm/Xusage.txt Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/Xusage.txt Fri Oct 31 09:10:51 2014 +0100
@@ -7,7 +7,6 @@
-Xbootclasspath/p:<directories and zip/jar files separated by ;>
prepend in front of bootstrap class path
-Xnoclassgc disable class garbage collection
- -Xincgc enable incremental garbage collection
-Xloggc:<file> log GC status to a file with time stamps
-Xbatch disable background compilation
-Xms<size> set initial Java heap size
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -89,9 +89,3 @@
_gc_policy_counters = new GCPolicyCounters("Copy:CMS", 2, 3);
}
}
-
-// Returns true if the incremental mode is enabled.
-bool ConcurrentMarkSweepPolicy::has_soft_ended_eden()
-{
- return CMSIncrementalMode;
-}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -42,9 +42,6 @@
virtual void initialize_size_policy(size_t init_eden_size,
size_t init_promo_size,
size_t init_survivor_size);
-
- // Returns true if the incremental mode is enabled.
- virtual bool has_soft_ended_eden();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CMSCOLLECTORPOLICY_HPP
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -167,16 +167,6 @@
};
-// Wrapper class to temporarily disable icms during a foreground cms collection.
-class ICMSDisabler: public StackObj {
- public:
- // The ctor disables icms and wakes up the thread so it notices the change;
- // the dtor re-enables icms. Note that the CMSCollector methods will check
- // CMSIncrementalMode.
- ICMSDisabler() { CMSCollector::disable_icms(); CMSCollector::start_icms(); }
- ~ICMSDisabler() { CMSCollector::enable_icms(); }
-};
-
//////////////////////////////////////////////////////////////////
// Concurrent Mark-Sweep Generation /////////////////////////////
//////////////////////////////////////////////////////////////////
@@ -363,7 +353,6 @@
_cms_used_at_gc0_end = 0;
_allow_duty_cycle_reduction = false;
_valid_bits = 0;
- _icms_duty_cycle = CMSIncrementalDutyCycle;
}
double CMSStats::cms_free_adjustment_factor(size_t free) const {
@@ -442,86 +431,17 @@
return work - deadline;
}
-// Return a duty cycle based on old_duty_cycle and new_duty_cycle, limiting the
-// amount of change to prevent wild oscillation.
-unsigned int CMSStats::icms_damped_duty_cycle(unsigned int old_duty_cycle,
- unsigned int new_duty_cycle) {
- assert(old_duty_cycle <= 100, "bad input value");
- assert(new_duty_cycle <= 100, "bad input value");
-
- // Note: use subtraction with caution since it may underflow (values are
- // unsigned). Addition is safe since we're in the range 0-100.
- unsigned int damped_duty_cycle = new_duty_cycle;
- if (new_duty_cycle < old_duty_cycle) {
- const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 5U);
- if (new_duty_cycle + largest_delta < old_duty_cycle) {
- damped_duty_cycle = old_duty_cycle - largest_delta;
- }
- } else if (new_duty_cycle > old_duty_cycle) {
- const unsigned int largest_delta = MAX2(old_duty_cycle / 4, 15U);
- if (new_duty_cycle > old_duty_cycle + largest_delta) {
- damped_duty_cycle = MIN2(old_duty_cycle + largest_delta, 100U);
- }
- }
- assert(damped_duty_cycle <= 100, "invalid duty cycle computed");
-
- if (CMSTraceIncrementalPacing) {
- gclog_or_tty->print(" [icms_damped_duty_cycle(%d,%d) = %d] ",
- old_duty_cycle, new_duty_cycle, damped_duty_cycle);
- }
- return damped_duty_cycle;
-}
-
-unsigned int CMSStats::icms_update_duty_cycle_impl() {
- assert(CMSIncrementalPacing && valid(),
- "should be handled in icms_update_duty_cycle()");
-
- double cms_time_so_far = cms_timer().seconds();
- double scaled_duration = cms_duration_per_mb() * _cms_used_at_gc0_end / M;
- double scaled_duration_remaining = fabsd(scaled_duration - cms_time_so_far);
-
- // Avoid division by 0.
- double time_until_full = MAX2(time_until_cms_gen_full(), 0.01);
- double duty_cycle_dbl = 100.0 * scaled_duration_remaining / time_until_full;
-
- unsigned int new_duty_cycle = MIN2((unsigned int)duty_cycle_dbl, 100U);
- if (new_duty_cycle > _icms_duty_cycle) {
- // Avoid very small duty cycles (1 or 2); 0 is allowed.
- if (new_duty_cycle > 2) {
- _icms_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle,
- new_duty_cycle);
- }
- } else if (_allow_duty_cycle_reduction) {
- // The duty cycle is reduced only once per cms cycle (see record_cms_end()).
- new_duty_cycle = icms_damped_duty_cycle(_icms_duty_cycle, new_duty_cycle);
- // Respect the minimum duty cycle.
- unsigned int min_duty_cycle = (unsigned int)CMSIncrementalDutyCycleMin;
- _icms_duty_cycle = MAX2(new_duty_cycle, min_duty_cycle);
- }
-
- if (PrintGCDetails || CMSTraceIncrementalPacing) {
- gclog_or_tty->print(" icms_dc=%d ", _icms_duty_cycle);
- }
-
- _allow_duty_cycle_reduction = false;
- return _icms_duty_cycle;
-}
-
#ifndef PRODUCT
void CMSStats::print_on(outputStream *st) const {
st->print(" gc0_alpha=%d,cms_alpha=%d", _gc0_alpha, _cms_alpha);
st->print(",gc0_dur=%g,gc0_per=%g,gc0_promo=" SIZE_FORMAT,
gc0_duration(), gc0_period(), gc0_promoted());
- st->print(",cms_dur=%g,cms_dur_per_mb=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
- cms_duration(), cms_duration_per_mb(),
- cms_period(), cms_allocated());
+ st->print(",cms_dur=%g,cms_per=%g,cms_alloc=" SIZE_FORMAT,
+ cms_duration(), cms_period(), cms_allocated());
st->print(",cms_since_beg=%g,cms_since_end=%g",
cms_time_since_begin(), cms_time_since_end());
st->print(",cms_used_beg=" SIZE_FORMAT ",cms_used_end=" SIZE_FORMAT,
_cms_used_at_gc0_begin, _cms_used_at_gc0_end);
- if (CMSIncrementalMode) {
- st->print(",dc=%d", icms_duty_cycle());
- }
if (valid()) {
st->print(",promo_rate=%g,cms_alloc_rate=%g",
@@ -579,8 +499,6 @@
#endif
_collection_count_start(0),
_verifying(false),
- _icms_start_limit(NULL),
- _icms_stop_limit(NULL),
_verification_mark_bm(0, Mutex::leaf + 1, "CMS_verification_mark_bm_lock"),
_completed_initialization(false),
_collector_policy(cp),
@@ -1116,137 +1034,6 @@
}
}
-static inline size_t percent_of_space(Space* space, HeapWord* addr)
-{
- size_t delta = pointer_delta(addr, space->bottom());
- return (size_t)(delta * 100.0 / (space->capacity() / HeapWordSize));
-}
-
-void CMSCollector::icms_update_allocation_limits()
-{
- Generation* young = GenCollectedHeap::heap()->get_gen(0);
- EdenSpace* eden = young->as_DefNewGeneration()->eden();
-
- const unsigned int duty_cycle = stats().icms_update_duty_cycle();
- if (CMSTraceIncrementalPacing) {
- stats().print();
- }
-
- assert(duty_cycle <= 100, "invalid duty cycle");
- if (duty_cycle != 0) {
- // The duty_cycle is a percentage between 0 and 100; convert to words and
- // then compute the offset from the endpoints of the space.
- size_t free_words = eden->free() / HeapWordSize;
- double free_words_dbl = (double)free_words;
- size_t duty_cycle_words = (size_t)(free_words_dbl * duty_cycle / 100.0);
- size_t offset_words = (free_words - duty_cycle_words) / 2;
-
- _icms_start_limit = eden->top() + offset_words;
- _icms_stop_limit = eden->end() - offset_words;
-
- // The limits may be adjusted (shifted to the right) by
- // CMSIncrementalOffset, to allow the application more mutator time after a
- // young gen gc (when all mutators were stopped) and before CMS starts and
- // takes away one or more cpus.
- if (CMSIncrementalOffset != 0) {
- double adjustment_dbl = free_words_dbl * CMSIncrementalOffset / 100.0;
- size_t adjustment = (size_t)adjustment_dbl;
- HeapWord* tmp_stop = _icms_stop_limit + adjustment;
- if (tmp_stop > _icms_stop_limit && tmp_stop < eden->end()) {
- _icms_start_limit += adjustment;
- _icms_stop_limit = tmp_stop;
- }
- }
- }
- if (duty_cycle == 0 || (_icms_start_limit == _icms_stop_limit)) {
- _icms_start_limit = _icms_stop_limit = eden->end();
- }
-
- // Install the new start limit.
- eden->set_soft_end(_icms_start_limit);
-
- if (CMSTraceIncrementalMode) {
- gclog_or_tty->print(" icms alloc limits: "
- PTR_FORMAT "," PTR_FORMAT
- " (" SIZE_FORMAT "%%," SIZE_FORMAT "%%) ",
- p2i(_icms_start_limit), p2i(_icms_stop_limit),
- percent_of_space(eden, _icms_start_limit),
- percent_of_space(eden, _icms_stop_limit));
- if (Verbose) {
- gclog_or_tty->print("eden: ");
- eden->print_on(gclog_or_tty);
- }
- }
-}
-
-// Any changes here should try to maintain the invariant
-// that if this method is called with _icms_start_limit
-// and _icms_stop_limit both NULL, then it should return NULL
-// and not notify the icms thread.
-HeapWord*
-CMSCollector::allocation_limit_reached(Space* space, HeapWord* top,
- size_t word_size)
-{
- // A start_limit equal to end() means the duty cycle is 0, so treat that as a
- // nop.
- if (CMSIncrementalMode && _icms_start_limit != space->end()) {
- if (top <= _icms_start_limit) {
- if (CMSTraceIncrementalMode) {
- space->print_on(gclog_or_tty);
- gclog_or_tty->stamp();
- gclog_or_tty->print_cr(" start limit top=" PTR_FORMAT
- ", new limit=" PTR_FORMAT
- " (" SIZE_FORMAT "%%)",
- p2i(top), p2i(_icms_stop_limit),
- percent_of_space(space, _icms_stop_limit));
- }
- ConcurrentMarkSweepThread::start_icms();
- assert(top < _icms_stop_limit, "Tautology");
- if (word_size < pointer_delta(_icms_stop_limit, top)) {
- return _icms_stop_limit;
- }
-
- // The allocation will cross both the _start and _stop limits, so do the
- // stop notification also and return end().
- if (CMSTraceIncrementalMode) {
- space->print_on(gclog_or_tty);
- gclog_or_tty->stamp();
- gclog_or_tty->print_cr(" +stop limit top=" PTR_FORMAT
- ", new limit=" PTR_FORMAT
- " (" SIZE_FORMAT "%%)",
- p2i(top), p2i(space->end()),
- percent_of_space(space, space->end()));
- }
- ConcurrentMarkSweepThread::stop_icms();
- return space->end();
- }
-
- if (top <= _icms_stop_limit) {
- if (CMSTraceIncrementalMode) {
- space->print_on(gclog_or_tty);
- gclog_or_tty->stamp();
- gclog_or_tty->print_cr(" stop limit top=" PTR_FORMAT
- ", new limit=" PTR_FORMAT
- " (" SIZE_FORMAT "%%)",
- top, space->end(),
- percent_of_space(space, space->end()));
- }
- ConcurrentMarkSweepThread::stop_icms();
- return space->end();
- }
-
- if (CMSTraceIncrementalMode) {
- space->print_on(gclog_or_tty);
- gclog_or_tty->stamp();
- gclog_or_tty->print_cr(" end limit top=" PTR_FORMAT
- ", new limit=" PTR_FORMAT,
- top, NULL);
- }
- }
-
- return NULL;
-}
-
oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) {
assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
// allocate, copy and if necessary update promoinfo --
@@ -1289,14 +1076,6 @@
}
-HeapWord*
-ConcurrentMarkSweepGeneration::allocation_limit_reached(Space* space,
- HeapWord* top,
- size_t word_sz)
-{
- return collector()->allocation_limit_reached(space, top, word_sz);
-}
-
// IMPORTANT: Notes on object size recognition in CMS.
// ---------------------------------------------------
// A block of storage in the CMS generation is always in
@@ -1809,9 +1588,6 @@
// we want to do a foreground collection.
_foregroundGCIsActive = true;
- // Disable incremental mode during a foreground collection.
- ICMSDisabler icms_disabler;
-
// release locks and wait for a notify from the background collector
// releasing the locks is only necessary for phases which
// do yields to improve the granularity of the collection.
@@ -2135,7 +1911,7 @@
void CMSCollector::print_eden_and_survivor_chunk_arrays() {
DefNewGeneration* dng = _young_gen->as_DefNewGeneration();
- EdenSpace* eden_space = dng->eden();
+ ContiguousSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
// Eden
@@ -2783,10 +2559,6 @@
//
_cmsGen->update_counters(cms_used);
- if (CMSIncrementalMode) {
- icms_update_allocation_limits();
- }
-
bitMapLock()->unlock();
releaseFreelistLocks();
@@ -4272,12 +4044,10 @@
assert_lock_strong(_bit_map_lock);
_bit_map_lock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
- _collector->icms_wait();
// It is possible for whichever thread initiated the yield request
// not to get a chance to wake up and take the bitmap lock between
@@ -4307,7 +4077,6 @@
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -5238,7 +5007,7 @@
void CMSParMarkTask::work_on_young_gen_roots(uint worker_id, OopsInGenClosure* cl) {
DefNewGeneration* dng = _collector->_young_gen->as_DefNewGeneration();
- EdenSpace* eden_space = dng->eden();
+ ContiguousSpace* eden_space = dng->eden();
ContiguousSpace* from_space = dng->from();
ContiguousSpace* to_space = dng->to();
@@ -5410,7 +5179,7 @@
while (!pst->is_task_claimed(/* reference */ nth_task)) {
// We claimed task # nth_task; compute its boundaries.
if (chunk_top == 0) { // no samples were taken
- assert(nth_task == 0 && n_tasks == 1, "Can have only 1 EdenSpace task");
+ assert(nth_task == 0 && n_tasks == 1, "Can have only 1 eden task");
start = space->bottom();
end = space->top();
} else if (nth_task == 0) {
@@ -5788,7 +5557,7 @@
// process_roots (which currently doesn't know how to
// parallelize such a scan), but rather will be broken up into
// a set of parallel tasks (via the sampling that the [abortable]
- // preclean phase did of EdenSpace, plus the [two] tasks of
+ // preclean phase did of eden, plus the [two] tasks of
// scanning the [two] survivor spaces. Further fine-grain
// parallelization of the scanning of the survivor spaces
// themselves, and of precleaning of the younger gen itself
@@ -6474,19 +6243,16 @@
assert_lock_strong(bitMapLock());
bitMapLock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
stopTimer();
if (PrintCMSStatistics != 0) {
incrementYields();
}
- icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -6509,10 +6275,6 @@
_collectorState = Idling;
}
- // Stop incremental mode after a cycle completes, so that any future cycles
- // are triggered by allocation.
- stop_icms();
-
NOT_PRODUCT(
if (RotateCMSCollectionTypes) {
_cmsGen->rotate_debug_collection_type();
@@ -6964,12 +6726,10 @@
_bit_map->lock()->unlock();
_freelistLock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
- _collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0;
@@ -6978,7 +6738,6 @@
!CMSCollector::foregroundGCIsActive();
++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7124,19 +6883,16 @@
_bitMap->lock()->unlock();
_freelistLock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
- _collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7196,19 +6952,16 @@
// Relinquish the bit map lock
_bit_map->lock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
- _collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7354,19 +7107,16 @@
assert_lock_strong(_bitMap->lock());
_bitMap->lock()->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
- _collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -7388,7 +7138,7 @@
_finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
- // the marking phase (especially if running iCMS). During
+ // the marking phase. During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
// these mutation records are redundant until we have
@@ -7505,7 +7255,7 @@
_finger = ptr + obj->size();
assert(_finger > ptr, "we just incremented it above");
// On large heaps, it may take us some time to get through
- // the marking phase (especially if running iCMS). During
+ // the marking phase. During
// this time it's possible that a lot of mutations have
// accumulated in the card table and the mod union table --
// these mutation records are redundant until we have
@@ -7994,20 +7744,16 @@
bml->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
-
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
- _collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
@@ -8675,19 +8421,16 @@
_bitMap->lock()->unlock();
_freelistLock->unlock();
ConcurrentMarkSweepThread::desynchronize(true);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
_collector->stopTimer();
if (PrintCMSStatistics != 0) {
_collector->incrementYields();
}
- _collector->icms_wait();
// See the comment in coordinator_yield()
for (unsigned i = 0; i < CMSYieldSleepCount &&
ConcurrentMarkSweepThread::should_yield() &&
!CMSCollector::foregroundGCIsActive(); ++i) {
os::sleep(Thread::current(), 1, false);
- ConcurrentMarkSweepThread::acknowledge_yield_request();
}
ConcurrentMarkSweepThread::synchronize(true);
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -356,7 +356,6 @@
size_t _gc0_promoted; // bytes promoted per gc0
double _cms_duration;
double _cms_duration_pre_sweep; // time from initiation to start of sweep
- double _cms_duration_per_mb;
double _cms_period;
size_t _cms_allocated; // bytes of direct allocation per gc0 period
@@ -383,17 +382,7 @@
unsigned int _valid_bits;
- unsigned int _icms_duty_cycle; // icms duty cycle (0-100).
-
protected:
-
- // Return a duty cycle that avoids wild oscillations, by limiting the amount
- // of change between old_duty_cycle and new_duty_cycle (the latter is treated
- // as a recommended value).
- static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
- unsigned int new_duty_cycle);
- unsigned int icms_update_duty_cycle_impl();
-
// In support of adjusting of cms trigger ratios based on history
// of concurrent mode failure.
double cms_free_adjustment_factor(size_t free) const;
@@ -426,7 +415,6 @@
size_t gc0_promoted() const { return _gc0_promoted; }
double cms_period() const { return _cms_period; }
double cms_duration() const { return _cms_duration; }
- double cms_duration_per_mb() const { return _cms_duration_per_mb; }
size_t cms_allocated() const { return _cms_allocated; }
size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
@@ -458,12 +446,6 @@
// End of higher level statistics.
- // Returns the cms incremental mode duty cycle, as a percentage (0-100).
- unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
-
- // Update the duty cycle and return the new value.
- unsigned int icms_update_duty_cycle();
-
// Debugging.
void print_on(outputStream* st) const PRODUCT_RETURN;
void print() const { print_on(gclog_or_tty); }
@@ -725,13 +707,6 @@
// Timing, allocation and promotion statistics, used for scheduling.
CMSStats _stats;
- // Allocation limits installed in the young gen, used only in
- // CMSIncrementalMode. When an allocation in the young gen would cross one of
- // these limits, the cms generation is notified and the cms thread is started
- // or stopped, respectively.
- HeapWord* _icms_start_limit;
- HeapWord* _icms_stop_limit;
-
enum CMS_op_type {
CMS_op_checkpointRootsInitial,
CMS_op_checkpointRootsFinal
@@ -867,10 +842,6 @@
// collector.
bool waitForForegroundGC();
- // Incremental mode triggering: recompute the icms duty cycle and set the
- // allocation limits in the young gen.
- void icms_update_allocation_limits();
-
size_t block_size_using_printezis_bits(HeapWord* addr) const;
size_t block_size_if_printezis_bits(HeapWord* addr) const;
HeapWord* next_card_start_after_block(HeapWord* addr) const;
@@ -928,9 +899,6 @@
void promoted(bool par, HeapWord* start,
bool is_obj_array, size_t obj_size);
- HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
- size_t word_size);
-
void getFreelistLocks() const;
void releaseFreelistLocks() const;
bool haveFreelistLocks() const;
@@ -1001,14 +969,6 @@
// Timers/stats for gc scheduling and incremental mode pacing.
CMSStats& stats() { return _stats; }
- // Convenience methods that check whether CMSIncrementalMode is enabled and
- // forward to the corresponding methods in ConcurrentMarkSweepThread.
- static void start_icms();
- static void stop_icms(); // Called at the end of the cms cycle.
- static void disable_icms(); // Called before a foreground collection.
- static void enable_icms(); // Called after a foreground collection.
- void icms_wait(); // Called at yield points.
-
// Adaptive size policy
AdaptiveSizePolicy* size_policy();
@@ -1211,9 +1171,6 @@
return allocate(size, tlab);
}
- // Incremental mode triggering.
- HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
- size_t word_size);
// Used by CMSStats to track direct allocation. The value is sampled and
// reset after each young gen collection.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -234,36 +234,6 @@
}
}
-inline void CMSCollector::start_icms() {
- if (CMSIncrementalMode) {
- ConcurrentMarkSweepThread::start_icms();
- }
-}
-
-inline void CMSCollector::stop_icms() {
- if (CMSIncrementalMode) {
- ConcurrentMarkSweepThread::stop_icms();
- }
-}
-
-inline void CMSCollector::disable_icms() {
- if (CMSIncrementalMode) {
- ConcurrentMarkSweepThread::disable_icms();
- }
-}
-
-inline void CMSCollector::enable_icms() {
- if (CMSIncrementalMode) {
- ConcurrentMarkSweepThread::enable_icms();
- }
-}
-
-inline void CMSCollector::icms_wait() {
- if (CMSIncrementalMode) {
- cmsThread()->icms_wait();
- }
-}
-
inline void CMSCollector::save_sweep_limits() {
_cmsGen->save_sweep_limit();
}
@@ -363,12 +333,6 @@
_cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
cur_duration, _cms_alpha);
- // Avoid division by 0.
- const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
- _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
- cur_duration / cms_used_mb,
- _cms_alpha);
-
_cms_end_time.update();
_cms_alpha = _saved_alpha;
_allow_duty_cycle_reduction = true;
@@ -400,15 +364,6 @@
return (gc0_promoted() + cms_allocated()) / gc0_period();
}
-inline unsigned int CMSStats::icms_update_duty_cycle() {
- // Update the duty cycle only if pacing is enabled and the stats are valid
- // (after at least one young gen gc and one cms cycle have completed).
- if (CMSIncrementalPacing && valid()) {
- return icms_update_duty_cycle_impl();
- }
- return _icms_duty_cycle;
-}
-
inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
cmsSpace()->save_sweep_limit();
}
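
(Aside, not part of the changeset.) The CMSStats fields that survive this hunk are exponentially weighted averages: the _cms_duration_per_mb average is dropped, while _cms_duration continues to be smoothed with AdaptiveWeightedAverage::exp_avg. The following is a standalone sketch of that style of weighting only, with an illustrative alpha and made-up samples; it is not the HotSpot implementation.

// Standalone sketch: an exponentially weighted average of the kind CMSStats
// keeps for cycle durations. The real code lives in AdaptiveWeightedAverage;
// here 'alpha_percent' is the weight (0-100) given to the newest sample.
#include <cstdio>

static double exp_avg(double avg, double sample, unsigned alpha_percent) {
  return ((100.0 - alpha_percent) * avg + alpha_percent * sample) / 100.0;
}

int main() {
  double cms_duration = 0.0;
  const unsigned cms_alpha = 25;                 // illustrative weight
  const double samples[] = {1.2, 0.9, 1.5, 1.1}; // seconds per cycle (made up)
  for (double s : samples) {
    cms_duration = exp_avg(cms_duration, s, cms_alpha);
  }
  std::printf("smoothed cms_duration=%.3f s\n", cms_duration);
  return 0;
}
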
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -49,13 +49,6 @@
int ConcurrentMarkSweepThread::_CMS_flag = CMS_nil;
volatile jint ConcurrentMarkSweepThread::_pending_yields = 0;
-volatile jint ConcurrentMarkSweepThread::_pending_decrements = 0;
-
-volatile jint ConcurrentMarkSweepThread::_icms_disabled = 0;
-volatile bool ConcurrentMarkSweepThread::_should_run = false;
-// When icms is enabled, the icms thread is stopped until explicitly
-// started.
-volatile bool ConcurrentMarkSweepThread::_should_stop = true;
SurrogateLockerThread*
ConcurrentMarkSweepThread::_slt = NULL;
@@ -99,7 +92,6 @@
}
}
_sltMonitor = SLT_lock;
- assert(!CMSIncrementalMode || icms_is_enabled(), "Error");
}
void ConcurrentMarkSweepThread::run() {
@@ -184,11 +176,6 @@
}
void ConcurrentMarkSweepThread::stop() {
- if (CMSIncrementalMode) {
- // Disable incremental mode and wake up the thread so it notices the change.
- disable_icms();
- start_icms();
- }
// it is ok to take late safepoints here, if needed
{
MutexLockerEx x(Terminator_lock);
@@ -387,23 +374,13 @@
void ConcurrentMarkSweepThread::sleepBeforeNextCycle() {
while (!_should_terminate) {
- if (CMSIncrementalMode) {
- icms_wait();
- if(CMSWaitDuration >= 0) {
- // Wait until the next synchronous GC, a concurrent full gc
- // request or a timeout, whichever is earlier.
- wait_on_cms_lock_for_scavenge(CMSWaitDuration);
- }
- return;
+ if(CMSWaitDuration >= 0) {
+ // Wait until the next synchronous GC, a concurrent full gc
+ // request or a timeout, whichever is earlier.
+ wait_on_cms_lock_for_scavenge(CMSWaitDuration);
} else {
- if(CMSWaitDuration >= 0) {
- // Wait until the next synchronous GC, a concurrent full gc
- // request or a timeout, whichever is earlier.
- wait_on_cms_lock_for_scavenge(CMSWaitDuration);
- } else {
- // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
- wait_on_cms_lock(CMSCheckInterval);
- }
+ // Wait until any cms_lock event or check interval not to call shouldConcurrentCollect permanently
+ wait_on_cms_lock(CMSCheckInterval);
}
// Check if we should start a CMS collection cycle
if (_collector->shouldConcurrentCollect()) {
@@ -414,42 +391,6 @@
}
}
-// Incremental CMS
-void ConcurrentMarkSweepThread::start_icms() {
- assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
- MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
- trace_state("start_icms");
- _should_run = true;
- iCMS_lock->notify_all();
-}
-
-void ConcurrentMarkSweepThread::stop_icms() {
- assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
- MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
- if (!_should_stop) {
- trace_state("stop_icms");
- _should_stop = true;
- _should_run = false;
- asynchronous_yield_request();
- iCMS_lock->notify_all();
- }
-}
-
-void ConcurrentMarkSweepThread::icms_wait() {
- assert(UseConcMarkSweepGC && CMSIncrementalMode, "just checking");
- if (_should_stop && icms_is_enabled()) {
- MutexLockerEx x(iCMS_lock, Mutex::_no_safepoint_check_flag);
- trace_state("pause_icms");
- _collector->stats().stop_cms_timer();
- while(!_should_run && icms_is_enabled()) {
- iCMS_lock->wait(Mutex::_no_safepoint_check_flag);
- }
- _collector->stats().start_cms_timer();
- _should_stop = false;
- trace_state("pause_icms end");
- }
-}
-
// Note: this method, although exported by the ConcurrentMarkSweepThread,
// which is a non-JavaThread, can only be called by a JavaThread.
// Currently this is done at vm creation time (post-vm-init) by the
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -64,20 +64,11 @@
static bool clear_CMS_flag(int b) { return (_CMS_flag &= ~b) != 0; }
void sleepBeforeNextCycle();
- // CMS thread should yield for a young gen collection, direct allocation,
- // and iCMS activity.
+ // CMS thread should yield for a young gen collection and direct allocations
static char _pad_1[64 - sizeof(jint)]; // prevent cache-line sharing
static volatile jint _pending_yields;
- static volatile jint _pending_decrements; // decrements to _pending_yields
static char _pad_2[64 - sizeof(jint)]; // prevent cache-line sharing
- // Tracing messages, enabled by CMSTraceThreadState.
- static inline void trace_state(const char* desc);
-
- static volatile int _icms_disabled; // a counter to track #iCMS disable & enable
- static volatile bool _should_run; // iCMS may run
- static volatile bool _should_stop; // iCMS should stop
-
// debugging
void verify_ok_to_terminate() const PRODUCT_RETURN;
@@ -135,44 +126,13 @@
void wait_on_cms_lock_for_scavenge(long t_millis);
// The CMS thread will yield during the work portion of its cycle
- // only when requested to. Both synchronous and asychronous requests
- // are provided:
- // (1) A synchronous request is used for young gen collections and
- // for direct allocations. The requesting thread increments
- // _pending_yields at the beginning of an operation, and decrements
- // _pending_yields when that operation is completed.
- // In turn, the CMS thread yields when _pending_yields is positive,
- // and continues to yield until the value reverts to 0.
- // (2) An asynchronous request, on the other hand, is used by iCMS
- // for the stop_icms() operation. A single yield satisfies all of
- // the outstanding asynch yield requests, of which there may
- // occasionally be several in close succession. To accomplish
- // this, an asynch-requesting thread atomically increments both
- // _pending_yields and _pending_decrements. An asynchr requesting
- // thread does not wait and "acknowledge" completion of an operation
- // and deregister the request, like the synchronous version described
- // above does. In turn, after yielding, the CMS thread decrements both
- // _pending_yields and _pending_decrements by the value seen in
- // _pending_decrements before the decrement.
- // NOTE: The above scheme is isomorphic to having two request counters,
- // one for async requests and one for sync requests, and for the CMS thread
- // to check the sum of the two counters to decide whether it should yield
- // and to clear only the async counter when it yields. However, it turns out
- // to be more efficient for CMS code to just check a single counter
- // _pending_yields that holds the sum (of both sync and async requests), and
- // a second counter _pending_decrements that only holds the async requests,
- // for greater efficiency, since in a typical CMS run, there are many more
- // potential (i.e. static) yield points than there are actual
- // (i.e. dynamic) yields because of requests, which are few and far between.
- //
- // Note that, while "_pending_yields >= _pending_decrements" is an invariant,
- // we cannot easily test that invariant, since the counters are manipulated via
- // atomic instructions without explicit locking and we cannot read
- // the two counters atomically together: one suggestion is to
- // use (for example) 16-bit counters so as to be able to read the
- // two counters atomically even on 32-bit platforms. Notice that
- // the second assert in acknowledge_yield_request() below does indeed
- // check a form of the above invariant, albeit indirectly.
+ // only when requested to.
+ // A synchronous request is used for young gen collections and
+ // for direct allocations. The requesting thread increments
+ // _pending_yields at the beginning of an operation, and decrements
+ // _pending_yields when that operation is completed.
+ // In turn, the CMS thread yields when _pending_yields is positive,
+ // and continues to yield until the value reverts to 0.
static void increment_pending_yields() {
Atomic::inc(&_pending_yields);
@@ -182,67 +142,9 @@
Atomic::dec(&_pending_yields);
assert(_pending_yields >= 0, "can't be negative");
}
- static void asynchronous_yield_request() {
- assert(CMSIncrementalMode, "Currently only used w/iCMS");
- increment_pending_yields();
- Atomic::inc(&_pending_decrements);
- assert(_pending_decrements >= 0, "can't be negative");
- }
- static void acknowledge_yield_request() {
- jint decrement = _pending_decrements;
- if (decrement > 0) {
- assert(CMSIncrementalMode, "Currently only used w/iCMS");
- // Order important to preserve: _pending_yields >= _pending_decrements
- Atomic::add(-decrement, &_pending_decrements);
- Atomic::add(-decrement, &_pending_yields);
- assert(_pending_decrements >= 0, "can't be negative");
- assert(_pending_yields >= 0, "can't be negative");
- }
- }
static bool should_yield() { return _pending_yields > 0; }
-
- // CMS incremental mode.
- static void start_icms(); // notify thread to start a quantum of work
- static void stop_icms(); // request thread to stop working
- void icms_wait(); // if asked to stop, wait until notified to start
-
- // Incremental mode is enabled globally by the flag CMSIncrementalMode. It
- // must also be enabled/disabled dynamically to allow foreground collections.
-#define ICMS_ENABLING_ASSERT \
- assert((CMSIncrementalMode && _icms_disabled >= 0) || \
- (!CMSIncrementalMode && _icms_disabled <= 0), "Error")
-
- static inline void enable_icms() {
- ICMS_ENABLING_ASSERT;
- Atomic::dec(&_icms_disabled);
- }
- static inline void disable_icms() {
- ICMS_ENABLING_ASSERT;
- Atomic::inc(&_icms_disabled);
- }
- static inline bool icms_is_disabled() {
- ICMS_ENABLING_ASSERT;
- return _icms_disabled > 0;
- }
- static inline bool icms_is_enabled() {
- return !icms_is_disabled();
- }
};
-inline void ConcurrentMarkSweepThread::trace_state(const char* desc) {
- if (CMSTraceThreadState) {
- char buf[128];
- TimeStamp& ts = gclog_or_tty->time_stamp();
- if (!ts.is_updated()) {
- ts.update();
- }
- jio_snprintf(buf, sizeof(buf), " [%.3f: CMSThread %s] ",
- ts.seconds(), desc);
- buf[sizeof(buf) - 1] = '\0';
- gclog_or_tty->print("%s", buf);
- }
-}
-
// For scoped increment/decrement of (synchronous) yield requests
class CMSSynchronousYieldRequest: public StackObj {
public:
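
(Aside, not part of the changeset.) The shortened comment in this file reduces the yield machinery to the synchronous protocol alone: a requesting thread increments _pending_yields for the duration of an operation, and the CMS thread yields while the counter is positive. Below is a self-contained sketch of that protocol in plain C++, with std::atomic standing in for HotSpot's Atomic class and all names chosen for illustration.

// Standalone sketch (not HotSpot code) of the synchronous yield-request
// counter that remains once the asynchronous iCMS path is removed.
#include <atomic>
#include <cassert>

class YieldRequests {
  static std::atomic<int> _pending_yields;   // analogous to _pending_yields
 public:
  static void increment() { _pending_yields.fetch_add(1, std::memory_order_relaxed); }
  static void decrement() {
    int old_value = _pending_yields.fetch_sub(1, std::memory_order_relaxed);
    assert(old_value > 0 && "can't be negative");
  }
  static bool should_yield() { return _pending_yields.load(std::memory_order_relaxed) > 0; }
};
std::atomic<int> YieldRequests::_pending_yields{0};

// RAII wrapper, analogous in spirit to CMSSynchronousYieldRequest.
struct ScopedYieldRequest {
  ScopedYieldRequest()  { YieldRequests::increment(); }
  ~ScopedYieldRequest() { YieldRequests::decrement(); }
};

int main() {
  { ScopedYieldRequest r; /* young gen collection or direct allocation in progress */ }
  return YieldRequests::should_yield() ? 1 : 0;   // 0: no pending requests remain
}
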
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -207,12 +207,6 @@
MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
assert(_full_gc_count_before <= gch->total_full_collections(), "Error");
if (gch->total_full_collections() == _full_gc_count_before) {
- // Disable iCMS until the full collection is done, and
- // remember that we did so.
- CMSCollector::disable_icms();
- _disabled_icms = true;
- // In case CMS thread was in icms_wait(), wake it up.
- CMSCollector::start_icms();
// Nudge the CMS thread to start a concurrent collection.
CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
} else {
@@ -276,8 +270,4 @@
FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
}
}
- // Enable iCMS back if we disabled it earlier.
- if (_disabled_icms) {
- CMSCollector::enable_icms();
- }
}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -128,13 +128,11 @@
// VM operation to invoke a concurrent collection of the heap as a
// GenCollectedHeap heap.
class VM_GenCollectFullConcurrent: public VM_GC_Operation {
- bool _disabled_icms;
public:
VM_GenCollectFullConcurrent(unsigned int gc_count_before,
unsigned int full_gc_count_before,
GCCause::Cause gc_cause)
- : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
- _disabled_icms(false)
+ : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
{
assert(FullGCCount_lock != NULL, "Error");
assert(UseAsyncConcMarkSweepGC, "Else will hang caller");
--- a/hotspot/src/share/vm/memory/collectorPolicy.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/memory/collectorPolicy.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -189,11 +189,6 @@
return CollectorPolicy::CollectorPolicyKind;
}
- // Returns true if a collector has eden space with soft end.
- virtual bool has_soft_ended_eden() {
- return false;
- }
-
// Do any updates required to global flags that are due to heap initialization
// changes
virtual void post_heap_initialize() = 0;
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -194,11 +194,7 @@
(HeapWord*)_virtual_space.high());
Universe::heap()->barrier_set()->resize_covered_region(cmr);
- if (GenCollectedHeap::heap()->collector_policy()->has_soft_ended_eden()) {
- _eden_space = new ConcEdenSpace(this);
- } else {
- _eden_space = new EdenSpace(this);
- }
+ _eden_space = new ContiguousSpace();
_from_space = new ContiguousSpace();
_to_space = new ContiguousSpace();
@@ -1038,38 +1034,12 @@
if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
_next_gen->sample_eden_chunk();
}
- return result;
- }
- do {
- HeapWord* old_limit = eden()->soft_end();
- if (old_limit < eden()->end()) {
- // Tell the next generation we reached a limit.
- HeapWord* new_limit =
- next_gen()->allocation_limit_reached(eden(), eden()->top(), word_size);
- if (new_limit != NULL) {
- Atomic::cmpxchg_ptr(new_limit, eden()->soft_end_addr(), old_limit);
- } else {
- assert(eden()->soft_end() == eden()->end(),
- "invalid state after allocation_limit_reached returned null");
- }
- } else {
- // The allocation failed and the soft limit is equal to the hard limit,
- // there are no reasons to do an attempt to allocate
- assert(old_limit == eden()->end(), "sanity check");
- break;
- }
- // Try to allocate until succeeded or the soft limit can't be adjusted
- result = eden()->par_allocate(word_size);
- } while (result == NULL);
-
- // If the eden is full and the last collection bailed out, we are running
- // out of heap space, and we try to allocate the from-space, too.
- // allocate_from_space can't be inlined because that would introduce a
- // circular dependency at compile time.
- if (result == NULL) {
+ } else {
+ // If the eden is full and the last collection bailed out, we are running
+ // out of heap space, and we try to allocate the from-space, too.
+ // allocate_from_space can't be inlined because that would introduce a
+ // circular dependency at compile time.
result = allocate_from_space(word_size);
- } else if (CMSEdenChunksRecordAlways && _next_gen != NULL) {
- _next_gen->sample_eden_chunk();
}
return result;
}
@@ -1083,11 +1053,6 @@
return res;
}
-void DefNewGeneration::gc_prologue(bool full) {
- // Ensure that _end and _soft_end are the same in eden space.
- eden()->set_soft_end(eden()->end());
-}
-
size_t DefNewGeneration::tlab_capacity() const {
return eden()->capacity();
}
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -32,7 +32,6 @@
#include "memory/generation.inline.hpp"
#include "utilities/stack.hpp"
-class EdenSpace;
class ContiguousSpace;
class ScanClosure;
class STWGCTimer;
@@ -132,7 +131,7 @@
void adjust_desired_tenuring_threshold();
// Spaces
- EdenSpace* _eden_space;
+ ContiguousSpace* _eden_space;
ContiguousSpace* _from_space;
ContiguousSpace* _to_space;
@@ -214,9 +213,9 @@
virtual Generation::Name kind() { return Generation::DefNew; }
// Accessing spaces
- EdenSpace* eden() const { return _eden_space; }
- ContiguousSpace* from() const { return _from_space; }
- ContiguousSpace* to() const { return _to_space; }
+ ContiguousSpace* eden() const { return _eden_space; }
+ ContiguousSpace* from() const { return _from_space; }
+ ContiguousSpace* to() const { return _to_space; }
virtual CompactibleSpace* first_compaction_space() const;
@@ -282,8 +281,6 @@
HeapWord* par_allocate(size_t word_size, bool is_tlab);
- // Prologue & Epilogue
- virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
// Save the tops for eden, from, and to
--- a/hotspot/src/share/vm/memory/generation.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/memory/generation.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -265,14 +265,6 @@
// Like "allocate", but performs any necessary locking internally.
virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
- // A 'younger' gen has reached an allocation limit, and uses this to notify
- // the next older gen. The return value is a new limit, or NULL if none. The
- // caller must do the necessary locking.
- virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
- size_t word_size) {
- return NULL;
- }
-
// Some generation may offer a region for shared, contiguous allocation,
// via inlined code (by exporting the address of the top and end fields
// defining the extent of the contiguous allocation region.)
--- a/hotspot/src/share/vm/memory/space.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -640,13 +640,12 @@
}
// This version requires locking.
-inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
- HeapWord* const end_value) {
+inline HeapWord* ContiguousSpace::allocate_impl(size_t size) {
assert(Heap_lock->owned_by_self() ||
(SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
"not locked");
HeapWord* obj = top();
- if (pointer_delta(end_value, obj) >= size) {
+ if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
set_top(new_top);
assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
@@ -657,11 +656,10 @@
}
// This version is lock-free.
-inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
- HeapWord* const end_value) {
+inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) {
do {
HeapWord* obj = top();
- if (pointer_delta(end_value, obj) >= size) {
+ if (pointer_delta(end(), obj) >= size) {
HeapWord* new_top = obj + size;
HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
// result can be one of two:
@@ -700,12 +698,12 @@
// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
- return allocate_impl(size, end());
+ return allocate_impl(size);
}
// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
- return par_allocate_impl(size, end());
+ return par_allocate_impl(size);
}
void ContiguousSpace::allocate_temporary_filler(int factor) {
@@ -740,49 +738,6 @@
}
}
-void EdenSpace::clear(bool mangle_space) {
- ContiguousSpace::clear(mangle_space);
- set_soft_end(end());
-}
-
-// Requires locking.
-HeapWord* EdenSpace::allocate(size_t size) {
- return allocate_impl(size, soft_end());
-}
-
-// Lock-free.
-HeapWord* EdenSpace::par_allocate(size_t size) {
- return par_allocate_impl(size, soft_end());
-}
-
-HeapWord* ConcEdenSpace::par_allocate(size_t size)
-{
- do {
- // The invariant is top() should be read before end() because
- // top() can't be greater than end(), so if an update of _soft_end
- // occurs between 'end_val = end();' and 'top_val = top();' top()
- // also can grow up to the new end() and the condition
- // 'top_val > end_val' is true. To ensure the loading order
- // OrderAccess::loadload() is required after top() read.
- HeapWord* obj = top();
- OrderAccess::loadload();
- if (pointer_delta(*soft_end_addr(), obj) >= size) {
- HeapWord* new_top = obj + size;
- HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
- // result can be one of two:
- // the old top value: the exchange succeeded
- // otherwise: the new value of the top is returned.
- if (result == obj) {
- assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
- return obj;
- }
- } else {
- return NULL;
- }
- } while (true);
-}
-
-
HeapWord* OffsetTableContigSpace::initialize_threshold() {
return _offsets.initialize_threshold();
}
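
(Aside, not part of the changeset.) With the soft end gone, both allocate_impl() and par_allocate_impl() race only against the hard end() of the space. A self-contained sketch of the lock-free variant follows, using std::atomic in place of Atomic::cmpxchg_ptr; the class name, buffer size, and layout are illustrative, not HotSpot code.

// Standalone sketch: CAS-based bump-pointer allocation against a fixed end.
#include <atomic>
#include <cstddef>

class BumpSpace {
  std::atomic<char*> _top;
  char*              _end;
 public:
  BumpSpace(char* bottom, char* end) : _top(bottom), _end(end) {}

  // Returns NULL when the request does not fit; the caller takes a slow path.
  void* par_allocate(size_t size) {
    char* obj = _top.load(std::memory_order_relaxed);
    do {
      if (static_cast<size_t>(_end - obj) < size) {
        return NULL;                    // full: no soft limit left to adjust
      }
      // On failure, compare_exchange_weak refreshes 'obj' with the current top.
    } while (!_top.compare_exchange_weak(obj, obj + size,
                                         std::memory_order_relaxed));
    return obj;                         // old top: this thread won the race
  }
};

int main() {
  static char heap[1024];
  BumpSpace eden(heap, heap + sizeof(heap));
  void* a = eden.par_allocate(64);      // fits
  void* b = eden.par_allocate(2048);    // does not fit -> NULL
  return (a != NULL && b == NULL) ? 0 : 1;
}
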
--- a/hotspot/src/share/vm/memory/space.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/memory/space.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -41,20 +41,6 @@
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.
-// Here's the Space hierarchy:
-//
-// - Space -- an abstract base class describing a heap area
-// - CompactibleSpace -- a space supporting compaction
-// - CompactibleFreeListSpace -- (used for CMS generation)
-// - G1OffsetTableContigSpace -- G1 version of OffsetTableContigSpace
-// - ContiguousSpace -- a compactible space in which all free space
-// is contiguous
-// - EdenSpace -- contiguous space used as nursery
-// - ConcEdenSpace -- contiguous space with a 'soft end safe' allocation
-// - OffsetTableContigSpace -- contiguous space with a block offset array
-// that allows "fast" block_start calls
-// - TenuredSpace -- (used for TenuredGeneration)
-
// Forward decls.
class Space;
class BlockOffsetArray;
@@ -544,8 +530,8 @@
GenSpaceMangler* mangler() { return _mangler; }
// Allocation helpers (return NULL if full).
- inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
- inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
+ inline HeapWord* allocate_impl(size_t word_size);
+ inline HeapWord* par_allocate_impl(size_t word_size);
public:
ContiguousSpace();
@@ -761,56 +747,6 @@
{}
};
-
-// Class EdenSpace describes eden-space in new generation.
-
-class DefNewGeneration;
-
-class EdenSpace : public ContiguousSpace {
- friend class VMStructs;
- private:
- DefNewGeneration* _gen;
-
- // _soft_end is used as a soft limit on allocation. As soft limits are
- // reached, the slow-path allocation code can invoke other actions and then
- // adjust _soft_end up to a new soft limit or to end().
- HeapWord* _soft_end;
-
- public:
- EdenSpace(DefNewGeneration* gen) :
- _gen(gen), _soft_end(NULL) {}
-
- // Get/set just the 'soft' limit.
- HeapWord* soft_end() { return _soft_end; }
- HeapWord** soft_end_addr() { return &_soft_end; }
- void set_soft_end(HeapWord* value) { _soft_end = value; }
-
- // Override.
- void clear(bool mangle_space);
-
- // Set both the 'hard' and 'soft' limits (_end and _soft_end).
- void set_end(HeapWord* value) {
- set_soft_end(value);
- ContiguousSpace::set_end(value);
- }
-
- // Allocation (return NULL if full)
- HeapWord* allocate(size_t word_size);
- HeapWord* par_allocate(size_t word_size);
-};
-
-// Class ConcEdenSpace extends EdenSpace for the sake of safe
-// allocation while soft-end is being modified concurrently
-
-class ConcEdenSpace : public EdenSpace {
- public:
- ConcEdenSpace(DefNewGeneration* gen) : EdenSpace(gen) { }
-
- // Allocation (return NULL if full)
- HeapWord* par_allocate(size_t word_size);
-};
-
-
// A ContigSpace that Supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces.) This is the abstract base class for old generation
--- a/hotspot/src/share/vm/opto/macro.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/opto/macro.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -1197,8 +1197,7 @@
}
if (C->env()->dtrace_alloc_probes() ||
- !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() ||
- (UseConcMarkSweepGC && CMSIncrementalMode))) {
+ !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc())) {
// Force slow-path allocation
always_slow = true;
initial_slow_test = NULL;
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -1774,7 +1774,7 @@
#ifdef ASSERT
static bool verify_serial_gc_flags() {
return (UseSerialGC &&
- !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
+ !(UseParNewGC || (UseConcMarkSweepGC) || UseG1GC ||
UseParallelGC || UseParallelOldGC));
}
#endif // ASSERT
@@ -2188,10 +2188,6 @@
warning("Using the ParNew young collector with the Serial old collector is deprecated "
"and will likely be removed in a future release");
}
-
- if (CMSIncrementalMode) {
- warning("Using incremental CMS is deprecated and will likely be removed in a future release");
- }
}
void Arguments::check_deprecated_gc_flags() {
@@ -2313,31 +2309,8 @@
status = status && ArgumentsExt::check_gc_consistency_user();
status = status && check_stack_pages();
- if (CMSIncrementalMode) {
- if (!UseConcMarkSweepGC) {
- jio_fprintf(defaultStream::error_stream(),
- "error: invalid argument combination.\n"
- "The CMS collector (-XX:+UseConcMarkSweepGC) must be "
- "selected in order\nto use CMSIncrementalMode.\n");
- status = false;
- } else {
- status = status && verify_percentage(CMSIncrementalDutyCycle,
- "CMSIncrementalDutyCycle");
- status = status && verify_percentage(CMSIncrementalDutyCycleMin,
- "CMSIncrementalDutyCycleMin");
- status = status && verify_percentage(CMSIncrementalSafetyFactor,
- "CMSIncrementalSafetyFactor");
- status = status && verify_percentage(CMSIncrementalOffset,
- "CMSIncrementalOffset");
- status = status && verify_percentage(CMSExpAvgFactor,
- "CMSExpAvgFactor");
- // If it was not set on the command line, set
- // CMSInitiatingOccupancyFraction to 1 so icms can initiate cycles early.
- if (CMSInitiatingOccupancyFraction < 0) {
- FLAG_SET_DEFAULT(CMSInitiatingOccupancyFraction, 1);
- }
- }
- }
+ status = status && verify_percentage(CMSIncrementalSafetyFactor,
+ "CMSIncrementalSafetyFactor");
// CMS space iteration, which FLSVerifyAllHeapreferences entails,
// insists that we hold the requisite locks so that the iteration is
@@ -2870,14 +2843,6 @@
// -Xnoclassgc
} else if (match_option(option, "-Xnoclassgc", &tail)) {
FLAG_SET_CMDLINE(bool, ClassUnloading, false);
- // -Xincgc: i-CMS
- } else if (match_option(option, "-Xincgc", &tail)) {
- FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
- FLAG_SET_CMDLINE(bool, CMSIncrementalMode, true);
- // -Xnoincgc: no i-CMS
- } else if (match_option(option, "-Xnoincgc", &tail)) {
- FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false);
- FLAG_SET_CMDLINE(bool, CMSIncrementalMode, false);
// -Xconcgc
} else if (match_option(option, "-Xconcgc", &tail)) {
FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
@@ -3707,7 +3672,6 @@
#if !INCLUDE_ALL_GCS
static void force_serial_gc() {
FLAG_SET_DEFAULT(UseSerialGC, true);
- FLAG_SET_DEFAULT(CMSIncrementalMode, false); // special CMS suboption
UNSUPPORTED_GC_OPTION(UseG1GC);
UNSUPPORTED_GC_OPTION(UseParallelGC);
UNSUPPORTED_GC_OPTION(UseParallelOldGC);
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -1638,30 +1638,10 @@
"The maximum size of young gen chosen by default per GC worker " \
"thread available") \
\
- product(bool, CMSIncrementalMode, false, \
- "Whether CMS GC should operate in \"incremental\" mode") \
- \
- product(uintx, CMSIncrementalDutyCycle, 10, \
- "Percentage (0-100) of CMS incremental mode duty cycle. If " \
- "CMSIncrementalPacing is enabled, then this is just the initial " \
- "value.") \
- \
- product(bool, CMSIncrementalPacing, true, \
- "Whether the CMS incremental mode duty cycle should be " \
- "automatically adjusted") \
- \
- product(uintx, CMSIncrementalDutyCycleMin, 0, \
- "Minimum percentage (0-100) of the CMS incremental duty cycle " \
- "used when CMSIncrementalPacing is enabled") \
- \
product(uintx, CMSIncrementalSafetyFactor, 10, \
"Percentage (0-100) used to add conservatism when computing the " \
"duty cycle") \
\
- product(uintx, CMSIncrementalOffset, 0, \
- "Percentage (0-100) by which the CMS incremental mode duty cycle "\
- "is shifted to the right within the period between young GCs") \
- \
product(uintx, CMSExpAvgFactor, 50, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponential averages for CMS statistics") \
@@ -1720,15 +1700,6 @@
"Skip block flux-rate sampling for an epoch unless inter-sweep " \
"duration exceeds this threshold in milliseconds") \
\
- develop(bool, CMSTraceIncrementalMode, false, \
- "Trace CMS incremental mode") \
- \
- develop(bool, CMSTraceIncrementalPacing, false, \
- "Trace CMS incremental mode pacing computation") \
- \
- develop(bool, CMSTraceThreadState, false, \
- "Trace the CMS thread state (enable the trace_state() method)") \
- \
product(bool, CMSClassUnloadingEnabled, true, \
"Whether class unloading enabled when using CMS GC") \
\
--- a/hotspot/src/share/vm/runtime/mutexLocker.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/runtime/mutexLocker.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -72,7 +72,6 @@
Monitor* CGC_lock = NULL;
Monitor* STS_lock = NULL;
Monitor* SLT_lock = NULL;
-Monitor* iCMS_lock = NULL;
Monitor* FullGCCount_lock = NULL;
Monitor* CMark_lock = NULL;
Mutex* CMRegionStack_lock = NULL;
@@ -175,9 +174,6 @@

def(CGC_lock , Monitor, special, true ); // coordinate between fore- and background GC
def(STS_lock , Monitor, leaf, true );
- if (UseConcMarkSweepGC) {
- def(iCMS_lock , Monitor, special, true ); // CMS incremental mode start/stop notification
- }
if (UseConcMarkSweepGC || UseG1GC) {
def(FullGCCount_lock , Monitor, leaf, true ); // in support of ExplicitGCInvokesConcurrent
}
--- a/hotspot/src/share/vm/runtime/mutexLocker.hpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/runtime/mutexLocker.hpp Fri Oct 31 09:10:51 2014 +0100
@@ -66,7 +66,6 @@
// fore- & background GC threads.
extern Monitor* STS_lock; // used for joining/leaving SuspendibleThreadSet.
extern Monitor* SLT_lock; // used in CMS GC for acquiring PLL
-extern Monitor* iCMS_lock; // CMS incremental mode start/stop notification
extern Monitor* FullGCCount_lock; // in support of "concurrent" full gc
extern Monitor* CMark_lock; // used for concurrent mark thread coordination
extern Mutex* CMRegionStack_lock; // used for protecting accesses to the CM region stack
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Oct 31 09:10:51 2014 +0100
@@ -527,12 +527,10 @@
nonstatic_field(DefNewGeneration, _next_gen, Generation*) \
nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, ageTable) \
- nonstatic_field(DefNewGeneration, _eden_space, EdenSpace*) \
+ nonstatic_field(DefNewGeneration, _eden_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
nonstatic_field(DefNewGeneration, _to_space, ContiguousSpace*) \
\
- nonstatic_field(EdenSpace, _gen, DefNewGeneration*) \
- \
nonstatic_field(Generation, _reserved, MemRegion) \
nonstatic_field(Generation, _virtual_space, VirtualSpace) \
nonstatic_field(Generation, _level, int) \
@@ -1490,7 +1488,6 @@
declare_toplevel_type(BitMap) \
declare_type(CompactibleSpace, Space) \
declare_type(ContiguousSpace, CompactibleSpace) \
- declare_type(EdenSpace, ContiguousSpace) \
declare_type(OffsetTableContigSpace, ContiguousSpace) \
declare_type(TenuredSpace, OffsetTableContigSpace) \
declare_toplevel_type(BarrierSet) \
@@ -1532,7 +1529,6 @@
declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \
declare_toplevel_type(DefNewGeneration*) \
- declare_toplevel_type(EdenSpace*) \
declare_toplevel_type(GenCollectedHeap*) \
declare_toplevel_type(Generation*) \
declare_toplevel_type(GenerationSpec**) \
--- a/hotspot/test/TEST.groups Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/test/TEST.groups Fri Oct 31 09:10:51 2014 +0100
@@ -175,11 +175,8 @@
gc/g1/TestShrinkToOneRegion.java \
gc/metaspace/G1AddMetaspaceDependency.java \
gc/startup_warnings/TestCMS.java \
- gc/startup_warnings/TestCMSIncrementalMode.java \
- gc/startup_warnings/TestCMSNoIncrementalMode.java \
gc/startup_warnings/TestDefaultMaxRAMFraction.java \
gc/startup_warnings/TestDefNewCMS.java \
- gc/startup_warnings/TestIncGC.java \
gc/startup_warnings/TestParallelGC.java \
gc/startup_warnings/TestParallelScavengeSerialOld.java \
gc/startup_warnings/TestParNewCMS.java \
@@ -273,8 +270,6 @@
gc/class_unloading/TestCMSClassUnloadingEnabledHWM.java \
gc/concurrentMarkSweep/ \
gc/startup_warnings/TestCMS.java \
- gc/startup_warnings/TestCMSIncrementalMode.java \
- gc/startup_warnings/TestCMSNoIncrementalMode.java \
gc/startup_warnings/TestDefNewCMS.java \
gc/startup_warnings/TestParNewCMS.java

--- a/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java Thu Oct 30 12:45:22 2014 +0100
+++ b/hotspot/test/gc/g1/TestShrinkAuxiliaryData.java Fri Oct 31 09:10:51 2014 +0100
@@ -279,8 +279,7 @@
"-XX:\\+UseConcMarkSweepGC",
"-XX:\\+UseParallelOldGC",
"-XX:\\+UseParNewGC",
- "-Xconcgc",
- "-Xincgc"
+ "-Xconcgc"
};
}
}
--- a/hotspot/test/gc/startup_warnings/TestCMSIncrementalMode.java Thu Oct 30 12:45:22 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-
-/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*/
-
-/*
-* @test TestCMSIncrementalMode
-* @key gc
-* @bug 8006398
-* @summary Test that the deprecated CMSIncrementalMode print a warning message
-* @library /testlibrary
-*/
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestCMSIncrementalMode {
-
- public static void main(String args[]) throws Exception {
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:+CMSIncrementalMode", "-version");
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
- output.shouldNotContain("error");
- output.shouldHaveExitValue(0);
- }
-
-}
--- a/hotspot/test/gc/startup_warnings/TestCMSNoIncrementalMode.java Thu Oct 30 12:45:22 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*/
-
-/*
-* @test TestCMSNoIncrementalMode
-* @key gc
-* @bug 8006398
-* @summary Test that CMS with incremental mode turned off does not print a warning message
-* @library /testlibrary
-*/
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-public class TestCMSNoIncrementalMode {
-
- public static void main(String args[]) throws Exception {
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseConcMarkSweepGC", "-XX:-CMSIncrementalMode", "-version");
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldNotContain("deprecated");
- output.shouldNotContain("error");
- output.shouldHaveExitValue(0);
- }
-
-}
--- a/hotspot/test/gc/startup_warnings/TestIncGC.java Thu Oct 30 12:45:22 2014 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,46 +0,0 @@
-/*
-* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-*
-* This code is free software; you can redistribute it and/or modify it
-* under the terms of the GNU General Public License version 2 only, as
-* published by the Free Software Foundation.
-*
-* This code is distributed in the hope that it will be useful, but WITHOUT
-* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-* version 2 for more details (a copy is included in the LICENSE file that
-* accompanied this code).
-*
-* You should have received a copy of the GNU General Public License version
-* 2 along with this work; if not, write to the Free Software Foundation,
-* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-*
-* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-* or visit www.oracle.com if you need additional information or have any
-* questions.
-*/
-
-/*
-* @test TestIncGC
-* @key gc
-* @bug 8006398
-* @summary Test that the deprecated -Xincgc print a warning message
-* @library /testlibrary
-*/
-
-import com.oracle.java.testlibrary.OutputAnalyzer;
-import com.oracle.java.testlibrary.ProcessTools;
-
-
-public class TestIncGC {
-
- public static void main(String args[]) throws Exception {
- ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xincgc", "-version");
- OutputAnalyzer output = new OutputAnalyzer(pb.start());
- output.shouldContain("warning: Using incremental CMS is deprecated and will likely be removed in a future release");
- output.shouldNotContain("error");
- output.shouldHaveExitValue(0);
- }
-
-}