8214791: Consistently name gc files containing VM operations
Summary: Name all gc files containing VM operations according to a <gc>VMOperations.?pp naming scheme.
Reviewed-by: coleenp, dholmes
--- a/make/hotspot/lib/JvmDtraceObjects.gmk Thu Dec 06 13:55:22 2018 +0100
+++ b/make/hotspot/lib/JvmDtraceObjects.gmk Thu Dec 06 15:44:13 2018 +0100
@@ -60,6 +60,7 @@
ciEnv.o \
classLoadingService.o \
compileBroker.o \
+ gcVMOperations.o \
hashtable.o \
instanceKlass.o \
java.o \
@@ -74,18 +75,17 @@
thread.o \
unsafe.o \
vmThread.o \
- vmGCOperations.o \
)
ifeq ($(call check-jvm-feature, cmsgc), true)
DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
- vmCMSOperations.o \
+ cmsVMOperations.o \
)
endif
ifeq ($(call check-jvm-feature, parallelgc), true)
DTRACE_INSTRUMENTED_OBJS += $(addprefix $(JVM_OUTPUTDIR)/objs/, \
- vmPSOperations.o \
+ psVMOperations.o \
)
endif
--- a/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/cms/cmsCollectorPolicy.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,9 +30,9 @@
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/arguments.hpp"
--- a/src/hotspot/share/gc/cms/cmsHeap.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/cms/cmsHeap.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -24,12 +24,12 @@
#include "precompiled.hpp"
#include "gc/cms/cmsCardTable.hpp"
+#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/parNewGeneration.hpp"
-#include "gc/cms/vmCMSOperations.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genMemoryPools.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsVMOperations.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
+#include "gc/cms/cmsVMOperations.hpp"
+#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
+#include "gc/cms/concurrentMarkSweepThread.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/os.hpp"
+#include "utilities/dtrace.hpp"
+
+//////////////////////////////////////////////////////////
+// Methods in abstract class VM_CMS_Operation
+//////////////////////////////////////////////////////////
+void VM_CMS_Operation::verify_before_gc() {
+ if (VerifyBeforeGC &&
+ CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
+ GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
+ HandleMark hm;
+ FreelistLocker x(_collector);
+ MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
+ CMSHeap::heap()->prepare_for_verify();
+ Universe::verify();
+ }
+}
+
+void VM_CMS_Operation::verify_after_gc() {
+ if (VerifyAfterGC &&
+ CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
+ GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
+ HandleMark hm;
+ FreelistLocker x(_collector);
+ MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
+ Universe::verify();
+ }
+}
+
+bool VM_CMS_Operation::lost_race() const {
+ if (CMSCollector::abstract_state() == CMSCollector::Idling) {
+ // We lost a race to a foreground collection
+ // -- there's nothing to do
+ return true;
+ }
+ assert(CMSCollector::abstract_state() == legal_state(),
+ "Inconsistent collector state?");
+ return false;
+}
+
+bool VM_CMS_Operation::doit_prologue() {
+ assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
+ assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
+ assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+ "Possible deadlock");
+
+ Heap_lock->lock();
+ if (lost_race()) {
+ assert(_prologue_succeeded == false, "Initialized in c'tor");
+ Heap_lock->unlock();
+ } else {
+ _prologue_succeeded = true;
+ }
+ return _prologue_succeeded;
+}
+
+void VM_CMS_Operation::doit_epilogue() {
+ assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
+ assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
+ assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
+ "Possible deadlock");
+
+ if (Universe::has_reference_pending_list()) {
+ Heap_lock->notify_all();
+ }
+ Heap_lock->unlock();
+}
+
+//////////////////////////////////////////////////////////
+// Methods in class VM_CMS_Initial_Mark
+//////////////////////////////////////////////////////////
+void VM_CMS_Initial_Mark::doit() {
+ if (lost_race()) {
+ // Nothing to do.
+ return;
+ }
+ HS_PRIVATE_CMS_INITMARK_BEGIN();
+ GCIdMark gc_id_mark(_gc_id);
+
+ _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
+
+ CMSHeap* heap = CMSHeap::heap();
+ GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
+
+ VM_CMS_Operation::verify_before_gc();
+
+ IsGCActiveMark x; // stop-world GC active
+ _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
+
+ VM_CMS_Operation::verify_after_gc();
+
+ _collector->_gc_timer_cm->register_gc_pause_end();
+
+ HS_PRIVATE_CMS_INITMARK_END();
+}
+
+//////////////////////////////////////////////////////////
+// Methods in class VM_CMS_Final_Remark
+//////////////////////////////////////////////////////////
+void VM_CMS_Final_Remark::doit() {
+ if (lost_race()) {
+ // Nothing to do.
+ return;
+ }
+ HS_PRIVATE_CMS_REMARK_BEGIN();
+ GCIdMark gc_id_mark(_gc_id);
+
+ _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
+
+ CMSHeap* heap = CMSHeap::heap();
+ GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
+
+ VM_CMS_Operation::verify_before_gc();
+
+ IsGCActiveMark x; // stop-world GC active
+ _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
+
+ VM_CMS_Operation::verify_after_gc();
+
+ _collector->save_heap_summary();
+ _collector->_gc_timer_cm->register_gc_pause_end();
+
+ HS_PRIVATE_CMS_REMARK_END();
+}
+
+// VM operation to invoke a concurrent collection of a
+// GenCollectedHeap heap.
+void VM_GenCollectFullConcurrent::doit() {
+ assert(Thread::current()->is_VM_thread(), "Should be VM thread");
+ assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
+
+ CMSHeap* heap = CMSHeap::heap();
+ if (_gc_count_before == heap->total_collections()) {
+ // The "full" of do_full_collection call below "forces"
+ // a collection; the second arg, 0, below ensures that
+ // only the young gen is collected. XXX In the future,
+ // we'll probably need to have something in this interface
+ // to say do this only if we are sure we will not bail
+ // out to a full collection in this attempt, but that's
+ // for the future.
+ assert(SafepointSynchronize::is_at_safepoint(),
+ "We can only be executing this arm of if at a safepoint");
+ GCCauseSetter gccs(heap, _gc_cause);
+ heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
+ } // Else no need for a foreground young gc
+ assert((_gc_count_before < heap->total_collections()) ||
+ (GCLocker::is_active() /* gc may have been skipped */
+ && (_gc_count_before == heap->total_collections())),
+ "total_collections() should be monotonically increasing");
+
+ MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+ assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
+ if (heap->total_full_collections() == _full_gc_count_before) {
+ // Nudge the CMS thread to start a concurrent collection.
+ CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
+ } else {
+ assert(_full_gc_count_before < heap->total_full_collections(), "Error");
+ FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
+ }
+}
+
+bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
+ Thread* thr = Thread::current();
+ assert(thr != NULL, "Unexpected tid");
+ if (!thr->is_Java_thread()) {
+ assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
+ CMSHeap* heap = CMSHeap::heap();
+ if (_gc_count_before != heap->total_collections()) {
+ // No need to do a young gc, we'll just nudge the CMS thread
+ // in the doit() method above, to be executed soon.
+ assert(_gc_count_before < heap->total_collections(),
+ "total_collections() should be monotonically increasing");
+ return false; // no need for foreground young gc
+ }
+ }
+ return true; // may still need foreground young gc
+}
+
+
+void VM_GenCollectFullConcurrent::doit_epilogue() {
+ Thread* thr = Thread::current();
+ assert(thr->is_Java_thread(), "just checking");
+ JavaThread* jt = (JavaThread*)thr;
+
+ if (Universe::has_reference_pending_list()) {
+ Heap_lock->notify_all();
+ }
+ Heap_lock->unlock();
+
+ // It is fine to test whether completed collections has
+ // exceeded our request count without locking because
+ // the completion count is monotonically increasing;
+ // this will break for very long-running apps when the
+ // count overflows and wraps around. XXX fix me !!!
+ // e.g. at the rate of 1 full gc per ms, this could
+ // overflow in about 1000 years.
+ CMSHeap* heap = CMSHeap::heap();
+ if (_gc_cause != GCCause::_gc_locker &&
+ heap->total_full_collections_completed() <= _full_gc_count_before) {
+ // maybe we should change the condition to test _gc_cause ==
+ // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
+ // instead of _gc_cause != GCCause::_gc_locker
+ assert(GCCause::is_user_requested_gc(_gc_cause),
+ "the only way to get here if this was a System.gc()-induced GC");
+ assert(ExplicitGCInvokesConcurrent, "Error");
+ // Now, wait for witnessing concurrent gc cycle to complete,
+ // but do so in native mode, because we want to lock the
+ // FullGCEvent_lock, which may be needed by the VM thread
+ // or by the CMS thread, so we do not want to be suspended
+ // while holding that lock.
+ ThreadToNativeFromVM native(jt);
+ MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+ // Either a concurrent or a stop-world full gc is sufficient
+ // witness to our request.
+ while (heap->total_full_collections_completed() <= _full_gc_count_before) {
+ FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsVMOperations.hpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_CMS_CMSVMOPERATIONS_HPP
+#define SHARE_VM_GC_CMS_CMSVMOPERATIONS_HPP
+
+#include "gc/cms/concurrentMarkSweepGeneration.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcVMOperations.hpp"
+#include "runtime/vm_operations.hpp"
+
+// The VM_CMS_Operation is slightly different from
+// a VM_GC_Operation -- and would not have subclassed easily
+// to VM_GC_Operation without several changes to VM_GC_Operation.
+// To minimize the changes, we have replicated some of the VM_GC_Operation
+// functionality here. We will consolidate that back by doing subclassing
+// as appropriate in Dolphin.
+//
+// VM_Operation
+// VM_CMS_Operation
+// - implements the common portion of work done in support
+// of CMS' stop-world phases (initial mark and remark).
+//
+// VM_CMS_Initial_Mark
+// VM_CMS_Final_Remark
+//
+
+// Forward decl.
+class CMSCollector;
+
+class VM_CMS_Operation: public VM_Operation {
+ protected:
+ CMSCollector* _collector; // associated collector
+ bool _prologue_succeeded; // whether doit_prologue succeeded
+ uint _gc_id;
+
+ bool lost_race() const;
+
+ public:
+ VM_CMS_Operation(CMSCollector* collector):
+ _collector(collector),
+ _prologue_succeeded(false),
+ _gc_id(GCId::current()) {}
+ ~VM_CMS_Operation() {}
+
+ // The legal collector state for executing this CMS op.
+ virtual const CMSCollector::CollectorState legal_state() const = 0;
+
+ // Whether the pending list lock needs to be held
+ virtual const bool needs_pending_list_lock() const = 0;
+
+ // Execute operations in the context of the caller,
+ // prior to execution of the vm operation itself.
+ virtual bool doit_prologue();
+ // Execute operations in the context of the caller,
+ // following completion of the vm operation.
+ virtual void doit_epilogue();
+
+ virtual bool evaluate_at_safepoint() const { return true; }
+ virtual bool is_cheap_allocated() const { return false; }
+ virtual bool allow_nested_vm_operations() const { return false; }
+ bool prologue_succeeded() const { return _prologue_succeeded; }
+
+ void verify_before_gc();
+ void verify_after_gc();
+};
+
+
+// VM_CMS_Operation for the initial marking phase of CMS.
+class VM_CMS_Initial_Mark: public VM_CMS_Operation {
+ public:
+ VM_CMS_Initial_Mark(CMSCollector* _collector) :
+ VM_CMS_Operation(_collector) {}
+
+ virtual VMOp_Type type() const { return VMOp_CMS_Initial_Mark; }
+ virtual void doit();
+
+ virtual const CMSCollector::CollectorState legal_state() const {
+ return CMSCollector::InitialMarking;
+ }
+
+ virtual const bool needs_pending_list_lock() const {
+ return false;
+ }
+};
+
+// VM_CMS_Operation for the final remark phase of CMS.
+class VM_CMS_Final_Remark: public VM_CMS_Operation {
+ public:
+ VM_CMS_Final_Remark(CMSCollector* _collector) :
+ VM_CMS_Operation(_collector) {}
+ virtual VMOp_Type type() const { return VMOp_CMS_Final_Remark; }
+ virtual void doit();
+
+ virtual const CMSCollector::CollectorState legal_state() const {
+ return CMSCollector::FinalMarking;
+ }
+
+ virtual const bool needs_pending_list_lock() const {
+ return true;
+ }
+};
+
+
+// VM operation to invoke a concurrent collection of the heap as a
+// GenCollectedHeap heap.
+class VM_GenCollectFullConcurrent: public VM_GC_Operation {
+ public:
+ VM_GenCollectFullConcurrent(uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause)
+ : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
+ {
+ assert(FullGCCount_lock != NULL, "Error");
+ }
+ ~VM_GenCollectFullConcurrent() {}
+ virtual VMOp_Type type() const { return VMOp_GenCollectFullConcurrent; }
+ virtual void doit();
+ virtual void doit_epilogue();
+ virtual bool is_cheap_allocated() const { return false; }
+ virtual bool evaluate_at_safepoint() const;
+};
+
+#endif // SHARE_VM_GC_CMS_CMSVMOPERATIONS_HPP
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -32,12 +32,12 @@
#include "gc/cms/cmsGCStats.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsOopClosures.inline.hpp"
+#include "gc/cms/cmsVMOperations.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/cms/parNewGeneration.hpp"
#include "gc/cms/promotionInfo.inline.hpp"
-#include "gc/cms/vmCMSOperations.hpp"
#include "gc/serial/genMarkSweep.hpp"
#include "gc/serial/tenuredGeneration.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,254 +0,0 @@
-/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
-#include "gc/cms/concurrentMarkSweepThread.hpp"
-#include "gc/cms/vmCMSOperations.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-#include "runtime/os.hpp"
-#include "utilities/dtrace.hpp"
-
-//////////////////////////////////////////////////////////
-// Methods in abstract class VM_CMS_Operation
-//////////////////////////////////////////////////////////
-void VM_CMS_Operation::verify_before_gc() {
- if (VerifyBeforeGC &&
- CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
- GCTraceTime(Info, gc, phases, verify) tm("Verify Before", _collector->_gc_timer_cm);
- HandleMark hm;
- FreelistLocker x(_collector);
- MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
- CMSHeap::heap()->prepare_for_verify();
- Universe::verify();
- }
-}
-
-void VM_CMS_Operation::verify_after_gc() {
- if (VerifyAfterGC &&
- CMSHeap::heap()->total_collections() >= VerifyGCStartAt) {
- GCTraceTime(Info, gc, phases, verify) tm("Verify After", _collector->_gc_timer_cm);
- HandleMark hm;
- FreelistLocker x(_collector);
- MutexLockerEx y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
- Universe::verify();
- }
-}
-
-bool VM_CMS_Operation::lost_race() const {
- if (CMSCollector::abstract_state() == CMSCollector::Idling) {
- // We lost a race to a foreground collection
- // -- there's nothing to do
- return true;
- }
- assert(CMSCollector::abstract_state() == legal_state(),
- "Inconsistent collector state?");
- return false;
-}
-
-bool VM_CMS_Operation::doit_prologue() {
- assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
- assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
- assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Possible deadlock");
-
- Heap_lock->lock();
- if (lost_race()) {
- assert(_prologue_succeeded == false, "Initialized in c'tor");
- Heap_lock->unlock();
- } else {
- _prologue_succeeded = true;
- }
- return _prologue_succeeded;
-}
-
-void VM_CMS_Operation::doit_epilogue() {
- assert(Thread::current()->is_ConcurrentGC_thread(), "just checking");
- assert(!CMSCollector::foregroundGCShouldWait(), "Possible deadlock");
- assert(!ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
- "Possible deadlock");
-
- if (Universe::has_reference_pending_list()) {
- Heap_lock->notify_all();
- }
- Heap_lock->unlock();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Initial_Mark
-//////////////////////////////////////////////////////////
-void VM_CMS_Initial_Mark::doit() {
- if (lost_race()) {
- // Nothing to do.
- return;
- }
- HS_PRIVATE_CMS_INITMARK_BEGIN();
- GCIdMark gc_id_mark(_gc_id);
-
- _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
-
- CMSHeap* heap = CMSHeap::heap();
- GCCauseSetter gccs(heap, GCCause::_cms_initial_mark);
-
- VM_CMS_Operation::verify_before_gc();
-
- IsGCActiveMark x; // stop-world GC active
- _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, heap->gc_cause());
-
- VM_CMS_Operation::verify_after_gc();
-
- _collector->_gc_timer_cm->register_gc_pause_end();
-
- HS_PRIVATE_CMS_INITMARK_END();
-}
-
-//////////////////////////////////////////////////////////
-// Methods in class VM_CMS_Final_Remark_Operation
-//////////////////////////////////////////////////////////
-void VM_CMS_Final_Remark::doit() {
- if (lost_race()) {
- // Nothing to do.
- return;
- }
- HS_PRIVATE_CMS_REMARK_BEGIN();
- GCIdMark gc_id_mark(_gc_id);
-
- _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
-
- CMSHeap* heap = CMSHeap::heap();
- GCCauseSetter gccs(heap, GCCause::_cms_final_remark);
-
- VM_CMS_Operation::verify_before_gc();
-
- IsGCActiveMark x; // stop-world GC active
- _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, heap->gc_cause());
-
- VM_CMS_Operation::verify_after_gc();
-
- _collector->save_heap_summary();
- _collector->_gc_timer_cm->register_gc_pause_end();
-
- HS_PRIVATE_CMS_REMARK_END();
-}
-
-// VM operation to invoke a concurrent collection of a
-// GenCollectedHeap heap.
-void VM_GenCollectFullConcurrent::doit() {
- assert(Thread::current()->is_VM_thread(), "Should be VM thread");
- assert(GCLockerInvokesConcurrent || ExplicitGCInvokesConcurrent, "Unexpected");
-
- CMSHeap* heap = CMSHeap::heap();
- if (_gc_count_before == heap->total_collections()) {
- // The "full" of do_full_collection call below "forces"
- // a collection; the second arg, 0, below ensures that
- // only the young gen is collected. XXX In the future,
- // we'll probably need to have something in this interface
- // to say do this only if we are sure we will not bail
- // out to a full collection in this attempt, but that's
- // for the future.
- assert(SafepointSynchronize::is_at_safepoint(),
- "We can only be executing this arm of if at a safepoint");
- GCCauseSetter gccs(heap, _gc_cause);
- heap->do_full_collection(heap->must_clear_all_soft_refs(), GenCollectedHeap::YoungGen);
- } // Else no need for a foreground young gc
- assert((_gc_count_before < heap->total_collections()) ||
- (GCLocker::is_active() /* gc may have been skipped */
- && (_gc_count_before == heap->total_collections())),
- "total_collections() should be monotonically increasing");
-
- MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
- assert(_full_gc_count_before <= heap->total_full_collections(), "Error");
- if (heap->total_full_collections() == _full_gc_count_before) {
- // Nudge the CMS thread to start a concurrent collection.
- CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
- } else {
- assert(_full_gc_count_before < heap->total_full_collections(), "Error");
- FullGCCount_lock->notify_all(); // Inform the Java thread its work is done
- }
-}
-
-bool VM_GenCollectFullConcurrent::evaluate_at_safepoint() const {
- Thread* thr = Thread::current();
- assert(thr != NULL, "Unexpected tid");
- if (!thr->is_Java_thread()) {
- assert(thr->is_VM_thread(), "Expected to be evaluated by VM thread");
- CMSHeap* heap = CMSHeap::heap();
- if (_gc_count_before != heap->total_collections()) {
- // No need to do a young gc, we'll just nudge the CMS thread
- // in the doit() method above, to be executed soon.
- assert(_gc_count_before < heap->total_collections(),
- "total_collections() should be monotonically increasing");
- return false; // no need for foreground young gc
- }
- }
- return true; // may still need foreground young gc
-}
-
-
-void VM_GenCollectFullConcurrent::doit_epilogue() {
- Thread* thr = Thread::current();
- assert(thr->is_Java_thread(), "just checking");
- JavaThread* jt = (JavaThread*)thr;
-
- if (Universe::has_reference_pending_list()) {
- Heap_lock->notify_all();
- }
- Heap_lock->unlock();
-
- // It is fine to test whether completed collections has
- // exceeded our request count without locking because
- // the completion count is monotonically increasing;
- // this will break for very long-running apps when the
- // count overflows and wraps around. XXX fix me !!!
- // e.g. at the rate of 1 full gc per ms, this could
- // overflow in about 1000 years.
- CMSHeap* heap = CMSHeap::heap();
- if (_gc_cause != GCCause::_gc_locker &&
- heap->total_full_collections_completed() <= _full_gc_count_before) {
- // maybe we should change the condition to test _gc_cause ==
- // GCCause::_java_lang_system_gc or GCCause::_dcmd_gc_run,
- // instead of _gc_cause != GCCause::_gc_locker
- assert(GCCause::is_user_requested_gc(_gc_cause),
- "the only way to get here if this was a System.gc()-induced GC");
- assert(ExplicitGCInvokesConcurrent, "Error");
- // Now, wait for witnessing concurrent gc cycle to complete,
- // but do so in native mode, because we want to lock the
- // FullGCEvent_lock, which may be needed by the VM thread
- // or by the CMS thread, so we do not want to be suspended
- // while holding that lock.
- ThreadToNativeFromVM native(jt);
- MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
- // Either a concurrent or a stop-world full gc is sufficient
- // witness to our request.
- while (heap->total_full_collections_completed() <= _full_gc_count_before) {
- FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
- }
- }
-}
--- a/src/hotspot/share/gc/cms/vmCMSOperations.hpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_CMS_VMCMSOPERATIONS_HPP
-#define SHARE_VM_GC_CMS_VMCMSOPERATIONS_HPP
-
-#include "gc/cms/concurrentMarkSweepGeneration.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/vmGCOperations.hpp"
-#include "runtime/vm_operations.hpp"
-
-// The VM_CMS_Operation is slightly different from
-// a VM_GC_Operation -- and would not have subclassed easily
-// to VM_GC_Operation without several changes to VM_GC_Operation.
-// To minimize the changes, we have replicated some of the VM_GC_Operation
-// functionality here. We will consolidate that back by doing subclassing
-// as appropriate in Dolphin.
-//
-// VM_Operation
-// VM_CMS_Operation
-// - implements the common portion of work done in support
-// of CMS' stop-world phases (initial mark and remark).
-//
-// VM_CMS_Initial_Mark
-// VM_CMS_Final_Mark
-//
-
-// Forward decl.
-class CMSCollector;
-
-class VM_CMS_Operation: public VM_Operation {
- protected:
- CMSCollector* _collector; // associated collector
- bool _prologue_succeeded; // whether doit_prologue succeeded
- uint _gc_id;
-
- bool lost_race() const;
-
- public:
- VM_CMS_Operation(CMSCollector* collector):
- _collector(collector),
- _prologue_succeeded(false),
- _gc_id(GCId::current()) {}
- ~VM_CMS_Operation() {}
-
- // The legal collector state for executing this CMS op.
- virtual const CMSCollector::CollectorState legal_state() const = 0;
-
- // Whether the pending list lock needs to be held
- virtual const bool needs_pending_list_lock() const = 0;
-
- // Execute operations in the context of the caller,
- // prior to execution of the vm operation itself.
- virtual bool doit_prologue();
- // Execute operations in the context of the caller,
- // following completion of the vm operation.
- virtual void doit_epilogue();
-
- virtual bool evaluate_at_safepoint() const { return true; }
- virtual bool is_cheap_allocated() const { return false; }
- virtual bool allow_nested_vm_operations() const { return false; }
- bool prologue_succeeded() const { return _prologue_succeeded; }
-
- void verify_before_gc();
- void verify_after_gc();
-};
-
-
-// VM_CMS_Operation for the initial marking phase of CMS.
-class VM_CMS_Initial_Mark: public VM_CMS_Operation {
- public:
- VM_CMS_Initial_Mark(CMSCollector* _collector) :
- VM_CMS_Operation(_collector) {}
-
- virtual VMOp_Type type() const { return VMOp_CMS_Initial_Mark; }
- virtual void doit();
-
- virtual const CMSCollector::CollectorState legal_state() const {
- return CMSCollector::InitialMarking;
- }
-
- virtual const bool needs_pending_list_lock() const {
- return false;
- }
-};
-
-// VM_CMS_Operation for the final remark phase of CMS.
-class VM_CMS_Final_Remark: public VM_CMS_Operation {
- public:
- VM_CMS_Final_Remark(CMSCollector* _collector) :
- VM_CMS_Operation(_collector) {}
- virtual VMOp_Type type() const { return VMOp_CMS_Final_Remark; }
- virtual void doit();
-
- virtual const CMSCollector::CollectorState legal_state() const {
- return CMSCollector::FinalMarking;
- }
-
- virtual const bool needs_pending_list_lock() const {
- return true;
- }
-};
-
-
-// VM operation to invoke a concurrent collection of the heap as a
-// GenCollectedHeap heap.
-class VM_GenCollectFullConcurrent: public VM_GC_Operation {
- public:
- VM_GenCollectFullConcurrent(uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause gc_cause)
- : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */)
- {
- assert(FullGCCount_lock != NULL, "Error");
- }
- ~VM_GenCollectFullConcurrent() {}
- virtual VMOp_Type type() const { return VMOp_GenCollectFullConcurrent; }
- virtual void doit();
- virtual void doit_epilogue();
- virtual bool is_cheap_allocated() const { return false; }
- virtual bool evaluate_at_safepoint() const;
-};
-
-#endif // SHARE_VM_GC_CMS_VMCMSOPERATIONS_HPP
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -57,10 +57,10 @@
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1YCTypes.hpp"
#include "gc/g1/g1YoungRemSetSamplingThread.hpp"
+#include "gc/g1/g1VMOperations.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
-#include "gc/g1/vm_operations_g1.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/gcHeapSummary.hpp"
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -44,12 +44,12 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.inline.hpp"
#include "include/jvm.h"
#include "logging/log.hpp"
--- a/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMarkThread.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -31,7 +31,7 @@
#include "gc/g1/g1MMUTracker.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
-#include "gc/g1/vm_operations_g1.hpp"
+#include "gc/g1/g1VMOperations.hpp"
#include "gc/shared/concurrentGCPhaseManager.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcTrace.hpp"
--- a/src/hotspot/share/gc/g1/g1FullGCScope.hpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCScope.hpp Thu Dec 06 15:44:13 2018 +0100
@@ -31,8 +31,8 @@
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.hpp"
#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "memory/allocation.hpp"
#include "services/memoryService.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1VMOperations.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
+#include "gc/g1/g1VMOperations.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcTimer.hpp"
+#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/isGCActiveMark.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+
+void VM_G1CollectFull::doit() {
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ GCCauseSetter x(g1h, _gc_cause);
+ g1h->do_full_collection(false /* clear_all_soft_refs */);
+}
+
+VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
+ uint gc_count_before,
+ GCCause::Cause gc_cause,
+ bool should_initiate_conc_mark,
+ double target_pause_time_ms) :
+ VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
+ _pause_succeeded(false),
+ _should_initiate_conc_mark(should_initiate_conc_mark),
+ _should_retry_gc(false),
+ _target_pause_time_ms(target_pause_time_ms),
+ _old_marking_cycles_completed_before(0) {
+
+ guarantee(target_pause_time_ms > 0.0,
+ "target_pause_time_ms = %1.6lf should be positive",
+ target_pause_time_ms);
+ _gc_cause = gc_cause;
+}
+
+bool VM_G1CollectForAllocation::doit_prologue() {
+ bool res = VM_CollectForAllocation::doit_prologue();
+ if (!res) {
+ if (_should_initiate_conc_mark) {
+ // The prologue can fail for a couple of reasons. The first is that another GC
+ // got scheduled and prevented the scheduling of the initial mark GC. The
+ // second is that the GC locker may be active and the heap can't be expanded.
+ // In both cases we want to retry the GC so that the initial mark pause is
+ // actually scheduled. In the second case, however, we should stall
+ // until the GC locker is no longer active and then retry the initial mark GC.
+ _should_retry_gc = true;
+ }
+ }
+ return res;
+}
+
+void VM_G1CollectForAllocation::doit() {
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
+ "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
+
+ if (_word_size > 0) {
+ // An allocation has been requested. So, try to do that first.
+ _result = g1h->attempt_allocation_at_safepoint(_word_size,
+ false /* expect_null_cur_alloc_region */);
+ if (_result != NULL) {
+ // If we can successfully allocate before we actually do the
+ // pause then we will consider this pause successful.
+ _pause_succeeded = true;
+ return;
+ }
+ }
+
+ GCCauseSetter x(g1h, _gc_cause);
+ if (_should_initiate_conc_mark) {
+ // It's safer to read old_marking_cycles_completed() here, given
+ // that no one else will be updating it concurrently. Since we'll
+ // only need it if we're initiating a marking cycle, no point in
+ // setting it earlier.
+ _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
+
+ // At this point we are supposed to start a concurrent cycle. We
+ // will do so if one is not already in progress.
+ bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
+
+ // The above routine returns true if we were able to force the
+ // next GC pause to be an initial mark; it returns false if a
+ // marking cycle is already in progress.
+ //
+ // If a marking cycle is already in progress just return and skip the
+ // pause below - if the reason for requesting this initial mark pause
+ // was due to a System.gc() then the requesting thread should block in
+ // doit_epilogue() until the marking cycle is complete.
+ //
+ // If this initial mark pause was requested as part of a humongous
+ // allocation then we know that the marking cycle must just have
+ // been started by another thread (possibly also allocating a humongous
+ // object) as there was no active marking cycle when the requesting
+ // thread checked before calling collect() in
+ // attempt_allocation_humongous(). Retrying the GC, in this case,
+ // will cause the requesting thread to spin inside collect() until the
+ // just started marking cycle is complete - which may be a while. So
+ // we do NOT retry the GC.
+ if (!res) {
+ assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
+ if (_gc_cause != GCCause::_g1_humongous_allocation) {
+ _should_retry_gc = true;
+ }
+ return;
+ }
+ }
+
+ // Try a partial collection of some kind.
+ _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+
+ if (_pause_succeeded) {
+ if (_word_size > 0) {
+ // An allocation had been requested. Do it, eventually trying a stronger
+ // kind of GC.
+ _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
+ } else {
+ bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
+ !g1h->has_regions_left_for_allocation();
+ if (should_upgrade_to_full) {
+ // There has been a request to perform a GC to free some space. We have no
+ // information on how much memory has been asked for. In case there are
+ // absolutely no regions left to allocate into, do a maximally compacting full GC.
+ log_info(gc, ergo)("Attempting maximally compacting collection");
+ _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
+ true /* clear_all_soft_refs */);
+ }
+ }
+ guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
+ } else {
+ assert(_result == NULL, "invariant");
+ // The only reason for the pause to not be successful is that the GC locker is
+ // active (or has become active since the prologue was executed). In this case
+ // we should retry the pause after waiting for the GC locker to become inactive.
+ _should_retry_gc = true;
+ }
+}
+
+void VM_G1CollectForAllocation::doit_epilogue() {
+ VM_CollectForAllocation::doit_epilogue();
+
+ // If the pause was initiated by a System.gc() and
+ // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
+ // that just started (or maybe one that was already in progress) to
+ // finish.
+ if (GCCause::is_user_requested_gc(_gc_cause) &&
+ _should_initiate_conc_mark) {
+ assert(ExplicitGCInvokesConcurrent,
+ "the only way to be here is if ExplicitGCInvokesConcurrent is set");
+
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+ // In the doit() method we saved g1h->old_marking_cycles_completed()
+ // in the _old_marking_cycles_completed_before field. We have to
+ // wait until we observe that g1h->old_marking_cycles_completed()
+ // has increased by at least one. This can happen if a) we started
+ // a cycle and it completes, b) a cycle already in progress
+ // completes, or c) a Full GC happens.
+
+ // If the condition has already been reached, there's no point in
+ // actually taking the lock and doing the wait.
+ if (g1h->old_marking_cycles_completed() <=
+ _old_marking_cycles_completed_before) {
+ // The following is largely copied from CMS
+
+ Thread* thr = Thread::current();
+ assert(thr->is_Java_thread(), "invariant");
+ JavaThread* jt = (JavaThread*)thr;
+ ThreadToNativeFromVM native(jt);
+
+ MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
+ while (g1h->old_marking_cycles_completed() <=
+ _old_marking_cycles_completed_before) {
+ FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
+ }
+ }
+ }
+}
+
+void VM_G1Concurrent::doit() {
+ GCIdMark gc_id_mark(_gc_id);
+ GCTraceCPUTime tcpu;
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ GCTraceTime(Info, gc) t(_message, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
+ TraceCollectorStats tcs(g1h->g1mm()->conc_collection_counters());
+ SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
+ IsGCActiveMark x;
+ _cl->do_void();
+}
+
+bool VM_G1Concurrent::doit_prologue() {
+ Heap_lock->lock();
+ return true;
+}
+
+void VM_G1Concurrent::doit_epilogue() {
+ if (Universe::has_reference_pending_list()) {
+ Heap_lock->notify_all();
+ }
+ Heap_lock->unlock();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1VMOperations.hpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_G1_G1VMOPERATIONS_HPP
+#define SHARE_VM_GC_G1_G1VMOPERATIONS_HPP
+
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcVMOperations.hpp"
+
+// VM_operations for the G1 collector.
+// VM_GC_Operation:
+// - VM_G1Concurrent
+// - VM_G1CollectForAllocation
+// - VM_G1CollectFull
+
+class VM_G1CollectFull : public VM_GC_Operation {
+public:
+ VM_G1CollectFull(uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause cause) :
+ VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { }
+ virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
+ virtual void doit();
+};
+
+class VM_G1CollectForAllocation : public VM_CollectForAllocation {
+ bool _pause_succeeded;
+
+ bool _should_initiate_conc_mark;
+ bool _should_retry_gc;
+ double _target_pause_time_ms;
+ uint _old_marking_cycles_completed_before;
+
+public:
+ VM_G1CollectForAllocation(size_t word_size,
+ uint gc_count_before,
+ GCCause::Cause gc_cause,
+ bool should_initiate_conc_mark,
+ double target_pause_time_ms);
+ virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
+ virtual bool doit_prologue();
+ virtual void doit();
+ virtual void doit_epilogue();
+ bool should_retry_gc() const { return _should_retry_gc; }
+ bool pause_succeeded() { return _pause_succeeded; }
+};
+
+// Concurrent G1 stop-the-world operations such as remark and cleanup.
+class VM_G1Concurrent : public VM_Operation {
+ VoidClosure* _cl;
+ const char* _message;
+ uint _gc_id;
+
+public:
+ VM_G1Concurrent(VoidClosure* cl, const char* message) :
+ _cl(cl), _message(message), _gc_id(GCId::current()) { }
+ virtual VMOp_Type type() const { return VMOp_G1Concurrent; }
+ virtual void doit();
+ virtual bool doit_prologue();
+ virtual void doit_epilogue();
+};
+
+#endif // SHARE_VM_GC_G1_G1VMOPERATIONS_HPP
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,224 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
-#include "gc/g1/g1Policy.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/g1/vm_operations_g1.hpp"
-#include "gc/shared/gcTimer.hpp"
-#include "gc/shared/gcTraceTime.inline.hpp"
-#include "gc/shared/isGCActiveMark.hpp"
-#include "runtime/interfaceSupport.inline.hpp"
-
-void VM_G1CollectFull::doit() {
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- GCCauseSetter x(g1h, _gc_cause);
- g1h->do_full_collection(false /* clear_all_soft_refs */);
-}
-
-VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
- uint gc_count_before,
- GCCause::Cause gc_cause,
- bool should_initiate_conc_mark,
- double target_pause_time_ms) :
- VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
- _pause_succeeded(false),
- _should_initiate_conc_mark(should_initiate_conc_mark),
- _should_retry_gc(false),
- _target_pause_time_ms(target_pause_time_ms),
- _old_marking_cycles_completed_before(0) {
-
- guarantee(target_pause_time_ms > 0.0,
- "target_pause_time_ms = %1.6lf should be positive",
- target_pause_time_ms);
- _gc_cause = gc_cause;
-}
-
-bool VM_G1CollectForAllocation::doit_prologue() {
- bool res = VM_CollectForAllocation::doit_prologue();
- if (!res) {
- if (_should_initiate_conc_mark) {
- // The prologue can fail for a couple of reasons. The first is that another GC
- // got scheduled and prevented the scheduling of the initial mark GC. The
- // second is that the GC locker may be active and the heap can't be expanded.
- // In both cases we want to retry the GC so that the initial mark pause is
- // actually scheduled. In the second case, however, we should stall until
- // until the GC locker is no longer active and then retry the initial mark GC.
- _should_retry_gc = true;
- }
- }
- return res;
-}
-
-void VM_G1CollectForAllocation::doit() {
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- assert(!_should_initiate_conc_mark || g1h->should_do_concurrent_full_gc(_gc_cause),
- "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
-
- if (_word_size > 0) {
- // An allocation has been requested. So, try to do that first.
- _result = g1h->attempt_allocation_at_safepoint(_word_size,
- false /* expect_null_cur_alloc_region */);
- if (_result != NULL) {
- // If we can successfully allocate before we actually do the
- // pause then we will consider this pause successful.
- _pause_succeeded = true;
- return;
- }
- }
-
- GCCauseSetter x(g1h, _gc_cause);
- if (_should_initiate_conc_mark) {
- // It's safer to read old_marking_cycles_completed() here, given
- // that noone else will be updating it concurrently. Since we'll
- // only need it if we're initiating a marking cycle, no point in
- // setting it earlier.
- _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();
-
- // At this point we are supposed to start a concurrent cycle. We
- // will do so if one is not already in progress.
- bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
-
- // The above routine returns true if we were able to force the
- // next GC pause to be an initial mark; it returns false if a
- // marking cycle is already in progress.
- //
- // If a marking cycle is already in progress just return and skip the
- // pause below - if the reason for requesting this initial mark pause
- // was due to a System.gc() then the requesting thread should block in
- // doit_epilogue() until the marking cycle is complete.
- //
- // If this initial mark pause was requested as part of a humongous
- // allocation then we know that the marking cycle must just have
- // been started by another thread (possibly also allocating a humongous
- // object) as there was no active marking cycle when the requesting
- // thread checked before calling collect() in
- // attempt_allocation_humongous(). Retrying the GC, in this case,
- // will cause the requesting thread to spin inside collect() until the
- // just started marking cycle is complete - which may be a while. So
- // we do NOT retry the GC.
- if (!res) {
- assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
- if (_gc_cause != GCCause::_g1_humongous_allocation) {
- _should_retry_gc = true;
- }
- return;
- }
- }
-
- // Try a partial collection of some kind.
- _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
-
- if (_pause_succeeded) {
- if (_word_size > 0) {
- // An allocation had been requested. Do it, eventually trying a stronger
- // kind of GC.
- _result = g1h->satisfy_failed_allocation(_word_size, &_pause_succeeded);
- } else {
- bool should_upgrade_to_full = !g1h->should_do_concurrent_full_gc(_gc_cause) &&
- !g1h->has_regions_left_for_allocation();
- if (should_upgrade_to_full) {
- // There has been a request to perform a GC to free some space. We have no
- // information on how much memory has been asked for. In case there are
- // absolutely no regions left to allocate into, do a maximally compacting full GC.
- log_info(gc, ergo)("Attempting maximally compacting collection");
- _pause_succeeded = g1h->do_full_collection(false, /* explicit gc */
- true /* clear_all_soft_refs */);
- }
- }
- guarantee(_pause_succeeded, "Elevated collections during the safepoint must always succeed.");
- } else {
- assert(_result == NULL, "invariant");
- // The only reason for the pause to not be successful is that, the GC locker is
- // active (or has become active since the prologue was executed). In this case
- // we should retry the pause after waiting for the GC locker to become inactive.
- _should_retry_gc = true;
- }
-}
-
-void VM_G1CollectForAllocation::doit_epilogue() {
- VM_CollectForAllocation::doit_epilogue();
-
- // If the pause was initiated by a System.gc() and
- // +ExplicitGCInvokesConcurrent, we have to wait here for the cycle
- // that just started (or maybe one that was already in progress) to
- // finish.
- if (GCCause::is_user_requested_gc(_gc_cause) &&
- _should_initiate_conc_mark) {
- assert(ExplicitGCInvokesConcurrent,
- "the only way to be here is if ExplicitGCInvokesConcurrent is set");
-
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
- // In the doit() method we saved g1h->old_marking_cycles_completed()
- // in the _old_marking_cycles_completed_before field. We have to
- // wait until we observe that g1h->old_marking_cycles_completed()
- // has increased by at least one. This can happen if a) we started
- // a cycle and it completes, b) a cycle already in progress
- // completes, or c) a Full GC happens.
-
- // If the condition has already been reached, there's no point in
- // actually taking the lock and doing the wait.
- if (g1h->old_marking_cycles_completed() <=
- _old_marking_cycles_completed_before) {
- // The following is largely copied from CMS
-
- Thread* thr = Thread::current();
- assert(thr->is_Java_thread(), "invariant");
- JavaThread* jt = (JavaThread*)thr;
- ThreadToNativeFromVM native(jt);
-
- MutexLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
- while (g1h->old_marking_cycles_completed() <=
- _old_marking_cycles_completed_before) {
- FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
- }
- }
- }
-}
-
-void VM_G1Concurrent::doit() {
- GCIdMark gc_id_mark(_gc_id);
- GCTraceCPUTime tcpu;
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- GCTraceTime(Info, gc) t(_message, g1h->concurrent_mark()->gc_timer_cm(), GCCause::_no_gc, true);
- TraceCollectorStats tcs(g1h->g1mm()->conc_collection_counters());
- SvcGCMarker sgcm(SvcGCMarker::CONCURRENT);
- IsGCActiveMark x;
- _cl->do_void();
-}
-
-bool VM_G1Concurrent::doit_prologue() {
- Heap_lock->lock();
- return true;
-}
-
-void VM_G1Concurrent::doit_epilogue() {
- if (Universe::has_reference_pending_list()) {
- Heap_lock->notify_all();
- }
- Heap_lock->unlock();
-}
--- a/src/hotspot/share/gc/g1/vm_operations_g1.hpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP
-#define SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP
-
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/vmGCOperations.hpp"
-
-// VM_operations for the G1 collector.
-// VM_GC_Operation:
-// - VM_G1Concurrent
-// - VM_G1CollectForAllocation
-// - VM_G1CollectFull
-
-class VM_G1CollectFull : public VM_GC_Operation {
-public:
- VM_G1CollectFull(uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause cause) :
- VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { }
- virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
- virtual void doit();
-};
-
-class VM_G1CollectForAllocation : public VM_CollectForAllocation {
- bool _pause_succeeded;
-
- bool _should_initiate_conc_mark;
- bool _should_retry_gc;
- double _target_pause_time_ms;
- uint _old_marking_cycles_completed_before;
-
-public:
- VM_G1CollectForAllocation(size_t word_size,
- uint gc_count_before,
- GCCause::Cause gc_cause,
- bool should_initiate_conc_mark,
- double target_pause_time_ms);
- virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
- virtual bool doit_prologue();
- virtual void doit();
- virtual void doit_epilogue();
- bool should_retry_gc() const { return _should_retry_gc; }
- bool pause_succeeded() { return _pause_succeeded; }
-};
-
-// Concurrent G1 stop-the-world operations such as remark and cleanup.
-class VM_G1Concurrent : public VM_Operation {
- VoidClosure* _cl;
- const char* _message;
- uint _gc_id;
-
-public:
- VM_G1Concurrent(VoidClosure* cl, const char* message) :
- _cl(cl), _message(message), _gc_id(GCId::current()) { }
- virtual VMOp_Type type() const { return VMOp_G1Concurrent; }
- virtual void doit();
- virtual bool doit_prologue();
- virtual void doit_epilogue();
-};
-
-#endif // SHARE_VM_GC_G1_VM_OPERATIONS_G1_HPP
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -36,7 +36,7 @@
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/parallel/psPromotionManager.hpp"
#include "gc/parallel/psScavenge.hpp"
-#include "gc/parallel/vmPSOperations.hpp"
+#include "gc/parallel/psVMOperations.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcWhen.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psVMOperations.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/parallel/parallelScavengeHeap.inline.hpp"
+#include "gc/parallel/psScavenge.hpp"
+#include "gc/parallel/psVMOperations.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "utilities/dtrace.hpp"
+
+// The following methods are used by the parallel scavenge collector
+VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
+ uint gc_count) :
+ VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure) {
+ assert(word_size != 0, "An allocation should always be requested with this operation.");
+}
+
+void VM_ParallelGCFailedAllocation::doit() {
+ SvcGCMarker sgcm(SvcGCMarker::MINOR);
+
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+
+ GCCauseSetter gccs(heap, _gc_cause);
+ _result = heap->failed_mem_allocate(_word_size);
+
+ if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
+ set_gc_locked();
+ }
+}
+
+// Only used for System.gc() calls
+VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
+ uint full_gc_count,
+ GCCause::Cause gc_cause) :
+ VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
+{
+}
+
+void VM_ParallelGCSystemGC::doit() {
+ SvcGCMarker sgcm(SvcGCMarker::FULL);
+
+ ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
+
+ GCCauseSetter gccs(heap, _gc_cause);
+ if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
+ DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
+ // If (and only if) the scavenge fails, this will invoke a full gc.
+ heap->invoke_scavenge();
+ } else {
+ heap->do_full_collection(false);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/parallel/psVMOperations.hpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_PARALLEL_PSVMOPERATIONS_HPP
+#define SHARE_VM_GC_PARALLEL_PSVMOPERATIONS_HPP
+
+#include "gc/parallel/parallelScavengeHeap.hpp"
+#include "gc/shared/gcCause.hpp"
+#include "gc/shared/gcVMOperations.hpp"
+
+class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
+ public:
+ VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count);
+
+ virtual VMOp_Type type() const {
+ return VMOp_ParallelGCFailedAllocation;
+ }
+ virtual void doit();
+};
+
+class VM_ParallelGCSystemGC: public VM_GC_Operation {
+ public:
+ VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
+ virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
+ virtual void doit();
+};
+
+#endif // SHARE_VM_GC_PARALLEL_PSVMOPERATIONS_HPP
--- a/src/hotspot/share/gc/parallel/vmPSOperations.cpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/parallel/parallelScavengeHeap.inline.hpp"
-#include "gc/parallel/psScavenge.hpp"
-#include "gc/parallel/vmPSOperations.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "utilities/dtrace.hpp"
-
-// The following methods are used by the parallel scavenge collector
-VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t word_size,
- uint gc_count) :
- VM_CollectForAllocation(word_size, gc_count, GCCause::_allocation_failure) {
- assert(word_size != 0, "An allocation should always be requested with this operation.");
-}
-
-void VM_ParallelGCFailedAllocation::doit() {
- SvcGCMarker sgcm(SvcGCMarker::MINOR);
-
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
- GCCauseSetter gccs(heap, _gc_cause);
- _result = heap->failed_mem_allocate(_word_size);
-
- if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
- set_gc_locked();
- }
-}
-
-// Only used for System.gc() calls
-VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
- uint full_gc_count,
- GCCause::Cause gc_cause) :
- VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
-{
-}
-
-void VM_ParallelGCSystemGC::doit() {
- SvcGCMarker sgcm(SvcGCMarker::FULL);
-
- ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
-
- GCCauseSetter gccs(heap, _gc_cause);
- if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
- DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
- // If (and only if) the scavenge fails, this will invoke a full gc.
- heap->invoke_scavenge();
- } else {
- heap->do_full_collection(false);
- }
-}
--- a/src/hotspot/share/gc/parallel/vmPSOperations.hpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_PARALLEL_VMPSOPERATIONS_HPP
-#define SHARE_VM_GC_PARALLEL_VMPSOPERATIONS_HPP
-
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/shared/gcCause.hpp"
-#include "gc/shared/vmGCOperations.hpp"
-
-class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
- public:
- VM_ParallelGCFailedAllocation(size_t word_size, uint gc_count);
-
- virtual VMOp_Type type() const {
- return VMOp_ParallelGCFailedAllocation;
- }
- virtual void doit();
-};
-
-class VM_ParallelGCSystemGC: public VM_GC_Operation {
- public:
- VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
- virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
- virtual void doit();
-};
-
-#endif // SHARE_VM_GC_PARALLEL_VMPSOPERATIONS_HPP
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -32,9 +32,9 @@
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/gcWhen.hpp"
#include "gc/shared/memAllocator.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
--- a/src/hotspot/share/gc/shared/collectorPolicy.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/collectorPolicy.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -28,9 +28,9 @@
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/space.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/arguments.hpp"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcVMOperations.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/javaClasses.hpp"
+#include "gc/shared/allocTracer.hpp"
+#include "gc/shared/gcId.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "gc/shared/gcVMOperations.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+#include "interpreter/oopMapCache.hpp"
+#include "logging/log.hpp"
+#include "memory/oopFactory.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/init.hpp"
+#include "utilities/dtrace.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/preserveException.hpp"
+#if INCLUDE_G1GC
+#include "gc/g1/g1CollectedHeap.inline.hpp"
+#include "gc/g1/g1Policy.hpp"
+#endif // INCLUDE_G1GC
+
+VM_GC_Operation::~VM_GC_Operation() {
+ CollectedHeap* ch = Universe::heap();
+ ch->soft_ref_policy()->set_all_soft_refs_clear(false);
+}
+
+// The same dtrace probe can't be inserted in two different files, so we
+// have to call it here, so it's only in one file. Can't create new probes
+// for the other file anymore. The dtrace probes have to remain stable.
+void VM_GC_Operation::notify_gc_begin(bool full) {
+ HOTSPOT_GC_BEGIN(
+ full);
+ HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
+}
+
+void VM_GC_Operation::notify_gc_end() {
+ HOTSPOT_GC_END();
+ HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
+}
+
+// Allocations may fail in several threads at about the same time,
+// resulting in multiple gc requests. We only want to do one of them.
+// In case a GC locker is active and the need for a GC is already signaled,
+// we want to skip this GC attempt altogether, without doing a futile
+// safepoint operation.
+bool VM_GC_Operation::skip_operation() const {
+ bool skip = (_gc_count_before != Universe::heap()->total_collections());
+ if (_full && skip) {
+ skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
+ }
+ if (!skip && GCLocker::is_active_and_needs_gc()) {
+ skip = Universe::heap()->is_maximal_no_gc();
+ assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
+ "GCLocker cannot be active when initiating GC");
+ }
+ return skip;
+}
+
+bool VM_GC_Operation::doit_prologue() {
+ assert(Thread::current()->is_Java_thread(), "just checking");
+ assert(((_gc_cause != GCCause::_no_gc) &&
+ (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");
+
+ // To be able to handle a GC the VM initialization needs to be completed.
+ if (!is_init_completed()) {
+ vm_exit_during_initialization(
+ err_msg("GC triggered before VM initialization completed. Try increasing "
+ "NewSize, current value " SIZE_FORMAT "%s.",
+ byte_size_in_proper_unit(NewSize),
+ proper_unit_for_byte_size(NewSize)));
+ }
+
+ // If the GC count has changed someone beat us to the collection
+ Heap_lock->lock();
+
+ // Check invocations
+ if (skip_operation()) {
+ // skip collection
+ Heap_lock->unlock();
+ _prologue_succeeded = false;
+ } else {
+ _prologue_succeeded = true;
+ }
+ return _prologue_succeeded;
+}
+
+
+void VM_GC_Operation::doit_epilogue() {
+ assert(Thread::current()->is_Java_thread(), "just checking");
+ // Clean up old interpreter OopMap entries that were replaced
+ // during the GC thread root traversal.
+ OopMapCache::cleanup_old_entries();
+ if (Universe::has_reference_pending_list()) {
+ Heap_lock->notify_all();
+ }
+ Heap_lock->unlock();
+}
+
+bool VM_GC_HeapInspection::skip_operation() const {
+ return false;
+}
+
+bool VM_GC_HeapInspection::collect() {
+ if (GCLocker::is_active()) {
+ return false;
+ }
+ Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
+ return true;
+}
+
+void VM_GC_HeapInspection::doit() {
+ HandleMark hm;
+ Universe::heap()->ensure_parsability(false); // must happen, even if collection does
+ // not happen (e.g. due to GCLocker)
+ // or _full_gc being false
+ if (_full_gc) {
+ if (!collect()) {
+ // The collection attempt was skipped because the gc locker is held.
+ // The following dump may then be a tad misleading to someone expecting
+ // only live objects to show up in the dump (see CR 6944195). Just issue
+ // a suitable warning in that case and do not attempt to do a collection.
+ // The latter is a subtle point, because even a failed attempt
+ // to GC will, in fact, induce one in the future, which we
+ // probably want to avoid in this case because the GC that we may
+ // be about to attempt holds value for us only
+ // if it happens now and not if it happens in the eventual
+ // future.
+ log_warning(gc)("GC locker is held; pre-dump GC was skipped");
+ }
+ }
+ HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
+ _columns);
+ inspect.heap_inspection(_out);
+}
+
+
+void VM_GenCollectForAllocation::doit() {
+ SvcGCMarker sgcm(SvcGCMarker::MINOR);
+
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ GCCauseSetter gccs(gch, _gc_cause);
+ _result = gch->satisfy_failed_allocation(_word_size, _tlab);
+ assert(gch->is_in_reserved_or_null(_result), "result not in heap");
+
+ if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
+ set_gc_locked();
+ }
+}
+
+void VM_GenCollectFull::doit() {
+ SvcGCMarker sgcm(SvcGCMarker::FULL);
+
+ GenCollectedHeap* gch = GenCollectedHeap::heap();
+ GCCauseSetter gccs(gch, _gc_cause);
+ gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
+}
+
+VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
+ size_t size,
+ Metaspace::MetadataType mdtype,
+ uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause)
+ : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
+ _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
+ assert(_size != 0, "An allocation should always be requested with this operation.");
+ AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
+}
+
+// Returns true iff concurrent GCs unloads metadata.
+bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
+#if INCLUDE_CMSGC
+ if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
+ MetaspaceGC::set_should_concurrent_collect(true);
+ return true;
+ }
+#endif
+
+#if INCLUDE_G1GC
+ if (UseG1GC && ClassUnloadingWithConcurrentMark) {
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
+
+ GCCauseSetter x(g1h, _gc_cause);
+
+ // At this point we are supposed to start a concurrent cycle. We
+ // will do so if one is not already in progress.
+ bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
+
+ if (should_start) {
+ double pause_target = g1h->g1_policy()->max_pause_time_ms();
+ g1h->do_collection_pause_at_safepoint(pause_target);
+ }
+ return true;
+ }
+#endif
+
+ return false;
+}
+
+void VM_CollectForMetadataAllocation::doit() {
+ SvcGCMarker sgcm(SvcGCMarker::FULL);
+
+ CollectedHeap* heap = Universe::heap();
+ GCCauseSetter gccs(heap, _gc_cause);
+
+ // Check again if the space is available. Another thread
+ // may have similarly failed a metadata allocation and induced
+ // a GC that freed space for the allocation.
+ if (!MetadataAllocationFailALot) {
+ _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+ if (_result != NULL) {
+ return;
+ }
+ }
+
+ if (initiate_concurrent_GC()) {
+ // For CMS and G1 expand since the collection is going to be concurrent.
+ _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+ if (_result != NULL) {
+ return;
+ }
+
+ log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
+ }
+
+ // Don't clear the soft refs yet.
+ heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
+ // After a GC try to allocate without expanding. Could fail
+ // and expansion will be tried below.
+ _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+ if (_result != NULL) {
+ return;
+ }
+
+ // If still failing, allow the Metaspace to expand.
+ // See delta_capacity_until_GC() for explanation of the
+ // amount of the expansion.
+ // This should work unless there really is no more space
+ // or a MaxMetaspaceSize has been specified on the command line.
+ _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+ if (_result != NULL) {
+ return;
+ }
+
+ // If expansion failed, do a collection clearing soft references.
+ heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
+ _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
+ if (_result != NULL) {
+ return;
+ }
+
+ log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);
+
+ if (GCLocker::is_active_and_needs_gc()) {
+ set_gc_locked();
+ }
+}
+
+VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
+ : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(NULL) {
+ // Only report if operation was really caused by an allocation.
+ if (_word_size != 0) {
+ AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/gcVMOperations.hpp Thu Dec 06 15:44:13 2018 +0100
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_GCVMOPERATIONS_HPP
+#define SHARE_VM_GC_SHARED_GCVMOPERATIONS_HPP
+
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+#include "memory/heapInspection.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "runtime/handles.hpp"
+#include "runtime/jniHandles.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/vm_operations.hpp"
+
+// The following class hierarchy represents
+// a set of operations (VM_Operation) related to GC.
+//
+// VM_Operation
+// VM_GC_Operation
+// VM_GC_HeapInspection
+// VM_GenCollectFull
+// VM_GenCollectFullConcurrent
+// VM_ParallelGCSystemGC
+// VM_CollectForAllocation
+// VM_GenCollectForAllocation
+// VM_ParallelGCFailedAllocation
+// VM_GC_Operation
+// - implements methods common to all classes in the hierarchy:
+// prevents multiple gc requests and manages lock on heap;
+//
+// VM_GC_HeapInspection
+// - prints class histogram on SIGBREAK if PrintClassHistogram
+// is specified; and also the attach "inspectheap" operation
+//
+// VM_CollectForAllocation
+// VM_GenCollectForAllocation
+// VM_ParallelGCFailedAllocation
+// - this operation is invoked when allocation is failed;
+// operation performs garbage collection and tries to
+// allocate afterwards;
+//
+// VM_GenCollectFull
+// VM_GenCollectFullConcurrent
+// VM_ParallelGCSystemGC
+// - these operations preform full collection of heaps of
+// different kind
+//
+
+class VM_GC_Operation: public VM_Operation {
+ protected:
+ uint _gc_count_before; // gc count before acquiring PLL
+ uint _full_gc_count_before; // full gc count before acquiring PLL
+ bool _full; // whether a "full" collection
+ bool _prologue_succeeded; // whether doit_prologue succeeded
+ GCCause::Cause _gc_cause; // the putative cause for this gc op
+ bool _gc_locked; // will be set if gc was locked
+
+ virtual bool skip_operation() const;
+
+ public:
+ VM_GC_Operation(uint gc_count_before,
+ GCCause::Cause _cause,
+ uint full_gc_count_before = 0,
+ bool full = false) {
+ _full = full;
+ _prologue_succeeded = false;
+ _gc_count_before = gc_count_before;
+
+ // A subclass constructor will likely overwrite the following
+ _gc_cause = _cause;
+
+ _gc_locked = false;
+
+ _full_gc_count_before = full_gc_count_before;
+ // In ParallelScavengeHeap::mem_allocate() collections can be
+ // executed within a loop and _all_soft_refs_clear can be set
+ // true after they have been cleared by a collection and another
+ // collection started so that _all_soft_refs_clear can be true
+ // when this collection is started. Don't assert that
+ // _all_soft_refs_clear have to be false here even though
+ // mutators have run. Soft refs will be cleared again in this
+ // collection.
+ }
+ ~VM_GC_Operation();
+
+ // Acquire the reference synchronization lock
+ virtual bool doit_prologue();
+ // Do notifyAll (if needed) and release held lock
+ virtual void doit_epilogue();
+
+ virtual bool allow_nested_vm_operations() const { return true; }
+ bool prologue_succeeded() const { return _prologue_succeeded; }
+
+ void set_gc_locked() { _gc_locked = true; }
+ bool gc_locked() const { return _gc_locked; }
+
+ static void notify_gc_begin(bool full = false);
+ static void notify_gc_end();
+};
+
+
+class VM_GC_HeapInspection: public VM_GC_Operation {
+ private:
+ outputStream* _out;
+ bool _full_gc;
+ bool _csv_format; // "comma separated values" format for spreadsheet.
+ bool _print_help;
+ bool _print_class_stats;
+ const char* _columns;
+ public:
+ VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
+ VM_GC_Operation(0 /* total collections, dummy, ignored */,
+ GCCause::_heap_inspection /* GC Cause */,
+ 0 /* total full collections, dummy, ignored */,
+ request_full_gc) {
+ _out = out;
+ _full_gc = request_full_gc;
+ _csv_format = false;
+ _print_help = false;
+ _print_class_stats = false;
+ _columns = NULL;
+ }
+
+ ~VM_GC_HeapInspection() {}
+ virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; }
+ virtual bool skip_operation() const;
+ virtual void doit();
+ void set_csv_format(bool value) {_csv_format = value;}
+ void set_print_help(bool value) {_print_help = value;}
+ void set_print_class_stats(bool value) {_print_class_stats = value;}
+ void set_columns(const char* value) {_columns = value;}
+ protected:
+ bool collect();
+};
+
+class VM_CollectForAllocation : public VM_GC_Operation {
+ protected:
+ size_t _word_size; // Size of object to be allocated (in number of words)
+ HeapWord* _result; // Allocation result (NULL if allocation failed)
+
+ public:
+ VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause);
+
+ HeapWord* result() const {
+ return _result;
+ }
+};
+
+class VM_GenCollectForAllocation : public VM_CollectForAllocation {
+ private:
+ bool _tlab; // alloc is of a tlab.
+ public:
+ VM_GenCollectForAllocation(size_t word_size,
+ bool tlab,
+ uint gc_count_before)
+ : VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
+ _tlab(tlab) {
+ assert(word_size != 0, "An allocation should always be requested with this operation.");
+ }
+ ~VM_GenCollectForAllocation() {}
+ virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
+ virtual void doit();
+};
+
+// VM operation to invoke a collection of the heap as a
+// GenCollectedHeap heap.
+class VM_GenCollectFull: public VM_GC_Operation {
+ private:
+ GenCollectedHeap::GenerationType _max_generation;
+ public:
+ VM_GenCollectFull(uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause,
+ GenCollectedHeap::GenerationType max_generation)
+ : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
+ _max_generation(max_generation) { }
+ ~VM_GenCollectFull() {}
+ virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
+ virtual void doit();
+};
+
+class VM_CollectForMetadataAllocation: public VM_GC_Operation {
+ private:
+ MetaWord* _result;
+ size_t _size; // size of object to be allocated
+ Metaspace::MetadataType _mdtype;
+ ClassLoaderData* _loader_data;
+
+ public:
+ VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
+ size_t size,
+ Metaspace::MetadataType mdtype,
+ uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause);
+
+ virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
+ virtual void doit();
+ MetaWord* result() const { return _result; }
+
+ bool initiate_concurrent_GC();
+};
+
+class SvcGCMarker : public StackObj {
+ private:
+ JvmtiGCMarker _jgcm;
+ public:
+ typedef enum { MINOR, FULL, CONCURRENT } reason_type;
+
+ SvcGCMarker(reason_type reason ) {
+ VM_GC_Operation::notify_gc_begin(reason == FULL);
+ }
+
+ ~SvcGCMarker() {
+ VM_GC_Operation::notify_gc_end();
+ }
+};
+
+#endif // SHARE_VM_GC_SHARED_GCVMOPERATIONS_HPP
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -42,13 +42,13 @@
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/generationSpec.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/strongRootsScope.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/filemap.hpp"
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,290 +0,0 @@
-/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoader.hpp"
-#include "classfile/javaClasses.hpp"
-#include "gc/shared/allocTracer.hpp"
-#include "gc/shared/gcId.hpp"
-#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/vmGCOperations.hpp"
-#include "interpreter/oopMapCache.hpp"
-#include "logging/log.hpp"
-#include "memory/oopFactory.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/init.hpp"
-#include "utilities/dtrace.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/preserveException.hpp"
-#if INCLUDE_G1GC
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1Policy.hpp"
-#endif // INCLUDE_G1GC
-
-VM_GC_Operation::~VM_GC_Operation() {
- CollectedHeap* ch = Universe::heap();
- ch->soft_ref_policy()->set_all_soft_refs_clear(false);
-}
-
-// The same dtrace probe can't be inserted in two different files, so we
-// have to call it here, so it's only in one file. Can't create new probes
-// for the other file anymore. The dtrace probes have to remain stable.
-void VM_GC_Operation::notify_gc_begin(bool full) {
- HOTSPOT_GC_BEGIN(
- full);
- HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-}
-
-void VM_GC_Operation::notify_gc_end() {
- HOTSPOT_GC_END();
- HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-}
-
-// Allocations may fail in several threads at about the same time,
-// resulting in multiple gc requests. We only want to do one of them.
-// In case a GC locker is active and the need for a GC is already signaled,
-// we want to skip this GC attempt altogether, without doing a futile
-// safepoint operation.
-bool VM_GC_Operation::skip_operation() const {
- bool skip = (_gc_count_before != Universe::heap()->total_collections());
- if (_full && skip) {
- skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
- }
- if (!skip && GCLocker::is_active_and_needs_gc()) {
- skip = Universe::heap()->is_maximal_no_gc();
- assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
- "GCLocker cannot be active when initiating GC");
- }
- return skip;
-}
-
-bool VM_GC_Operation::doit_prologue() {
- assert(Thread::current()->is_Java_thread(), "just checking");
- assert(((_gc_cause != GCCause::_no_gc) &&
- (_gc_cause != GCCause::_no_cause_specified)), "Illegal GCCause");
-
- // To be able to handle a GC the VM initialization needs to be completed.
- if (!is_init_completed()) {
- vm_exit_during_initialization(
- err_msg("GC triggered before VM initialization completed. Try increasing "
- "NewSize, current value " SIZE_FORMAT "%s.",
- byte_size_in_proper_unit(NewSize),
- proper_unit_for_byte_size(NewSize)));
- }
-
- // If the GC count has changed someone beat us to the collection
- Heap_lock->lock();
-
- // Check invocations
- if (skip_operation()) {
- // skip collection
- Heap_lock->unlock();
- _prologue_succeeded = false;
- } else {
- _prologue_succeeded = true;
- }
- return _prologue_succeeded;
-}
-
-
-void VM_GC_Operation::doit_epilogue() {
- assert(Thread::current()->is_Java_thread(), "just checking");
- // Clean up old interpreter OopMap entries that were replaced
- // during the GC thread root traversal.
- OopMapCache::cleanup_old_entries();
- if (Universe::has_reference_pending_list()) {
- Heap_lock->notify_all();
- }
- Heap_lock->unlock();
-}
-
-bool VM_GC_HeapInspection::skip_operation() const {
- return false;
-}
-
-bool VM_GC_HeapInspection::collect() {
- if (GCLocker::is_active()) {
- return false;
- }
- Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
- return true;
-}
-
-void VM_GC_HeapInspection::doit() {
- HandleMark hm;
- Universe::heap()->ensure_parsability(false); // must happen, even if collection does
- // not happen (e.g. due to GCLocker)
- // or _full_gc being false
- if (_full_gc) {
- if (!collect()) {
- // The collection attempt was skipped because the gc locker is held.
- // The following dump may then be a tad misleading to someone expecting
- // only live objects to show up in the dump (see CR 6944195). Just issue
- // a suitable warning in that case and do not attempt to do a collection.
- // The latter is a subtle point, because even a failed attempt
- // to GC will, in fact, induce one in the future, which we
- // probably want to avoid in this case because the GC that we may
- // be about to attempt holds value for us only
- // if it happens now and not if it happens in the eventual
- // future.
- log_warning(gc)("GC locker is held; pre-dump GC was skipped");
- }
- }
- HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
- _columns);
- inspect.heap_inspection(_out);
-}
-
-
-void VM_GenCollectForAllocation::doit() {
- SvcGCMarker sgcm(SvcGCMarker::MINOR);
-
- GenCollectedHeap* gch = GenCollectedHeap::heap();
- GCCauseSetter gccs(gch, _gc_cause);
- _result = gch->satisfy_failed_allocation(_word_size, _tlab);
- assert(gch->is_in_reserved_or_null(_result), "result not in heap");
-
- if (_result == NULL && GCLocker::is_active_and_needs_gc()) {
- set_gc_locked();
- }
-}
-
-void VM_GenCollectFull::doit() {
- SvcGCMarker sgcm(SvcGCMarker::FULL);
-
- GenCollectedHeap* gch = GenCollectedHeap::heap();
- GCCauseSetter gccs(gch, _gc_cause);
- gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_generation);
-}
-
-VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
- size_t size,
- Metaspace::MetadataType mdtype,
- uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause gc_cause)
- : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
- _result(NULL), _size(size), _mdtype(mdtype), _loader_data(loader_data) {
- assert(_size != 0, "An allocation should always be requested with this operation.");
- AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
-}
-
-// Returns true iff concurrent GCs unloads metadata.
-bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
-#if INCLUDE_CMSGC
- if (UseConcMarkSweepGC && CMSClassUnloadingEnabled) {
- MetaspaceGC::set_should_concurrent_collect(true);
- return true;
- }
-#endif
-
-#if INCLUDE_G1GC
- if (UseG1GC && ClassUnloadingWithConcurrentMark) {
- G1CollectedHeap* g1h = G1CollectedHeap::heap();
- g1h->g1_policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
-
- GCCauseSetter x(g1h, _gc_cause);
-
- // At this point we are supposed to start a concurrent cycle. We
- // will do so if one is not already in progress.
- bool should_start = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);
-
- if (should_start) {
- double pause_target = g1h->g1_policy()->max_pause_time_ms();
- g1h->do_collection_pause_at_safepoint(pause_target);
- }
- return true;
- }
-#endif
-
- return false;
-}
-
-void VM_CollectForMetadataAllocation::doit() {
- SvcGCMarker sgcm(SvcGCMarker::FULL);
-
- CollectedHeap* heap = Universe::heap();
- GCCauseSetter gccs(heap, _gc_cause);
-
- // Check again if the space is available. Another thread
- // may have similarly failed a metadata allocation and induced
- // a GC that freed space for the allocation.
- if (!MetadataAllocationFailALot) {
- _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
- }
-
- if (initiate_concurrent_GC()) {
- // For CMS and G1 expand since the collection is going to be concurrent.
- _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- log_debug(gc)("%s full GC for Metaspace", UseConcMarkSweepGC ? "CMS" : "G1");
- }
-
- // Don't clear the soft refs yet.
- heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
- // After a GC try to allocate without expanding. Could fail
- // and expansion will be tried below.
- _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- // If still failing, allow the Metaspace to expand.
- // See delta_capacity_until_GC() for explanation of the
- // amount of the expansion.
- // This should work unless there really is no more space
- // or a MaxMetaspaceSize has been specified on the command line.
- _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- // If expansion failed, do a collection clearing soft references.
- heap->collect_as_vm_thread(GCCause::_metadata_GC_clear_soft_refs);
- _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- if (_result != NULL) {
- return;
- }
-
- log_debug(gc)("After Metaspace GC failed to allocate size " SIZE_FORMAT, _size);
-
- if (GCLocker::is_active_and_needs_gc()) {
- set_gc_locked();
- }
-}
-
-VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)
- : VM_GC_Operation(gc_count_before, cause), _word_size(word_size), _result(NULL) {
- // Only report if operation was really caused by an allocation.
- if (_word_size != 0) {
- AllocTracer::send_allocation_requiring_gc_event(_word_size * HeapWordSize, GCId::peek());
- }
-}
--- a/src/hotspot/share/gc/shared/vmGCOperations.hpp Thu Dec 06 13:55:22 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,241 +0,0 @@
-/*
- * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_VMGCOPERATIONS_HPP
-#define SHARE_VM_GC_SHARED_VMGCOPERATIONS_HPP
-
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "memory/heapInspection.hpp"
-#include "prims/jvmtiExport.hpp"
-#include "runtime/handles.hpp"
-#include "runtime/jniHandles.hpp"
-#include "runtime/synchronizer.hpp"
-#include "runtime/vm_operations.hpp"
-
-// The following class hierarchy represents
-// a set of operations (VM_Operation) related to GC.
-//
-// VM_Operation
-// VM_GC_Operation
-// VM_GC_HeapInspection
-// VM_GenCollectFull
-// VM_GenCollectFullConcurrent
-// VM_ParallelGCSystemGC
-// VM_CollectForAllocation
-// VM_GenCollectForAllocation
-// VM_ParallelGCFailedAllocation
-// VM_GC_Operation
-// - implements methods common to all classes in the hierarchy:
-// prevents multiple gc requests and manages lock on heap;
-//
-// VM_GC_HeapInspection
-// - prints class histogram on SIGBREAK if PrintClassHistogram
-// is specified; and also the attach "inspectheap" operation
-//
-// VM_CollectForAllocation
-// VM_GenCollectForAllocation
-// VM_ParallelGCFailedAllocation
-// - this operation is invoked when allocation is failed;
-// operation performs garbage collection and tries to
-// allocate afterwards;
-//
-// VM_GenCollectFull
-// VM_GenCollectFullConcurrent
-// VM_ParallelGCSystemGC
-// - these operations preform full collection of heaps of
-// different kind
-//
-
-class VM_GC_Operation: public VM_Operation {
- protected:
- uint _gc_count_before; // gc count before acquiring PLL
- uint _full_gc_count_before; // full gc count before acquiring PLL
- bool _full; // whether a "full" collection
- bool _prologue_succeeded; // whether doit_prologue succeeded
- GCCause::Cause _gc_cause; // the putative cause for this gc op
- bool _gc_locked; // will be set if gc was locked
-
- virtual bool skip_operation() const;
-
- public:
- VM_GC_Operation(uint gc_count_before,
- GCCause::Cause _cause,
- uint full_gc_count_before = 0,
- bool full = false) {
- _full = full;
- _prologue_succeeded = false;
- _gc_count_before = gc_count_before;
-
- // A subclass constructor will likely overwrite the following
- _gc_cause = _cause;
-
- _gc_locked = false;
-
- _full_gc_count_before = full_gc_count_before;
- // In ParallelScavengeHeap::mem_allocate() collections can be
- // executed within a loop and _all_soft_refs_clear can be set
- // true after they have been cleared by a collection and another
- // collection started so that _all_soft_refs_clear can be true
- // when this collection is started. Don't assert that
- // _all_soft_refs_clear have to be false here even though
- // mutators have run. Soft refs will be cleared again in this
- // collection.
- }
- ~VM_GC_Operation();
-
- // Acquire the reference synchronization lock
- virtual bool doit_prologue();
- // Do notifyAll (if needed) and release held lock
- virtual void doit_epilogue();
-
- virtual bool allow_nested_vm_operations() const { return true; }
- bool prologue_succeeded() const { return _prologue_succeeded; }
-
- void set_gc_locked() { _gc_locked = true; }
- bool gc_locked() const { return _gc_locked; }
-
- static void notify_gc_begin(bool full = false);
- static void notify_gc_end();
-};
-
-
-class VM_GC_HeapInspection: public VM_GC_Operation {
- private:
- outputStream* _out;
- bool _full_gc;
- bool _csv_format; // "comma separated values" format for spreadsheet.
- bool _print_help;
- bool _print_class_stats;
- const char* _columns;
- public:
- VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
- VM_GC_Operation(0 /* total collections, dummy, ignored */,
- GCCause::_heap_inspection /* GC Cause */,
- 0 /* total full collections, dummy, ignored */,
- request_full_gc) {
- _out = out;
- _full_gc = request_full_gc;
- _csv_format = false;
- _print_help = false;
- _print_class_stats = false;
- _columns = NULL;
- }
-
- ~VM_GC_HeapInspection() {}
- virtual VMOp_Type type() const { return VMOp_GC_HeapInspection; }
- virtual bool skip_operation() const;
- virtual void doit();
- void set_csv_format(bool value) {_csv_format = value;}
- void set_print_help(bool value) {_print_help = value;}
- void set_print_class_stats(bool value) {_print_class_stats = value;}
- void set_columns(const char* value) {_columns = value;}
- protected:
- bool collect();
-};
-
-class VM_CollectForAllocation : public VM_GC_Operation {
- protected:
- size_t _word_size; // Size of object to be allocated (in number of words)
- HeapWord* _result; // Allocation result (NULL if allocation failed)
-
- public:
- VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause);
-
- HeapWord* result() const {
- return _result;
- }
-};
-
-class VM_GenCollectForAllocation : public VM_CollectForAllocation {
- private:
- bool _tlab; // alloc is of a tlab.
- public:
- VM_GenCollectForAllocation(size_t word_size,
- bool tlab,
- uint gc_count_before)
- : VM_CollectForAllocation(word_size, gc_count_before, GCCause::_allocation_failure),
- _tlab(tlab) {
- assert(word_size != 0, "An allocation should always be requested with this operation.");
- }
- ~VM_GenCollectForAllocation() {}
- virtual VMOp_Type type() const { return VMOp_GenCollectForAllocation; }
- virtual void doit();
-};
-
-// VM operation to invoke a collection of the heap as a
-// GenCollectedHeap heap.
-class VM_GenCollectFull: public VM_GC_Operation {
- private:
- GenCollectedHeap::GenerationType _max_generation;
- public:
- VM_GenCollectFull(uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause gc_cause,
- GenCollectedHeap::GenerationType max_generation)
- : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
- _max_generation(max_generation) { }
- ~VM_GenCollectFull() {}
- virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
- virtual void doit();
-};
-
-class VM_CollectForMetadataAllocation: public VM_GC_Operation {
- private:
- MetaWord* _result;
- size_t _size; // size of object to be allocated
- Metaspace::MetadataType _mdtype;
- ClassLoaderData* _loader_data;
-
- public:
- VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
- size_t size,
- Metaspace::MetadataType mdtype,
- uint gc_count_before,
- uint full_gc_count_before,
- GCCause::Cause gc_cause);
-
- virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
- virtual void doit();
- MetaWord* result() const { return _result; }
-
- bool initiate_concurrent_GC();
-};
-
-class SvcGCMarker : public StackObj {
- private:
- JvmtiGCMarker _jgcm;
- public:
- typedef enum { MINOR, FULL, CONCURRENT } reason_type;
-
- SvcGCMarker(reason_type reason ) {
- VM_GC_Operation::notify_gc_begin(reason == FULL);
- }
-
- ~SvcGCMarker() {
- VM_GC_Operation::notify_gc_end();
- }
-};
-
-#endif // SHARE_VM_GC_SHARED_VMGCOPERATIONS_HPP
--- a/src/hotspot/share/gc/z/zDriver.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/gc/z/zDriver.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -24,8 +24,8 @@
#include "precompiled.hpp"
#include "gc/shared/gcId.hpp"
#include "gc/shared/gcLocker.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/isGCActiveMark.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "gc/z/zCollectedHeap.hpp"
#include "gc/z/zDriver.hpp"
#include "gc/z/zHeap.inline.hpp"
--- a/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/jfr/periodic/jfrPeriodic.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -32,8 +32,8 @@
#include "gc/g1/g1HeapRegionEventSender.hpp"
#include "gc/shared/gcConfiguration.hpp"
#include "gc/shared/gcTrace.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "gc/shared/objectCountEventSender.hpp"
-#include "gc/shared/vmGCOperations.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/periodic/jfrModuleEvent.hpp"
#include "jfr/periodic/jfrOSInterface.hpp"
--- a/src/hotspot/share/runtime/os.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/runtime/os.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -32,7 +32,7 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
-#include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "logging/log.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
--- a/src/hotspot/share/services/attachListener.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/services/attachListener.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
-#include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
--- a/src/hotspot/share/services/diagnosticCommand.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/services/diagnosticCommand.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -28,7 +28,7 @@
#include "classfile/classLoaderStats.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/directivesParser.hpp"
-#include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "memory/metaspace/metaspaceDCmd.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.inline.hpp"
--- a/src/hotspot/share/services/heapDumper.cpp Thu Dec 06 13:55:22 2018 +0100
+++ b/src/hotspot/share/services/heapDumper.cpp Thu Dec 06 15:44:13 2018 +0100
@@ -30,7 +30,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
-#include "gc/shared/vmGCOperations.hpp"
+#include "gc/shared/gcVMOperations.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"