8211446: Replace oop_pc_follow_contents with oop_iterate and closure
Reviewed-by: sjohanss, tschatzl
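The change below deletes the per-Klass oop_pc_follow_contents virtuals and instead drives parallel-compact marking through obj->oop_iterate() with a single PCIterateMarkAndPushClosure. The following sketch is only a minimal, self-contained illustration of that pattern; the types (Object, FieldClosure, MarkAndPush) are made-up stand-ins, not HotSpot's oopDesc/OopIterateClosure/ParCompactionManager.

```cpp
#include <cstddef>
#include <vector>

// Simplified stand-ins for illustration only; not HotSpot types.
struct Object;

// The closure sees every reference field of an object, mirroring the role
// of PCIterateMarkAndPushClosure in this changeset.
struct FieldClosure {
  virtual void do_field(Object** p) = 0;
  virtual ~FieldClosure() {}
};

struct Object {
  std::vector<Object*> fields;   // reference fields of this object
  bool marked = false;

  // One generic traversal replaces the per-Klass follow-contents virtuals:
  // the object hands each reference field to the closure.
  void iterate(FieldClosure* cl) {
    for (size_t i = 0; i < fields.size(); i++) {
      cl->do_field(&fields[i]);
    }
  }
};

// Mark the referenced object and push it for later traversal, analogous to
// ParCompactionManager::mark_and_push.
struct MarkAndPush : public FieldClosure {
  std::vector<Object*>* stack;
  explicit MarkAndPush(std::vector<Object*>* s) : stack(s) {}
  virtual void do_field(Object** p) {
    Object* obj = *p;
    if (obj != NULL && !obj->marked) {
      obj->marked = true;
      stack->push_back(obj);
    }
  }
};

int main() {
  Object a, b, c;
  a.fields.push_back(&b);
  a.fields.push_back(&c);

  std::vector<Object*> stack;
  MarkAndPush cl(&stack);
  a.marked = true;
  stack.push_back(&a);

  // Drain loop in the spirit of follow_marking_stacks(): pop an object and
  // iterate it with the same closure instead of a type-specific method.
  while (!stack.empty()) {
    Object* obj = stack.back();
    stack.pop_back();
    obj->iterate(&cl);
  }
  return 0;   // all reachable objects are now marked
}
```

The class-metadata handling that the removed InstanceMirrorKlass and InstanceClassLoaderKlass methods did by hand is covered in the patch by the closure's do_klass_nv/do_cld_nv hooks on PCIterateMarkAndPushClosure, which oop_iterate invokes for metadata-visiting closures.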
--- a/src/hotspot/share/gc/parallel/pcTasks.cpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/gc/parallel/pcTasks.cpp Fri Oct 12 12:13:06 2018 +0200
@@ -35,6 +35,7 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "logging/log.hpp"
+#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
@@ -58,7 +59,7 @@
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
- ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+ PCMarkAndPushClosure mark_and_push_closure(cm);
MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
_thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
@@ -73,7 +74,7 @@
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
- ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+ PCMarkAndPushClosure mark_and_push_closure(cm);
switch (_root_type) {
case universe:
@@ -139,7 +140,7 @@
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
- ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+ PCMarkAndPushClosure mark_and_push_closure(cm);
ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
_rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
mark_and_push_closure, follow_stack_closure);
@@ -182,13 +183,12 @@
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
- ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
oop obj = NULL;
ObjArrayTask task;
do {
while (ParCompactionManager::steal_objarray(which, task)) {
- cm->follow_contents((objArrayOop)task.obj(), task.index());
+ cm->follow_array((objArrayOop)task.obj(), task.index());
cm->follow_marking_stacks();
}
while (ParCompactionManager::steal(which, obj)) {
--- a/src/hotspot/share/gc/parallel/psCompactionManager.cpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.cpp Fri Oct 12 12:13:06 2018 +0200
@@ -132,113 +132,6 @@
return _manager_array[index];
}
-void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
- assert(obj != NULL, "can't follow the content of NULL object");
-
- cm->follow_klass(this);
- // Only mark the header and let the scan of the meta-data mark
- // everything else.
-
- ParCompactionManager::MarkAndPushClosure cl(cm);
- if (UseCompressedOops) {
- InstanceKlass::oop_oop_iterate_oop_maps<narrowOop>(obj, &cl);
- } else {
- InstanceKlass::oop_oop_iterate_oop_maps<oop>(obj, &cl);
- }
-}
-
-void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
- InstanceKlass::oop_pc_follow_contents(obj, cm);
-
- // Follow the klass field in the mirror.
- Klass* klass = java_lang_Class::as_Klass(obj);
- if (klass != NULL) {
- // An unsafe anonymous class doesn't have its own class loader,
- // so the call to follow_klass will mark and push its java mirror instead of the
- // class loader. When handling the java mirror for an unsafe anonymous
- // class we need to make sure its class loader data is claimed, this is done
- // by calling follow_class_loader explicitly. For non-anonymous classes the
- // call to follow_class_loader is made when the class loader itself is handled.
- if (klass->is_instance_klass() &&
- InstanceKlass::cast(klass)->is_unsafe_anonymous()) {
- cm->follow_class_loader(klass->class_loader_data());
- } else {
- cm->follow_klass(klass);
- }
- } else {
- // If klass is NULL then this a mirror for a primitive type.
- // We don't have to follow them, since they are handled as strong
- // roots in Universe::oops_do.
- assert(java_lang_Class::is_primitive(obj), "Sanity check");
- }
-
- ParCompactionManager::MarkAndPushClosure cl(cm);
- if (UseCompressedOops) {
- oop_oop_iterate_statics<narrowOop>(obj, &cl);
- } else {
- oop_oop_iterate_statics<oop>(obj, &cl);
- }
-}
-
-void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
- InstanceKlass::oop_pc_follow_contents(obj, cm);
-
- ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data_acquire(obj);
- if (loader_data != NULL) {
- cm->follow_class_loader(loader_data);
- }
-}
-
-template <class T>
-static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
- T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
- T heap_oop = RawAccess<>::oop_load(referent_addr);
- log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
- if (!CompressedOops::is_null(heap_oop)) {
- oop referent = CompressedOops::decode_not_null(heap_oop);
- if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
- PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
- // reference already enqueued, referent will be traversed later
- klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
- log_develop_trace(gc, ref)(" Non NULL enqueued " PTR_FORMAT, p2i(obj));
- return;
- } else {
- // treat referent as normal oop
- log_develop_trace(gc, ref)(" Non NULL normal " PTR_FORMAT, p2i(obj));
- cm->mark_and_push(referent_addr);
- }
- }
- // Treat discovered as normal oop.
- T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
- cm->mark_and_push(discovered_addr);
- klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
-}
-
-
-void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
- if (UseCompressedOops) {
- oop_pc_follow_contents_specialized<narrowOop>(this, obj, cm);
- } else {
- oop_pc_follow_contents_specialized<oop>(this, obj, cm);
- }
-}
-
-void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
- cm->follow_klass(this);
-
- if (UseCompressedOops) {
- oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
- } else {
- oop_pc_follow_contents_specialized<oop>(objArrayOop(obj), 0, cm);
- }
-}
-
-void TypeArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
- assert(obj->is_typeArray(),"must be a type array");
- // Performance tweak: We skip iterating over the klass pointer since we
- // know that Universe::TypeArrayKlass never moves.
-}
-
void ParCompactionManager::follow_marking_stacks() {
do {
// Drain the overflow stack first, to allow stealing from the marking stack.
@@ -253,7 +146,7 @@
// Process ObjArrays one at a time to avoid marking stack bloat.
ObjArrayTask task;
if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
- follow_contents((objArrayOop)task.obj(), task.index());
+ follow_array((objArrayOop)task.obj(), task.index());
}
} while (!marking_stacks_empty());
--- a/src/hotspot/share/gc/parallel/psCompactionManager.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -171,24 +171,10 @@
void drain_region_stacks();
void follow_contents(oop obj);
- void follow_contents(objArrayOop array, int index);
+ void follow_array(objArrayOop array, int index);
void update_contents(oop obj);
- class MarkAndPushClosure: public BasicOopIterateClosure {
- private:
- ParCompactionManager* _compaction_manager;
- public:
- MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-
- template <typename T> void do_oop_work(T* p);
- virtual void do_oop(oop* p);
- virtual void do_oop(narrowOop* p);
-
- // This closure provides its own oop verification code.
- debug_only(virtual bool should_verify_oops() { return false; })
- };
-
class FollowStackClosure: public VoidClosure {
private:
ParCompactionManager* _compaction_manager;
--- a/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psCompactionManager.inline.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
+#include "classfile/javaClasses.inline.hpp"
#include "gc/parallel/parMarkBitMap.hpp"
#include "gc/parallel/psCompactionManager.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
@@ -37,6 +38,37 @@
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
+class PCMarkAndPushClosure: public OopClosure {
+private:
+ ParCompactionManager* _compaction_manager;
+public:
+ PCMarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+
+ template <typename T> void do_oop_nv(T* p) { _compaction_manager->mark_and_push(p); }
+ virtual void do_oop(oop* p) { do_oop_nv(p); }
+ virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+
+ // This closure provides its own oop verification code.
+ debug_only(virtual bool should_verify_oops() { return false; })
+};
+
+class PCIterateMarkAndPushClosure: public MetadataVisitingOopIterateClosure {
+private:
+ ParCompactionManager* _compaction_manager;
+public:
+ PCIterateMarkAndPushClosure(ParCompactionManager* cm, ReferenceProcessor* rp) : MetadataVisitingOopIterateClosure(rp), _compaction_manager(cm) { }
+
+ template <typename T> void do_oop_nv(T* p) { _compaction_manager->mark_and_push(p); }
+ virtual void do_oop(oop* p) { do_oop_nv(p); }
+ virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+
+ void do_klass_nv(Klass* k) { _compaction_manager->follow_klass(k); }
+ void do_cld_nv(ClassLoaderData* cld) { _compaction_manager->follow_class_loader(cld); }
+
+ // This closure provides its own oop verification code.
+ debug_only(virtual bool should_verify_oops() { return false; })
+};
+
inline bool ParCompactionManager::steal(int queue_num, oop& t) {
return stack_array()->steal(queue_num, t);
}
@@ -84,14 +116,6 @@
}
}
-template <typename T>
-inline void ParCompactionManager::MarkAndPushClosure::do_oop_work(T* p) {
- _compaction_manager->mark_and_push(p);
-}
-
-inline void ParCompactionManager::MarkAndPushClosure::do_oop(oop* p) { do_oop_work(p); }
-inline void ParCompactionManager::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_work(p); }
-
inline void ParCompactionManager::follow_klass(Klass* klass) {
oop holder = klass->klass_holder();
mark_and_push(&holder);
@@ -101,19 +125,8 @@
_compaction_manager->follow_marking_stacks();
}
-inline void ParCompactionManager::follow_class_loader(ClassLoaderData* cld) {
- MarkAndPushClosure mark_and_push_closure(this);
-
- cld->oops_do(&mark_and_push_closure, true);
-}
-
-inline void ParCompactionManager::follow_contents(oop obj) {
- assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
- obj->pc_follow_contents(this);
-}
-
-template <class T>
-inline void oop_pc_follow_contents_specialized(objArrayOop obj, int index, ParCompactionManager* cm) {
+template <typename T>
+inline void follow_array_specialized(objArrayOop obj, int index, ParCompactionManager* cm) {
const size_t len = size_t(obj->length());
const size_t beg_index = size_t(index);
assert(beg_index < len || len == 0, "index too large");
@@ -134,11 +147,11 @@
}
}
-inline void ParCompactionManager::follow_contents(objArrayOop obj, int index) {
+inline void ParCompactionManager::follow_array(objArrayOop obj, int index) {
if (UseCompressedOops) {
- oop_pc_follow_contents_specialized<narrowOop>(obj, index, this);
+ follow_array_specialized<narrowOop>(obj, index, this);
} else {
- oop_pc_follow_contents_specialized<oop>(obj, index, this);
+ follow_array_specialized<oop>(obj, index, this);
}
}
@@ -146,4 +159,19 @@
obj->pc_update_contents(this);
}
+inline void ParCompactionManager::follow_class_loader(ClassLoaderData* cld) {
+ PCMarkAndPushClosure mark_and_push_closure(this);
+ cld->oops_do(&mark_and_push_closure, true);
+}
+
+inline void ParCompactionManager::follow_contents(oop obj) {
+ assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
+ if (obj->is_objArray()) {
+ follow_array(objArrayOop(obj), 0);
+ } else {
+ PCIterateMarkAndPushClosure cl(this, PSParallelCompact::ref_processor());
+ obj->oop_iterate(&cl);
+ }
+}
+
#endif // SHARE_VM_GC_PARALLEL_PSCOMPACTIONMANAGER_INLINE_HPP
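The renamed follow_array path keeps the existing chunked scanning of object arrays: a stolen ObjArrayTask carries a resume index, a bounded slice is scanned, and the remainder is deferred so no single task bloats the marking stack (see the "Process ObjArrays one at a time" comment above). The sketch below illustrates that resume-index idea with stand-in types; kStride is a hypothetical chunk size standing in for HotSpot's marking stride, not a value taken from this patch.

```cpp
#include <cstddef>
#include <deque>
#include <vector>

// Illustrative stand-ins only; the real code uses objArrayOop, ObjArrayTask
// and ParCompactionManager's object-array stack.
struct ArrayTask {
  std::vector<int>* array;
  size_t index;          // where to resume scanning
};

// Hypothetical chunk size for the sketch.
static const size_t kStride = 4;

// Scan a bounded slice starting at task.index; if elements remain, push a
// continuation task instead of scanning the whole array at once.
static void follow_array_chunk(ArrayTask task, std::deque<ArrayTask>& queue,
                               long& visited) {
  const size_t len = task.array->size();
  const size_t end = (task.index + kStride < len) ? task.index + kStride : len;
  if (end < len) {
    ArrayTask rest = { task.array, end };
    queue.push_back(rest);           // defer the tail of the array
  }
  for (size_t i = task.index; i < end; i++) {
    visited += (*task.array)[i];     // stands in for mark_and_push(element)
  }
}

int main() {
  std::vector<int> big(10, 1);
  std::deque<ArrayTask> queue;
  ArrayTask first = { &big, 0 };
  queue.push_back(first);

  long visited = 0;
  while (!queue.empty()) {
    ArrayTask t = queue.front();
    queue.pop_front();
    follow_array_chunk(t, queue, visited);
  }
  return (visited == 10) ? 0 : 1;    // every element visited exactly once
}
```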
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp Fri Oct 12 12:13:06 2018 +0200
@@ -846,18 +846,43 @@
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
+class PCReferenceProcessor: public ReferenceProcessor {
+public:
+ PCReferenceProcessor(
+ BoolObjectClosure* is_subject_to_discovery,
+ BoolObjectClosure* is_alive_non_header) :
+ ReferenceProcessor(is_subject_to_discovery,
+ ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
+ ParallelGCThreads, // mt processing degree
+ true, // mt discovery
+ ParallelGCThreads, // mt discovery degree
+ true, // atomic_discovery
+ is_alive_non_header) {
+ }
+
+ template<typename T> bool discover(oop obj, ReferenceType type) {
+ T* referent_addr = (T*) java_lang_ref_Reference::referent_addr_raw(obj);
+ T heap_oop = RawAccess<>::oop_load(referent_addr);
+ oop referent = CompressedOops::decode_not_null(heap_oop);
+ return PSParallelCompact::mark_bitmap()->is_unmarked(referent)
+ && ReferenceProcessor::discover_reference(obj, type);
+ }
+ virtual bool discover_reference(oop obj, ReferenceType type) {
+ if (UseCompressedOops) {
+ return discover<narrowOop>(obj, type);
+ } else {
+ return discover<oop>(obj, type);
+ }
+ }
+};
+
void PSParallelCompact::post_initialize() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
_span_based_discoverer.set_span(heap->reserved_region());
_ref_processor =
- new ReferenceProcessor(&_span_based_discoverer,
- ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
- ParallelGCThreads, // mt processing degree
- true, // mt discovery
- ParallelGCThreads, // mt discovery degree
- true, // atomic_discovery
- &_is_alive_closure, // non-header is alive closure
- false); // disable adjusting number of processing threads
+ new PCReferenceProcessor(&_span_based_discoverer,
+ &_is_alive_closure); // non-header is alive closure
+
_counters = new CollectorCounters("PSParallelCompact", 1);
// Initialize static fields in ParCompactionManager.
@@ -2077,7 +2102,7 @@
TaskQueueSetSuper* qset = ParCompactionManager::stack_array();
ParallelTaskTerminator terminator(active_gc_threads, qset);
- ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+ PCMarkAndPushClosure mark_and_push_closure(cm);
ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
// Need new claim bits before marking starts.
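The reference-discovery filter that InstanceRefKlass::oop_pc_follow_contents used to apply inline now lives in PCReferenceProcessor::discover_reference: a reference is only discovered when its referent is still unmarked, otherwise the referent is strongly reachable and the reference is treated like a normal object. The snippet below is a stand-alone illustration of that short-circuit check; Ref and Discoverer are invented stand-ins, not the ReferenceProcessor API.

```cpp
#include <set>

// Illustration only: a discovery filter in the spirit of PCReferenceProcessor.
struct Ref { int* referent; };

struct Discoverer {
  std::set<int*> marked;                       // stands in for the mark bitmap
  int discovered = 0;

  // Base behaviour: unconditionally queue the reference for later processing.
  bool discover_base(Ref&) { discovered++; return true; }

  // Filtered behaviour, as the override in this change does: only hand the
  // reference to the base discoverer when its referent has not been marked.
  bool discover(Ref& r) {
    bool unmarked = (marked.count(r.referent) == 0);
    return unmarked && discover_base(r);
  }
};

int main() {
  int a = 0, b = 0;
  Discoverer d;
  d.marked.insert(&a);                         // 'a' is already marked

  Ref strongly_reachable = { &a };
  Ref weak_only = { &b };

  bool r1 = d.discover(strongly_reachable);    // skipped: referent is marked
  bool r2 = d.discover(weak_only);             // discovered: referent unmarked
  return (!r1 && r2 && d.discovered == 1) ? 0 : 1;
}
```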
--- a/src/hotspot/share/oops/instanceClassLoaderKlass.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/instanceClassLoaderKlass.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -52,7 +52,6 @@
//
#if INCLUDE_PARALLELGC
// Parallel Compact
- void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
void oop_pc_update_pointers(oop obj, ParCompactionManager* cm);
#endif
--- a/src/hotspot/share/oops/instanceKlass.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/instanceKlass.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -1187,7 +1187,6 @@
//
#if INCLUDE_PARALLELGC
// Parallel Compact
- void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
void oop_pc_update_pointers(oop obj, ParCompactionManager* cm);
#endif
--- a/src/hotspot/share/oops/instanceMirrorKlass.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/instanceMirrorKlass.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -93,7 +93,6 @@
//
#if INCLUDE_PARALLELGC
// Parallel Compact
- void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
void oop_pc_update_pointers(oop obj, ParCompactionManager* cm);
#endif
--- a/src/hotspot/share/oops/instanceRefKlass.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/instanceRefKlass.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -62,7 +62,6 @@
//
#if INCLUDE_PARALLELGC
// Parallel Compact
- void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
void oop_pc_update_pointers(oop obj, ParCompactionManager* cm);
#endif
--- a/src/hotspot/share/oops/klass.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/klass.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -677,7 +677,6 @@
//
#if INCLUDE_PARALLELGC
// Parallel Compact
- virtual void oop_pc_follow_contents(oop obj, ParCompactionManager* cm) = 0;
virtual void oop_pc_update_pointers(oop obj, ParCompactionManager* cm) = 0;
#endif
--- a/src/hotspot/share/oops/objArrayKlass.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/objArrayKlass.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -124,7 +124,6 @@
//
#if INCLUDE_PARALLELGC
// Parallel Compact
- void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
void oop_pc_update_pointers(oop obj, ParCompactionManager* cm);
#endif
--- a/src/hotspot/share/oops/oop.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/oop.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -289,7 +289,6 @@
#if INCLUDE_PARALLELGC
// Parallel Compact
- inline void pc_follow_contents(ParCompactionManager* cm);
inline void pc_update_contents(ParCompactionManager* cm);
#endif
--- a/src/hotspot/share/oops/oop.inline.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/oop.inline.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -427,9 +427,6 @@
}
#if INCLUDE_PARALLELGC
-void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
- klass()->oop_pc_follow_contents(this, cm);
-}
void oopDesc::pc_update_contents(ParCompactionManager* cm) {
Klass* k = klass();
--- a/src/hotspot/share/oops/typeArrayKlass.hpp Fri Oct 12 12:10:34 2018 +0200
+++ b/src/hotspot/share/oops/typeArrayKlass.hpp Fri Oct 12 12:13:06 2018 +0200
@@ -79,7 +79,6 @@
#if INCLUDE_PARALLELGC
// Parallel Compact
- void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
void oop_pc_update_pointers(oop obj, ParCompactionManager* cm);
#endif