8139341: Hide ExtendedOopClosure::_ref_processor
Summary: Make ExtendedOopClosure::_ref_processor private.
Reviewed-by: mgerdin, sjohanss
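
For context, the reshaped ExtendedOopClosure looks roughly as follows after this
change. This is a minimal sketch of only the members touched by the patch; the
do_metadata/do_klass/do_cld machinery and the explanatory comments in
iterator.hpp are omitted:

  class ExtendedOopClosure : public OopClosure {
   private:
    ReferenceProcessor* _ref_processor;

   protected:
    ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
    ExtendedOopClosure() : _ref_processor(NULL) { }
    // Protected, non-virtual destructor: instances are not deleted
    // through an ExtendedOopClosure pointer.
    ~ExtendedOopClosure() { }

    // For subclasses that cannot hand the ReferenceProcessor to the
    // base-class constructor (see the CMS closures and their FIXMEs below).
    void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }

   public:
    ReferenceProcessor* ref_processor() const { return _ref_processor; }
  };

Callers that used to read or write _ref_processor directly now go through
ref_processor() or set_ref_processor_internal(), as the hunks below show.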
--- a/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Fri Oct 16 10:20:59 2015 +0200
+++ b/hotspot/src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp Fri Oct 16 14:55:09 2015 -0400
@@ -6045,8 +6045,8 @@
_span(span),
_bitMap(bitMap)
{
- assert(_ref_processor == NULL, "deliberately left NULL");
- assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
+ assert(ref_processor() == NULL, "deliberately left NULL");
+ assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
void MarkRefsIntoClosure::do_oop(oop obj) {
@@ -6067,8 +6067,8 @@
_span(span),
_bitMap(bitMap)
{
- assert(_ref_processor == NULL, "deliberately left NULL");
- assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
+ assert(ref_processor() == NULL, "deliberately left NULL");
+ assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}
void Par_MarkRefsIntoClosure::do_oop(oop obj) {
@@ -6091,8 +6091,8 @@
_verification_bm(verification_bm),
_cms_bm(cms_bm)
{
- assert(_ref_processor == NULL, "deliberately left NULL");
- assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
+ assert(ref_processor() == NULL, "deliberately left NULL");
+ assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch");
}
void MarkRefsIntoVerifyClosure::do_oop(oop obj) {
@@ -6134,8 +6134,9 @@
_concurrent_precleaning(concurrent_precleaning),
_freelistLock(NULL)
{
- _ref_processor = rp;
- assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+ // FIXME: Should initialize in base class constructor.
+ assert(rp != NULL, "ref_processor shouldn't be NULL");
+ set_ref_processor_internal(rp);
}
// This closure is used to mark refs into the CMS generation at the
@@ -6240,8 +6241,9 @@
((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
_par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
{
- _ref_processor = rp;
- assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+ // FIXME: Should initialize in base class constructor.
+ assert(rp != NULL, "ref_processor shouldn't be NULL");
+ set_ref_processor_internal(rp);
}
// This closure is used to mark refs into the CMS generation at the
@@ -7091,7 +7093,7 @@
_mark_stack(mark_stack),
_concurrent_precleaning(concurrent_precleaning)
{
- assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+ assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
}
// Grey object rescan during pre-cleaning and second checkpoint phases --
@@ -7162,7 +7164,7 @@
_bit_map(bit_map),
_work_queue(work_queue)
{
- assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL");
+ assert(ref_processor() != NULL, "ref_processor shouldn't be NULL");
}
void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); }
--- a/hotspot/src/share/vm/gc/g1/concurrentMark.cpp Fri Oct 16 10:20:59 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/concurrentMark.cpp Fri Oct 16 14:55:09 2015 -0400
@@ -3084,17 +3084,21 @@
}
};
+static ReferenceProcessor* get_cm_oop_closure_ref_processor(G1CollectedHeap* g1h) {
+ ReferenceProcessor* result = NULL;
+ if (G1UseConcMarkReferenceProcessing) {
+ result = g1h->ref_processor_cm();
+ assert(result != NULL, "should not be NULL");
+ }
+ return result;
+}
+
G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMTask* task)
- : _g1h(g1h), _cm(cm), _task(task) {
- assert(_ref_processor == NULL, "should be initialized to NULL");
-
- if (G1UseConcMarkReferenceProcessing) {
- _ref_processor = g1h->ref_processor_cm();
- assert(_ref_processor != NULL, "should not be NULL");
- }
-}
+ : MetadataAwareOopClosure(get_cm_oop_closure_ref_processor(g1h)),
+ _g1h(g1h), _cm(cm), _task(task)
+{ }
void CMTask::setup_for_region(HeapRegion* hr) {
assert(hr != NULL,
--- a/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp Fri Oct 16 10:20:59 2015 +0200
+++ b/hotspot/src/share/vm/gc/g1/g1OopClosures.hpp Fri Oct 16 14:55:09 2015 -0400
@@ -80,7 +80,9 @@
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
- void set_ref_processor(ReferenceProcessor* ref_processor) { _ref_processor = ref_processor; }
+ void set_ref_processor(ReferenceProcessor* rp) {
+ set_ref_processor_internal(rp);
+ }
};
// Add back base class for metadata
@@ -127,7 +129,7 @@
public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParCopyHelper(g1, par_scan_state) {
- assert(_ref_processor == NULL, "sanity");
+ assert(ref_processor() == NULL, "sanity");
}
template <class T> void do_oop_nv(T* p) { do_oop_work(p); }
--- a/hotspot/src/share/vm/gc/serial/markSweep.hpp Fri Oct 16 10:20:59 2015 +0200
+++ b/hotspot/src/share/vm/gc/serial/markSweep.hpp Fri Oct 16 14:55:09 2015 -0400
@@ -196,7 +196,9 @@
virtual void do_cld(ClassLoaderData* cld);
void do_cld_nv(ClassLoaderData* cld);
- void set_ref_processor(ReferenceProcessor* rp) { _ref_processor = rp; }
+ void set_ref_processor(ReferenceProcessor* rp) {
+ set_ref_processor_internal(rp);
+ }
};
class PreservedMark VALUE_OBJ_CLASS_SPEC {
--- a/hotspot/src/share/vm/gc/shared/genOopClosures.hpp Fri Oct 16 10:20:59 2015 +0200
+++ b/hotspot/src/share/vm/gc/shared/genOopClosures.hpp Fri Oct 16 14:55:09 2015 -0400
@@ -157,7 +157,7 @@
}
public:
FilteringClosure(HeapWord* boundary, ExtendedOopClosure* cl) :
- ExtendedOopClosure(cl->_ref_processor), _boundary(boundary),
+ ExtendedOopClosure(cl->ref_processor()), _boundary(boundary),
_cl(cl) {}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
--- a/hotspot/src/share/vm/memory/iterator.hpp Fri Oct 16 10:20:59 2015 +0200
+++ b/hotspot/src/share/vm/memory/iterator.hpp Fri Oct 16 14:55:09 2015 -0400
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,10 +51,18 @@
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
- public:
+ private:
ReferenceProcessor* _ref_processor;
+
+ protected:
ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
- ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }
+ ExtendedOopClosure() : _ref_processor(NULL) { }
+ ~ExtendedOopClosure() { }
+
+ void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }
+
+ public:
+ ReferenceProcessor* ref_processor() const { return _ref_processor; }
// If the do_metadata functions return "true",
// we invoke the following when running oop_iterate():
--- a/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp Fri Oct 16 10:20:59 2015 +0200
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.inline.hpp Fri Oct 16 14:55:09 2015 -0400
@@ -43,7 +43,7 @@
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
T heap_oop = oopDesc::load_heap_oop(referent_addr);
- ReferenceProcessor* rp = closure->_ref_processor;
+ ReferenceProcessor* rp = closure->ref_processor();
if (!oopDesc::is_null(heap_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!referent->is_gc_marked() && (rp != NULL) &&
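
For reference, the subclasses above end up using one of two initialization
patterns, reduced here to a minimal sketch. The class names ExampleMarkClosure
and ExampleDeferredClosure are illustrative only and do not appear in the patch:

  // Pattern 1: the ReferenceProcessor is known up front and is handed to the
  // base-class constructor, as G1CMOopClosure now does via
  // get_cm_oop_closure_ref_processor().
  class ExampleMarkClosure : public ExtendedOopClosure {
   public:
    ExampleMarkClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { }
    virtual void do_oop(oop* p)       { /* mark through *p */ }
    virtual void do_oop(narrowOop* p) { /* mark through *p */ }
  };

  // Pattern 2: the value is not passed to the base-class constructor, so the
  // subclass constructor installs it afterwards through the protected setter,
  // as the CMS closures do (each carrying a FIXME to migrate to pattern 1).
  class ExampleDeferredClosure : public ExtendedOopClosure {
   public:
    ExampleDeferredClosure(ReferenceProcessor* rp) {
      assert(rp != NULL, "ref_processor shouldn't be NULL");
      set_ref_processor_internal(rp);
    }
    virtual void do_oop(oop* p)       { /* mark through *p */ }
    virtual void do_oop(narrowOop* p) { /* mark through *p */ }
  };

External code can no longer write _ref_processor directly; the callers that
still need to install one late go through the public set_ref_processor()
wrappers retained in g1OopClosures.hpp and markSweep.hpp, while readers such as
instanceRefKlass.inline.hpp use the new ref_processor() accessor.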