4965777: GC changes to support use of discovered field for pending references
Summary: If and when the reference handler thread is able to use the discovered field to link reference objects in its pending list, GC will do so as well. In that case, GC will scan through this field once a reference object has been placed on the pending list, but not before that stage, since the field is used by the concurrent GC thread to link discovered objects. When the ReferenceHandler thread does not use the discovered field to link the elements of the pending list, as is the case in older JDKs, the JVM falls back to the old behaviour of using the next field for that purpose.
Reviewed-by: jcoomes, mchung, stefank
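For reviewers who want the two linking schemes side by side, here is a minimal, self-contained C++ sketch of the idea. It is illustrative only, not HotSpot code: the Ref struct, enqueue_new/enqueue_old and pending_head are made-up names, the lists here are NULL-terminated rather than terminated by a self-loop in the discovered field, and the real code splices onto the pending list with atomic_exchange_oop under the appropriate GC write barriers.

#include <cassert>
#include <cstdio>

// Illustrative stand-in for java.lang.ref.Reference (not the real class).
struct Ref {
  Ref* next       = nullptr;  // NULL while active; self-loop / queue link once inactive
  Ref* discovered = nullptr;  // GC discovered-list link; also the pending-list link post-4965777
};

// New scheme (post-4965777): chain the pending list through 'discovered'
// and self-loop 'next' so the Reference is no longer considered active.
static void enqueue_new(Ref* head, Ref*& pending_head) {
  for (Ref* r = head; r != nullptr; ) {
    Ref* d = r->discovered;
    assert(r->next == nullptr && "should still be active");
    r->next = r;                      // self-loop => inactive
    if (d == nullptr) {               // r is the last element
      r->discovered = pending_head;   // splice the old pending list behind r
      pending_head  = head;
      return;
    }
    r = d;
  }
}

// Old scheme (pre-4965777): chain the pending list through 'next'
// and clear 'discovered' as each element is enqueued.
static void enqueue_old(Ref* head, Ref*& pending_head) {
  for (Ref* r = head; r != nullptr; ) {
    Ref* d = r->discovered;
    r->discovered = nullptr;
    if (d == nullptr) {               // r is the last element
      r->next = (pending_head != nullptr) ? pending_head : r;  // self-loop if pending list was empty
      pending_head = head;
      return;
    }
    r->next = d;
    r = d;
  }
}

int main() {
  Ref a, b;
  a.discovered = &b;                  // discovered list: a -> b
  Ref* pending = nullptr;
  enqueue_new(&a, &pending);
  std::printf("head==&a: %d  a inactive: %d  b ends list: %d\n",
              pending == &a, a.next == &a, b.discovered == nullptr);
  return 0;
}

In the new scheme the discovered field stays live after enqueueing, which is why the GC closures in instanceRefKlass.cpp below now scan it for inactive (next != NULL) references.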
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Wed Sep 07 15:00:13 2011 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Wed Sep 07 13:55:42 2011 -0700
@@ -36,6 +36,7 @@
ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
+bool ReferenceProcessor::_pending_list_uses_discovered_field = false;
// List of discovered references.
class DiscoveredList {
@@ -87,6 +88,7 @@
guarantee(RefDiscoveryPolicy == ReferenceBasedDiscovery ||
RefDiscoveryPolicy == ReferentBasedDiscovery,
"Unrecongnized RefDiscoveryPolicy");
+ _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
}
ReferenceProcessor::ReferenceProcessor(MemRegion span,
@@ -122,7 +124,7 @@
_discoveredSoftRefs[i].set_head(NULL);
_discoveredSoftRefs[i].set_length(0);
}
- // If we do barreirs, cache a copy of the barrier set.
+ // If we do barriers, cache a copy of the barrier set.
if (discovered_list_needs_barrier) {
_bs = Universe::heap()->barrier_set();
}
@@ -307,46 +309,77 @@
void ReferenceProcessor::enqueue_discovered_reflist(DiscoveredList& refs_list,
HeapWord* pending_list_addr) {
// Given a list of refs linked through the "discovered" field
- // (java.lang.ref.Reference.discovered) chain them through the
- // "next" field (java.lang.ref.Reference.next) and prepend
- // to the pending list.
+ // (java.lang.ref.Reference.discovered), self-loop their "next" field
+ // thus distinguishing them from active References, then
+ // prepend them to the pending list.
+ // BKWRD COMPATIBILITY NOTE: For older JDKs (prior to the fix for 4965777,
+ // the "next" field is used to chain the pending list, not the discovered
+ // field.
+
if (TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr("ReferenceProcessor::enqueue_discovered_reflist list "
INTPTR_FORMAT, (address)refs_list.head());
}
oop obj = NULL;
- oop next = refs_list.head();
- // Walk down the list, copying the discovered field into
- // the next field and clearing it.
- while (obj != next) {
- obj = next;
- assert(obj->is_instanceRef(), "should be reference object");
- next = java_lang_ref_Reference::discovered(obj);
- if (TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next " INTPTR_FORMAT,
- obj, next);
+ oop next_d = refs_list.head();
+ if (pending_list_uses_discovered_field()) { // New behaviour
+ // Walk down the list, self-looping the next field
+ // so that the References are not considered active.
+ while (obj != next_d) {
+ obj = next_d;
+ assert(obj->is_instanceRef(), "should be reference object");
+ next_d = java_lang_ref_Reference::discovered(obj);
+ if (TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
+ obj, next_d);
+ }
+ assert(java_lang_ref_Reference::next(obj) == NULL,
+ "Reference not active; should not be discovered");
+ // Self-loop next, so as to make Ref not active.
+ java_lang_ref_Reference::set_next(obj, obj);
+ if (next_d == obj) { // obj is last
+ // Swap refs_list into pending_list_addr and
+ // set obj's discovered to what we read from pending_list_addr.
+ oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
+ // Need oop_check on pending_list_addr above;
+ // see special oop-check code at the end of
+ // enqueue_discovered_reflists() further below.
+ java_lang_ref_Reference::set_discovered(obj, old); // old may be NULL
+ }
}
- assert(java_lang_ref_Reference::next(obj) == NULL,
- "The reference should not be enqueued");
- if (next == obj) { // obj is last
- // Swap refs_list into pendling_list_addr and
- // set obj's next to what we read from pending_list_addr.
- oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
- // Need oop_check on pending_list_addr above;
- // see special oop-check code at the end of
- // enqueue_discovered_reflists() further below.
- if (old == NULL) {
- // obj should be made to point to itself, since
- // pending list was empty.
- java_lang_ref_Reference::set_next(obj, obj);
+ } else { // Old behaviour
+ // Walk down the list, copying the discovered field into
+ // the next field and clearing the discovered field.
+ while (obj != next_d) {
+ obj = next_d;
+ assert(obj->is_instanceRef(), "should be reference object");
+ next_d = java_lang_ref_Reference::discovered(obj);
+ if (TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
+ obj, next_d);
+ }
+ assert(java_lang_ref_Reference::next(obj) == NULL,
+ "The reference should not be enqueued");
+ if (next_d == obj) { // obj is last
+ // Swap refs_list into pending_list_addr and
+ // set obj's next to what we read from pending_list_addr.
+ oop old = oopDesc::atomic_exchange_oop(refs_list.head(), pending_list_addr);
+ // Need oop_check on pending_list_addr above;
+ // see special oop-check code at the end of
+ // enqueue_discovered_reflists() further below.
+ if (old == NULL) {
+ // obj should be made to point to itself, since
+ // pending list was empty.
+ java_lang_ref_Reference::set_next(obj, obj);
+ } else {
+ java_lang_ref_Reference::set_next(obj, old);
+ }
} else {
- java_lang_ref_Reference::set_next(obj, old);
+ java_lang_ref_Reference::set_next(obj, next_d);
}
- } else {
- java_lang_ref_Reference::set_next(obj, next);
+ java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
}
- java_lang_ref_Reference::set_discovered(obj, (oop) NULL);
}
}
@@ -615,7 +648,7 @@
NOT_PRODUCT(
if (PrintGCDetails && TraceReferenceGC) {
gclog_or_tty->print_cr(" Dropped %d dead Refs out of %d "
- "discovered Refs by policy list " INTPTR_FORMAT,
+ "discovered Refs by policy, from list " INTPTR_FORMAT,
iter.removed(), iter.processed(), (address)refs_list.head());
}
)
@@ -1115,20 +1148,16 @@
// here is when *discovered_addr is NULL (see the CAS further below),
// so this will expand to nothing. As a result, we have manually
// elided this out for G1, but left in the test for some future
- // collector that might have need for a pre-barrier here.
- if (_discovered_list_needs_barrier && !UseG1GC) {
- if (UseCompressedOops) {
- _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
- } else {
- _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
- }
- guarantee(false, "Need to check non-G1 collector");
- }
+ // collector that might have need for a pre-barrier here, e.g.:-
+ // _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
+ assert(!_discovered_list_needs_barrier || UseG1GC,
+ "Need to check non-G1 collector: "
+ "may need a pre-write-barrier for CAS from NULL below");
oop retest = oopDesc::atomic_compare_exchange_oop(next_discovered, discovered_addr,
NULL);
if (retest == NULL) {
// This thread just won the right to enqueue the object.
- // We have separate lists for enqueueing so no synchronization
+ // We have separate lists for enqueueing, so no synchronization
// is necessary.
refs_list.set_head(obj);
refs_list.inc_length(1);
@@ -1137,14 +1166,14 @@
}
if (TraceReferenceGC) {
- gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
+ gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
}
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
if (TraceReferenceGC) {
- gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
+ gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
}
}
@@ -1169,7 +1198,7 @@
// (or part of the heap being collected, indicated by our "span"
// we don't treat it specially (i.e. we scan it as we would
// a normal oop, treating its references as strong references).
-// This means that references can't be enqueued unless their
+// This means that references can't be discovered unless their
// referent is also in the same span. This is the simplest,
// most "local" and most conservative approach, albeit one
// that may cause weak references to be enqueued least promptly.
@@ -1191,14 +1220,13 @@
// and complexity in processing these references.
// We call this choice the "RefeferentBasedDiscovery" policy.
bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
- // We enqueue references only if we are discovering refs
- // (rather than processing discovered refs).
+ // Make sure we are discovering refs (rather than processing discovered refs).
if (!_discovering_refs || !RegisterReferences) {
return false;
}
- // We only enqueue active references.
+ // We only discover active references.
oop next = java_lang_ref_Reference::next(obj);
- if (next != NULL) {
+ if (next != NULL) { // Ref is no longer active
return false;
}
@@ -1211,8 +1239,8 @@
return false;
}
- // We only enqueue references whose referents are not (yet) strongly
- // reachable.
+ // We only discover references whose referents are not (yet)
+ // known to be strongly reachable.
if (is_alive_non_header() != NULL) {
verify_referent(obj);
if (is_alive_non_header()->do_object_b(java_lang_ref_Reference::referent(obj))) {
@@ -1238,7 +1266,7 @@
if (discovered != NULL) {
// The reference has already been discovered...
if (TraceReferenceGC) {
- gclog_or_tty->print_cr("Already enqueued reference (" INTPTR_FORMAT ": %s)",
+ gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
}
if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
@@ -1260,9 +1288,9 @@
if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
verify_referent(obj);
- // enqueue if and only if either:
- // reference is in our span or
- // we are an atomic collector and referent is in our span
+ // Discover if and only if EITHER:
+ // .. reference is in our span, OR
+ // .. we are an atomic collector and referent is in our span
if (_span.contains(obj_addr) ||
(discovery_is_atomic() &&
_span.contains(java_lang_ref_Reference::referent(obj)))) {
@@ -1294,15 +1322,10 @@
// As in the case further above, since we are over-writing a NULL
// pre-value, we can safely elide the pre-barrier here for the case of G1.
+ // e.g.:- _bs->write_ref_field_pre((oop* or narrowOop*)discovered_addr, next_discovered);
assert(discovered == NULL, "control point invariant");
- if (_discovered_list_needs_barrier && !UseG1GC) { // safe to elide for G1
- if (UseCompressedOops) {
- _bs->write_ref_field_pre((narrowOop*)discovered_addr, next_discovered);
- } else {
- _bs->write_ref_field_pre((oop*)discovered_addr, next_discovered);
- }
- guarantee(false, "Need to check non-G1 collector");
- }
+ assert(!_discovered_list_needs_barrier || UseG1GC,
+ "For non-G1 collector, may need a pre-write-barrier for CAS from NULL below");
oop_store_raw(discovered_addr, next_discovered);
if (_discovered_list_needs_barrier) {
_bs->write_ref_field((void*)discovered_addr, next_discovered);
@@ -1311,11 +1334,11 @@
list->inc_length(1);
if (TraceReferenceGC) {
- gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
+ gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
obj, obj->blueprint()->internal_name());
}
}
- assert(obj->is_oop(), "Enqueued a bad reference");
+ assert(obj->is_oop(), "Discovered a bad reference");
verify_referent(obj);
return true;
}
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp Wed Sep 07 15:00:13 2011 -0700
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp Wed Sep 07 13:55:42 2011 -0700
@@ -52,6 +52,8 @@
class ReferenceProcessor : public CHeapObj {
protected:
+ // Compatibility with pre-4965777 JDKs
+ static bool _pending_list_uses_discovered_field;
MemRegion _span; // (right-open) interval of heap
// subject to wkref discovery
bool _discovering_refs; // true when discovery enabled
@@ -111,7 +113,6 @@
return _current_soft_ref_policy;
}
- public:
// Process references with a certain reachability level.
void process_discovered_reflist(DiscoveredList refs_lists[],
ReferencePolicy* policy,
@@ -297,6 +298,13 @@
bool discovery_is_atomic() const { return _discovery_is_atomic; }
void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }
+ // Whether the JDK in which we are embedded post-dates the fix for 4965777
+ // and thus uses the discovered field (rather than the next field) to chain
+ // the entries in the pending list; false for older, pre-4965777 JDKs.
+ static bool pending_list_uses_discovered_field() {
+ return _pending_list_uses_discovered_field;
+ }
+
// whether discovery is done by multiple threads same-old-timeously
bool discovery_is_mt() const { return _discovery_is_mt; }
void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp Wed Sep 07 15:00:13 2011 -0700
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp Wed Sep 07 13:55:42 2011 -0700
@@ -56,9 +56,8 @@
if (!oopDesc::is_null(heap_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!referent->is_gc_marked() &&
- MarkSweep::ref_processor()->
- discover_reference(obj, ref->reference_type())) {
- // reference already enqueued, referent will be traversed later
+ MarkSweep::ref_processor()->discover_reference(obj, ref->reference_type())) {
+ // reference was discovered, referent will be traversed later
ref->instanceKlass::oop_follow_contents(obj);
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
@@ -76,8 +75,34 @@
MarkSweep::mark_and_push(referent_addr);
}
}
- // treat next as normal oop. next is a link in the pending list.
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+ // Treat discovered as normal oop, if ref is not "active",
+ // i.e. if next is non-NULL.
+ T next_oop = oopDesc::load_heap_oop(next_addr);
+ if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process discovered as normal "
+ INTPTR_FORMAT, discovered_addr);
+ }
+ )
+ MarkSweep::mark_and_push(discovered_addr);
+ }
+ } else {
+#ifdef ASSERT
+ // In the case of older JDKs which do not use the discovered
+ // field for the pending list, an inactive ref (next != NULL)
+ // must always have a NULL discovered field.
+ oop next = oopDesc::load_decode_heap_oop(next_addr);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+ err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+ obj));
+#endif
+ }
+ // treat next as normal oop. next is a link in the reference queue.
debug_only(
if(TraceReferenceGC && PrintGCDetails) {
gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
@@ -130,13 +155,33 @@
PSParallelCompact::mark_and_push(cm, referent_addr);
}
}
- // treat next as normal oop. next is a link in the pending list.
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
- debug_only(
- if(TraceReferenceGC && PrintGCDetails) {
- gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr);
+ if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+ // Treat discovered as normal oop, if ref is not "active",
+ // i.e. if next is non-NULL.
+ T next_oop = oopDesc::load_heap_oop(next_addr);
+ if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process discovered as normal "
+ INTPTR_FORMAT, discovered_addr);
+ }
+ )
+ PSParallelCompact::mark_and_push(cm, discovered_addr);
}
- )
+ } else {
+#ifdef ASSERT
+ // In the case of older JDKs which do not use the discovered
+ // field for the pending list, an inactive ref (next != NULL)
+ // must always have a NULL discovered field.
+ T next = oopDesc::load_heap_oop(next_addr);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+ err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+ obj));
+#endif
+ }
PSParallelCompact::mark_and_push(cm, next_addr);
ref->instanceKlass::oop_follow_contents(cm, obj);
}
@@ -197,27 +242,53 @@
}
#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains) \
+ T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); \
if (closure->apply_to_weak_ref_discovered_field()) { \
- T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); \
closure->do_oop##nv_suffix(disc_addr); \
} \
\
T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); \
T heap_oop = oopDesc::load_heap_oop(referent_addr); \
- if (!oopDesc::is_null(heap_oop) && contains(referent_addr)) { \
- ReferenceProcessor* rp = closure->_ref_processor; \
+ ReferenceProcessor* rp = closure->_ref_processor; \
+ if (!oopDesc::is_null(heap_oop)) { \
oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); \
if (!referent->is_gc_marked() && (rp != NULL) && \
rp->discover_reference(obj, reference_type())) { \
return size; \
- } else { \
+ } else if (contains(referent_addr)) { \
/* treat referent as normal oop */ \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
closure->do_oop##nv_suffix(referent_addr); \
} \
} \
+ T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \
+ if (ReferenceProcessor::pending_list_uses_discovered_field()) { \
+ T next_oop = oopDesc::load_heap_oop(next_addr); \
+ /* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */\
+ if (!oopDesc::is_null(next_oop) && contains(disc_addr)) { \
+ /* i.e. ref is not "active" */ \
+ debug_only( \
+ if(TraceReferenceGC && PrintGCDetails) { \
+ gclog_or_tty->print_cr(" Process discovered as normal " \
+ INTPTR_FORMAT, disc_addr); \
+ } \
+ ) \
+ SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk);\
+ closure->do_oop##nv_suffix(disc_addr); \
+ } \
+ } else { \
+ /* In the case of older JDKs which do not use the discovered field for */ \
+ /* the pending list, an inactive ref (next != NULL) must always have a */ \
+ /* NULL discovered field. */ \
+ debug_only( \
+ T next_oop = oopDesc::load_heap_oop(next_addr); \
+ T disc_oop = oopDesc::load_heap_oop(disc_addr); \
+ assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop), \
+ err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL" \
+ "discovered field", obj)); \
+ ) \
+ } \
/* treat next as normal oop */ \
- T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); \
if (contains(next_addr)) { \
SpecializationStats::record_do_oop_call##nv_suffix(SpecializationStats::irk); \
closure->do_oop##nv_suffix(next_addr); \
@@ -306,8 +377,37 @@
pm->claim_or_forward_depth(referent_addr);
}
}
- // treat next as normal oop
+ // Treat discovered as normal oop, if ref is not "active",
+ // i.e. if next is non-NULL.
T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+ if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+ T next_oop = oopDesc::load_heap_oop(next_addr);
+ if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+ T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+ debug_only(
+ if(TraceReferenceGC && PrintGCDetails) {
+ gclog_or_tty->print_cr(" Process discovered as normal "
+ INTPTR_FORMAT, discovered_addr);
+ }
+ )
+ if (PSScavenge::should_scavenge(discovered_addr)) {
+ pm->claim_or_forward_depth(discovered_addr);
+ }
+ }
+ } else {
+#ifdef ASSERT
+ // In the case of older JDKs which do not use the discovered
+ // field for the pending list, an inactive ref (next != NULL)
+ // must always have a NULL discovered field.
+ oop next = oopDesc::load_decode_heap_oop(next_addr);
+ oop discovered = java_lang_ref_Reference::discovered(obj);
+ assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+ err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+ obj));
+#endif
+ }
+
+ // Treat next as normal oop; next is a link in the reference queue.
if (PSScavenge::should_scavenge(next_addr)) {
pm->claim_or_forward_depth(next_addr);
}
--- a/hotspot/src/share/vm/prims/jvm.h Wed Sep 07 15:00:13 2011 -0700
+++ b/hotspot/src/share/vm/prims/jvm.h Wed Sep 07 13:55:42 2011 -0700
@@ -1650,7 +1650,8 @@
*/
unsigned int thread_park_blocker : 1;
unsigned int post_vm_init_hook_enabled : 1;
- unsigned int : 30;
+ unsigned int pending_list_uses_discovered_field : 1;
+ unsigned int : 29;
unsigned int : 32;
unsigned int : 32;
} jdk_version_info;
--- a/hotspot/src/share/vm/runtime/java.cpp Wed Sep 07 15:00:13 2011 -0700
+++ b/hotspot/src/share/vm/runtime/java.cpp Wed Sep 07 13:55:42 2011 -0700
@@ -672,7 +672,8 @@
_current = JDK_Version(major, minor, micro, info.update_version,
info.special_update_version, build,
info.thread_park_blocker == 1,
- info.post_vm_init_hook_enabled == 1);
+ info.post_vm_init_hook_enabled == 1,
+ info.pending_list_uses_discovered_field == 1);
}
}
--- a/hotspot/src/share/vm/runtime/java.hpp Wed Sep 07 15:00:13 2011 -0700
+++ b/hotspot/src/share/vm/runtime/java.hpp Wed Sep 07 13:55:42 2011 -0700
@@ -92,6 +92,7 @@
bool _partially_initialized;
bool _thread_park_blocker;
+ bool _pending_list_uses_discovered_field;
bool _post_vm_init_hook_enabled;
bool is_valid() const {
@@ -114,15 +115,18 @@
JDK_Version() : _major(0), _minor(0), _micro(0), _update(0),
_special(0), _build(0), _partially_initialized(false),
- _thread_park_blocker(false), _post_vm_init_hook_enabled(false) {}
+ _thread_park_blocker(false), _post_vm_init_hook_enabled(false),
+ _pending_list_uses_discovered_field(false) {}
JDK_Version(uint8_t major, uint8_t minor = 0, uint8_t micro = 0,
uint8_t update = 0, uint8_t special = 0, uint8_t build = 0,
- bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false) :
+ bool thread_park_blocker = false, bool post_vm_init_hook_enabled = false,
+ bool pending_list_uses_discovered_field = false) :
_major(major), _minor(minor), _micro(micro), _update(update),
_special(special), _build(build), _partially_initialized(false),
_thread_park_blocker(thread_park_blocker),
- _post_vm_init_hook_enabled(post_vm_init_hook_enabled) {}
+ _post_vm_init_hook_enabled(post_vm_init_hook_enabled),
+ _pending_list_uses_discovered_field(pending_list_uses_discovered_field) {}
// Returns the current running JDK version
static JDK_Version current() { return _current; }
@@ -149,6 +153,10 @@
bool post_vm_init_hook_enabled() const {
return _post_vm_init_hook_enabled;
}
+ // For compatibility wrt pre-4965777 JDKs
+ bool pending_list_uses_discovered_field() const {
+ return _pending_list_uses_discovered_field;
+ }
// Performs a full ordering comparison using all fields (update, build, etc.)
int compare(const JDK_Version& other) const;