--- a/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -3630,6 +3630,12 @@
}
#if INCLUDE_ALL_GCS
+/*
+ * g1_write_barrier_pre -- G1GC pre-write barrier for store of new_val at
+ * store_addr.
+ *
+ * Allocates rscratch1
+ */
void MacroAssembler::g1_write_barrier_pre(Register obj,
Register pre_val,
Register thread,
@@ -3645,10 +3651,8 @@
Label done;
Label runtime;
- assert(pre_val != noreg, "check this code");
-
- if (obj != noreg)
- assert_different_registers(obj, pre_val, tmp);
+ assert_different_registers(obj, pre_val, tmp, rscratch1);
+ assert(pre_val != noreg && tmp != noreg, "expecting a register");
Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
SATBMarkQueue::byte_offset_of_active()));
@@ -3722,12 +3726,22 @@
bind(done);
}
+/*
+ * g1_write_barrier_post -- G1GC post-write barrier for store of new_val at
+ * store_addr.
+ *
+ * Allocates rscratch1
+ */
void MacroAssembler::g1_write_barrier_post(Register store_addr,
Register new_val,
Register thread,
Register tmp,
Register tmp2) {
assert(thread == rthread, "must be");
+ assert_different_registers(store_addr, new_val, thread, tmp, tmp2,
+ rscratch1);
+ assert(store_addr != noreg && new_val != noreg && tmp != noreg
+ && tmp2 != noreg, "expecting a register");
Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
DirtyCardQueue::byte_offset_of_index()));
--- a/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -2067,7 +2067,7 @@
__ g1_write_barrier_pre(noreg /* obj */,
r0 /* pre_val */,
rthread /* thread */,
- rscratch1 /* tmp */,
+ rscratch2 /* tmp */,
true /* tosca_live */,
true /* expand_call */);
}
--- a/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/cpu/aarch64/vm/templateTable_aarch64.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -170,7 +170,7 @@
// G1 barrier needs uncompressed oop for region cross check.
Register new_val = val;
if (UseCompressedOops) {
- new_val = rscratch1;
+ new_val = rscratch2;
__ mov(new_val, val);
}
__ store_heap_oop(Address(r3, 0), val);
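The two call-site switches from rscratch1 to rscratch2 above follow from the tightened barrier contract: g1_write_barrier_pre/post now allocate rscratch1, and the new assert_different_registers checks reject any argument that aliases it. A sketch of the caller-side rule, mirroring the sharedRuntime call (not new code, just the contract spelled out):

  // tmp (and, for the post barrier, every register argument) must not be
  // rscratch1 -- the barrier clobbers it internally.
  __ g1_write_barrier_pre(noreg     /* obj */,
                          r0        /* pre_val */,
                          rthread   /* thread */,
                          rscratch2 /* tmp: rscratch1 here would now assert */,
                          true      /* tosca_live */,
                          true      /* expand_call */);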
--- a/hotspot/src/os/posix/vm/os_posix.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/os/posix/vm/os_posix.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -24,7 +24,6 @@
#include "utilities/globalDefinitions.hpp"
#include "prims/jvm.h"
-#include "semaphore_posix.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/os.hpp"
@@ -32,6 +31,11 @@
#include "utilities/macros.hpp"
#include "utilities/vmError.hpp"
+#ifndef __APPLE__
+// POSIX unnamed semaphores are not supported on OS X.
+#include "semaphore_posix.hpp"
+#endif
+
#include <dlfcn.h>
#include <pthread.h>
#include <semaphore.h>
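For context on the new guard: POSIX unnamed semaphores compile on OS X but fail at runtime, so the header is only pulled in elsewhere. A minimal standalone sketch (not part of the patch) of the failure mode:

#include <semaphore.h>
#include <cerrno>
#include <cstdio>

int main() {
  sem_t sem;
  if (sem_init(&sem, 0 /* pshared */, 0 /* initial value */) == -1) {
    // On OS X this branch is always taken (errno == ENOSYS); HotSpot uses
    // Mach semaphores there instead of the semaphore_posix implementation.
    std::perror("sem_init");
    return 1;
  }
  sem_destroy(&sem);
  return 0;
}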
--- a/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -106,8 +106,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(4 == sizeof(I));
- STATIC_CAST(4 == sizeof(D));
+ STATIC_ASSERT(4 == sizeof(I));
+ STATIC_ASSERT(4 == sizeof(D));
D result;
@@ -129,8 +129,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(8 == sizeof(I));
- STATIC_CAST(8 == sizeof(D));
+ STATIC_ASSERT(8 == sizeof(I));
+ STATIC_ASSERT(8 == sizeof(D));
D result;
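The STATIC_CAST/STATIC_ASSERT mix-up here and in the platform files below compiled in the first place only because these bodies are templates: STATIC_CAST(4 == sizeof(I)) is a dependent call to an undeclared name, so lookup is deferred until the template is instantiated, i.e. until someone actually builds for these platforms. A standalone sketch of the corrected pattern (HotSpot's STATIC_ASSERT approximated with static_assert):

#include <cstddef>

#define STATIC_ASSERT(Cond) static_assert(Cond, #Cond)  // rough stand-in

template<typename I, typename D>
D add_and_fetch(I add_value, D volatile* dest) {
  STATIC_ASSERT(4 == sizeof(I));   // enforced at compile time, per instantiation
  STATIC_ASSERT(4 == sizeof(D));
  return *dest += add_value;       // placeholder for the platform asm
}

int main() {
  volatile int v = 0;
  return (int)add_and_fetch(1, &v);  // OK where sizeof(int) == 4
  // add_and_fetch(1LL, &v);         // would trip the first assert on LP64
}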
--- a/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -184,8 +184,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(4 == sizeof(I));
- STATIC_CAST(4 == sizeof(D));
+ STATIC_ASSERT(4 == sizeof(I));
+ STATIC_ASSERT(4 == sizeof(D));
#ifdef ARM
return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
@@ -201,8 +201,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(8 == sizeof(I));
- STATIC_CAST(8 == sizeof(D));
+ STATIC_ASSERT(8 == sizeof(I));
+ STATIC_ASSERT(8 == sizeof(D));
return __sync_add_and_fetch(dest, add_value);
}
@@ -283,7 +283,7 @@
T volatile* dest,
T compare_value,
cmpxchg_memory_order order) const {
- STATIC_CAST(4 == sizeof(T));
+ STATIC_ASSERT(4 == sizeof(T));
#ifdef ARM
return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
#else
@@ -301,7 +301,7 @@
T volatile* dest,
T compare_value,
cmpxchg_memory_order order) const {
- STATIC_CAST(8 == sizeof(T));
+ STATIC_ASSERT(8 == sizeof(T));
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}
--- a/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -104,8 +104,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(4 == sizeof(I));
- STATIC_CAST(4 == sizeof(D));
+ STATIC_ASSERT(4 == sizeof(I));
+ STATIC_ASSERT(4 == sizeof(D));
D result;
@@ -127,8 +127,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(8 == sizeof(I));
- STATIC_CAST(8 == sizeof(D));
+ STATIC_ASSERT(8 == sizeof(I));
+ STATIC_ASSERT(8 == sizeof(D));
D result;
--- a/hotspot/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -92,9 +92,9 @@
template<>
template<typename I, typename D>
-inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(4 == sizeof(I));
- STATIC_CAST(4 == sizeof(D));
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
+ STATIC_ASSERT(4 == sizeof(I));
+ STATIC_ASSERT(4 == sizeof(D));
D old, upd;
@@ -143,9 +143,9 @@
template<>
template<typename I, typename D>
-inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(8 == sizeof(I));
- STATIC_CAST(8 == sizeof(D));
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
+ STATIC_ASSERT(8 == sizeof(I));
+ STATIC_ASSERT(8 == sizeof(D));
D old, upd;
--- a/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -62,8 +62,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(4 == sizeof(I));
- STATIC_CAST(4 == sizeof(D));
+ STATIC_ASSERT(4 == sizeof(I));
+ STATIC_ASSERT(4 == sizeof(D));
D rv;
__asm__ volatile(
@@ -81,10 +81,11 @@
return rv;
}
+template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(8 == sizeof(I));
- STATIC_CAST(8 == sizeof(D));
+ STATIC_ASSERT(8 == sizeof(I));
+ STATIC_ASSERT(8 == sizeof(D));
D rv;
__asm__ volatile(
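The added template<> line fixes a problem distinct from the STATIC_ASSERT typos: defining a member template of the PlatformAdd<8> specialization out of line requires the empty template<> header before the member's own template header, or the definition is ill-formed. A standalone sketch of the rule:

#include <cstddef>

template<size_t byte_size>
struct PlatformAdd {
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest) const;
};

// Out-of-line member of the 8-byte specialization: the empty template<>
// header (the line the patch adds) must come first, then the member's own.
template<>
template<typename I, typename D>
D PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  static_assert(8 == sizeof(I), "size of add_value");
  static_assert(8 == sizeof(D), "size of destination");
  return *dest += add_value;   // placeholder for the platform's asm
}

int main() {
  volatile long long v = 0;
  return (int)PlatformAdd<8>{}.add_and_fetch(1LL, &v);
}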
--- a/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -178,8 +178,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(4 == sizeof(I));
- STATIC_CAST(4 == sizeof(D));
+ STATIC_ASSERT(4 == sizeof(I));
+ STATIC_ASSERT(4 == sizeof(D));
#ifdef ARM
return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
@@ -195,8 +195,8 @@
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
- STATIC_CAST(8 == sizeof(I));
- STATIC_CAST(8 == sizeof(D));
+ STATIC_ASSERT(8 == sizeof(I));
+ STATIC_ASSERT(8 == sizeof(D));
return __sync_add_and_fetch(dest, add_value);
}
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -75,6 +75,9 @@
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#endif // INCLUDE_ALL_GCS
#if INCLUDE_TRACE
#include "trace/tracing.hpp"
#endif
@@ -764,6 +767,25 @@
return OopHandle(_handles.add(h()));
}
+void ClassLoaderData::remove_handle(OopHandle h) {
+ oop* ptr = h.ptr_raw();
+ if (ptr != NULL) {
+ assert(_handles.contains(ptr), "Got unexpected handle " PTR_FORMAT, p2i(ptr));
+#if INCLUDE_ALL_GCS
+ // This barrier is used by G1 to remember the old oop values, so
+ // that we don't forget any objects that were live at the snapshot at
+ // the beginning.
+ if (UseG1GC) {
+ oop obj = *ptr;
+ if (obj != NULL) {
+ G1SATBCardTableModRefBS::enqueue(obj);
+ }
+ }
+#endif
+ *ptr = NULL;
+ }
+}
+
void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
if (dest.resolve() != NULL) {
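The comment in remove_handle() carries the SATB invariant that motivates the barrier: an object live at the marking snapshot must be marked even if the mutator clears the last reference to it mid-cycle. A standalone illustration (plain C++, not VM code) of why enqueueing the old value preserves it:

#include <cstdio>
#include <vector>

struct Obj { bool marked = false; };

static std::vector<Obj*> satb_queue;   // stand-in for the per-thread SATB queues
static bool marking_in_progress = true;

// Analogue of G1SATBCardTableModRefBS::enqueue(): record the pre-write value.
static void pre_barrier(Obj* old_val) {
  if (marking_in_progress && old_val != nullptr)
    satb_queue.push_back(old_val);
}

// Analogue of remove_handle(): enqueue the old value, then clear the slot.
static void remove_root(Obj** slot) {
  pre_barrier(*slot);
  *slot = nullptr;
}

int main() {
  Obj o;                               // live at the marking snapshot
  Obj* root = &o;
  remove_root(&root);                  // cleared while marking is in progress
  for (Obj* obj : satb_queue)          // the marker later drains the queue...
    obj->marked = true;                // ...so the snapshot-time object survives
  std::printf("marked: %s\n", o.marked ? "yes" : "no (object would be lost)");
  return 0;
}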
--- a/hotspot/src/share/vm/classfile/classLoaderData.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/classfile/classLoaderData.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -364,6 +364,7 @@
const char* loader_name();
OopHandle add_handle(Handle h);
+ void remove_handle(OopHandle h);
void init_handle_locked(OopHandle& pd, Handle h); // used for concurrent access to ModuleEntry::_pd field
void add_class(Klass* k, bool publicize = true);
void remove_class(Klass* k);
--- a/hotspot/src/share/vm/classfile/dictionary.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -85,6 +85,7 @@
void Dictionary::free_entry(DictionaryEntry* entry) {
// avoid recursion when deleting linked list
+ // pd_set is accessed during a safepoint.
while (entry->pd_set() != NULL) {
ProtectionDomainEntry* to_delete = entry->pd_set();
entry->set_pd_set(to_delete->next());
@@ -101,7 +102,7 @@
if (protection_domain == instance_klass()->protection_domain()) {
// Ensure this doesn't show up in the pd_set (invariant)
bool in_pd_set = false;
- for (ProtectionDomainEntry* current = _pd_set;
+ for (ProtectionDomainEntry* current = pd_set_acquire();
current != NULL;
current = current->next()) {
if (current->protection_domain() == protection_domain) {
@@ -121,7 +122,7 @@
return true;
}
- for (ProtectionDomainEntry* current = _pd_set;
+ for (ProtectionDomainEntry* current = pd_set_acquire();
current != NULL;
current = current->next()) {
if (current->protection_domain() == protection_domain) return true;
@@ -135,12 +136,12 @@
if (!contains_protection_domain(protection_domain())) {
ProtectionDomainCacheEntry* entry = SystemDictionary::cache_get(protection_domain);
ProtectionDomainEntry* new_head =
- new ProtectionDomainEntry(entry, _pd_set);
+ new ProtectionDomainEntry(entry, pd_set());
// Warning: Preserve store ordering. The SystemDictionary is read
// without locks. The new ProtectionDomainEntry must be
// complete before other threads can be allowed to see it
// via a store to _pd_set.
- OrderAccess::release_store_ptr(&_pd_set, new_head);
+ release_set_pd_set(new_head);
}
LogTarget(Trace, protectiondomain) lt;
if (lt.is_enabled()) {
--- a/hotspot/src/share/vm/classfile/dictionary.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -29,6 +29,7 @@
#include "classfile/systemDictionary.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.hpp"
+#include "runtime/orderAccess.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/ostream.hpp"
@@ -134,7 +135,7 @@
// It is essentially a cache to avoid repeated Java up-calls to
// ClassLoader.checkPackageAccess().
//
- ProtectionDomainEntry* _pd_set;
+ ProtectionDomainEntry* volatile _pd_set;
public:
// Tells whether a protection is in the approved set.
@@ -153,8 +154,15 @@
return (DictionaryEntry**)HashtableEntry<InstanceKlass*, mtClass>::next_addr();
}
- ProtectionDomainEntry* pd_set() const { return _pd_set; }
- void set_pd_set(ProtectionDomainEntry* pd_set) { _pd_set = pd_set; }
+ ProtectionDomainEntry* pd_set() const { return _pd_set; }
+ void set_pd_set(ProtectionDomainEntry* new_head) { _pd_set = new_head; }
+
+ ProtectionDomainEntry* pd_set_acquire() const {
+ return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
+ }
+ void release_set_pd_set(ProtectionDomainEntry* new_head) {
+ OrderAccess::release_store_ptr(&_pd_set, new_head);
+ }
// Tells whether the initiating class' protection domain can access the klass in this entry
bool is_valid_protection_domain(Handle protection_domain) {
@@ -167,7 +175,7 @@
}
void verify_protection_domain_set() {
- for (ProtectionDomainEntry* current = _pd_set;
+ for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
current != NULL;
current = current->_next) {
current->_pd_cache->protection_domain()->verify();
@@ -181,7 +189,7 @@
void print_count(outputStream *st) {
int count = 0;
- for (ProtectionDomainEntry* current = _pd_set;
+ for (ProtectionDomainEntry* current = pd_set(); // accessed inside SD lock
current != NULL;
current = current->_next) {
count++;
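pd_set_acquire()/release_set_pd_set() are what let the systemDictionary.cpp hunk below drop the SystemDictionary_lock around is_valid_protection_domain: the writer fully constructs the new entry and release-publishes it, and lock-free readers acquire-load the head. A standalone sketch of the same publication pattern (OrderAccess approximated with C++11 atomics):

#include <atomic>

struct Node {
  int payload;
  Node* next;
};

static std::atomic<Node*> head{nullptr};

// Writer side (runs under a lock): construct fully, then release-publish,
// mirroring release_set_pd_set(new_head).
void publish(int value) {
  Node* n = new Node{value, head.load(std::memory_order_relaxed)};
  head.store(n, std::memory_order_release);
}

// Reader side (lock-free): acquire-load the head, mirroring pd_set_acquire().
// The acquire/release pairing guarantees a reader that sees the new head
// also sees the node's fields fully initialized.
bool contains(int value) {
  for (Node* cur = head.load(std::memory_order_acquire);
       cur != nullptr; cur = cur->next) {
    if (cur->payload == value) return true;
  }
  return false;
}

int main() {
  publish(1);
  publish(2);
  return contains(2) ? 0 : 1;
}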
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -910,12 +910,9 @@
if (protection_domain() == NULL) return k;
// Check the protection domain has the right access
- {
- MutexLocker mu(SystemDictionary_lock, THREAD);
- if (dictionary->is_valid_protection_domain(d_index, d_hash, name,
- protection_domain)) {
- return k;
- }
+ if (dictionary->is_valid_protection_domain(d_index, d_hash, name,
+ protection_domain)) {
+ return k;
}
// Verify protection domain. If it fails an exception is thrown
--- a/hotspot/src/share/vm/code/nmethod.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/code/nmethod.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -1220,7 +1220,7 @@
// for stack scanning.
if (state == not_entrant) {
mark_as_seen_on_stack();
- OrderAccess::storestore();
+ OrderAccess::storestore(); // _stack_traversal_mark and _state
}
// Change state
--- a/hotspot/src/share/vm/code/nmethod.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/code/nmethod.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,7 +136,7 @@
// stack. An not_entrant method can be removed when there are no
// more activations, i.e., when the _stack_traversal_mark is less than
// current sweep traversal index.
- volatile jlong _stack_traversal_mark;
+ volatile long _stack_traversal_mark;
// The _hotness_counter indicates the hotness of a method. The higher
// the value the hotter the method. The hotness counter of a nmethod is
@@ -396,8 +396,8 @@
public:
// Sweeper support
- jlong stack_traversal_mark() { return OrderAccess::load_acquire(&_stack_traversal_mark); }
- void set_stack_traversal_mark(jlong l) { OrderAccess::release_store(&_stack_traversal_mark, l); }
+ long stack_traversal_mark() { return _stack_traversal_mark; }
+ void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
// implicit exceptions support
address continuation_for_implicit_exception(address pc);
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -1719,7 +1719,6 @@
G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
G1BlockOffsetTable::heap_map_factor());
- ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
G1RegionToSpaceMapper* cardtable_storage =
create_aux_memory_mapper("Card Table",
G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
--- a/hotspot/src/share/vm/oops/constantPool.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/oops/constantPool.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -89,8 +89,6 @@
void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
if (cache() != NULL) {
- MetadataFactory::free_array<u2>(loader_data, reference_map());
- set_reference_map(NULL);
MetadataFactory::free_metadata(loader_data, cache());
set_cache(NULL);
}
--- a/hotspot/src/share/vm/oops/cpCache.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/oops/cpCache.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -26,6 +26,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/rewriter.hpp"
#include "logging/log.hpp"
+#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
@@ -608,6 +609,14 @@
}
}
+void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
+ assert(!is_shared(), "shared caches are not deallocated");
+ data->remove_handle(_resolved_references);
+ set_resolved_references(NULL);
+ MetadataFactory::free_array<u2>(data, _reference_map);
+ set_reference_map(NULL);
+}
+
#if INCLUDE_CDS_JAVA_HEAP
oop ConstantPoolCache::archived_references() {
assert(UseSharedSpaces, "UseSharedSpaces expected.");
--- a/hotspot/src/share/vm/oops/cpCache.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/oops/cpCache.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -510,9 +510,9 @@
void dump_cache();
#endif // INCLUDE_JVMTI
- // Deallocate - no fields to deallocate
+ // RedefineClasses support
DEBUG_ONLY(bool on_stack() { return false; })
- void deallocate_contents(ClassLoaderData* data) {}
+ void deallocate_contents(ClassLoaderData* data);
bool is_klass() const { return false; }
// Printing
--- a/hotspot/src/share/vm/oops/oopHandle.hpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/oops/oopHandle.hpp Thu Aug 31 16:29:58 2017 +0200
@@ -46,6 +46,9 @@
OopHandle(oop* w) : _obj(w) {}
oop resolve() const { return (_obj == NULL) ? (oop)NULL : *_obj; }
+
+ // Used only for removing a handle.
+ oop* ptr_raw() { return _obj; }
};
#endif // SHARE_VM_OOPS_OOPHANDLE_HPP
--- a/hotspot/src/share/vm/runtime/sweeper.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/runtime/sweeper.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -53,7 +53,7 @@
public:
int traversal;
int compile_id;
- jlong traversal_mark;
+ long traversal_mark;
int state;
const char* kind;
address vep;
@@ -62,7 +62,7 @@
void print() {
tty->print_cr("traversal = %d compile_id = %d %s uep = " PTR_FORMAT " vep = "
- PTR_FORMAT " state = %d traversal_mark "JLONG_FORMAT" line = %d",
+ PTR_FORMAT " state = %d traversal_mark %ld line = %d",
traversal,
compile_id,
kind == NULL ? "" : kind,
@@ -629,6 +629,7 @@
} else if (cm->is_not_entrant()) {
// If there are no current activations of this method on the
// stack we can safely convert it to a zombie method
+ OrderAccess::loadload(); // _stack_traversal_mark and _state
if (cm->can_convert_to_zombie()) {
// Clear ICStubs to prevent back patching stubs of zombie or flushed
// nmethods during the next safepoint (see ICStub::finalize).
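This loadload() pairs with the storestore() added in nmethod.cpp above; together they replace the load_acquire/release_store accessors removed from nmethod.hpp. A standalone sketch of the pairing (fences approximated with C++11 atomics; the real code relies on the volatile fields plus OrderAccess):

#include <atomic>

static volatile long stack_traversal_mark = 0;
static volatile int  state = 0;              // 0 = in_use, 1 = not_entrant

// nmethod side: publish the mark before the state change becomes visible.
void writer() {
  stack_traversal_mark = 42;                               // mark_as_seen_on_stack()
  std::atomic_thread_fence(std::memory_order_release);     // ~ OrderAccess::storestore()
  state = 1;                                               // state change
}

// Sweeper side: a reader that observes the new state is guaranteed, by the
// paired fences, to also observe the mark written before it.
bool reader(long* mark_out) {
  if (state == 1) {
    std::atomic_thread_fence(std::memory_order_acquire);   // ~ OrderAccess::loadload()
    *mark_out = stack_traversal_mark;
    return true;
  }
  return false;
}

int main() {
  writer();
  long mark = 0;
  return (reader(&mark) && mark == 42) ? 0 : 1;
}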
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Tue Aug 29 15:53:04 2017 -0400
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Aug 31 16:29:58 2017 +0200
@@ -841,7 +841,7 @@
nonstatic_field(nmethod, _verified_entry_point, address) \
nonstatic_field(nmethod, _osr_entry_point, address) \
volatile_nonstatic_field(nmethod, _lock_count, jint) \
- volatile_nonstatic_field(nmethod, _stack_traversal_mark, jlong) \
+ volatile_nonstatic_field(nmethod, _stack_traversal_mark, long) \
nonstatic_field(nmethod, _compile_id, int) \
nonstatic_field(nmethod, _comp_level, int) \
\