--- a/.hgtags-top-repo Tue Jul 26 10:06:19 2016 -0700
+++ b/.hgtags-top-repo Wed Jul 05 21:59:15 2017 +0200
@@ -370,3 +370,4 @@
9aa7d40f3a453f51e47f4c1b19eff5740a74a9f8 jdk-9+125
3a58466296d36944454756ef01e7513ac5e14a16 jdk-9+126
8fa686245bd2a072ece3392743460030f0854520 jdk-9+127
+b30ae794d974d7dd3eb4e84203f70021823fa6c6 jdk-9+128
--- a/common/autoconf/boot-jdk.m4 Tue Jul 26 10:06:19 2016 -0700
+++ b/common/autoconf/boot-jdk.m4 Wed Jul 05 21:59:15 2017 +0200
@@ -345,6 +345,9 @@
# Disable special log output when a debug build is used as Boot JDK...
ADD_JVM_ARG_IF_OK([-XX:-PrintVMOptions -XX:-UnlockDiagnosticVMOptions -XX:-LogVMOutput],boot_jdk_jvmargs,[$JAVA])
+ # Force en-US environment
+ ADD_JVM_ARG_IF_OK([-Duser.language=en -Duser.country=US],boot_jdk_jvmargs,[$JAVA])
+
# Apply user provided options.
ADD_JVM_ARG_IF_OK([$with_boot_jdk_jvmargs],boot_jdk_jvmargs,[$JAVA])
--- a/common/autoconf/generated-configure.sh Tue Jul 26 10:06:19 2016 -0700
+++ b/common/autoconf/generated-configure.sh Wed Jul 05 21:59:15 2017 +0200
@@ -5094,7 +5094,7 @@
#CUSTOM_AUTOCONF_INCLUDE
# Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1467960715
+DATE_WHEN_GENERATED=1469202305
###############################################################################
#
@@ -65048,6 +65048,23 @@
fi
+ # Force en-US environment
+
+ $ECHO "Check if jvm arg is ok: -Duser.language=en -Duser.country=US" >&5
+ $ECHO "Command: $JAVA -Duser.language=en -Duser.country=US -version" >&5
+ OUTPUT=`$JAVA -Duser.language=en -Duser.country=US -version 2>&1`
+ FOUND_WARN=`$ECHO "$OUTPUT" | $GREP -i warn`
+ FOUND_VERSION=`$ECHO $OUTPUT | $GREP " version \""`
+ if test "x$FOUND_VERSION" != x && test "x$FOUND_WARN" = x; then
+ boot_jdk_jvmargs="$boot_jdk_jvmargs -Duser.language=en -Duser.country=US"
+ JVM_ARG_OK=true
+ else
+ $ECHO "Arg failed:" >&5
+ $ECHO "$OUTPUT" >&5
+ JVM_ARG_OK=false
+ fi
+
+
# Apply user provided options.
$ECHO "Check if jvm arg is ok: $with_boot_jdk_jvmargs" >&5
--- a/hotspot/.hgtags Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/.hgtags Wed Jul 05 21:59:15 2017 +0200
@@ -530,3 +530,4 @@
bb640b49741af3f57f9994129934c46fc173219f jdk-9+125
adc8c84b7cf8c540d920182f78a2bc982366432a jdk-9+126
352357128f602dcf0426b1cbe011a4685a4d9f97 jdk-9+127
+22bf6db9767b1b3a1994cbf32eb3331f31ae2093 jdk-9+128
--- a/hotspot/make/test/JtregNative.gmk Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/make/test/JtregNative.gmk Wed Jul 05 21:59:15 2017 +0200
@@ -51,6 +51,7 @@
$(HOTSPOT_TOPDIR)/test/compiler/floatingpoint/ \
$(HOTSPOT_TOPDIR)/test/compiler/calls \
$(HOTSPOT_TOPDIR)/test/compiler/native \
+ $(HOTSPOT_TOPDIR)/test/serviceability/jvmti/GetNamedModule \
$(HOTSPOT_TOPDIR)/test/testlibrary/jvmti \
#
@@ -64,6 +65,7 @@
ifeq ($(TOOLCHAIN_TYPE), solstudio)
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_liboverflow := -lc
BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libSimpleClassFileLoadHook := -lc
+ BUILD_HOTSPOT_JTREG_LIBRARIES_LDFLAGS_libGetNamedModuleTest := -lc
endif
BUILD_HOTSPOT_JTREG_OUTPUT_DIR := $(BUILD_OUTPUT)/support/test/hotspot/jtreg/native
--- a/hotspot/src/os/linux/vm/os_linux.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1742,11 +1742,11 @@
}
typedef struct {
- Elf32_Half code; // Actual value as defined in elf.h
- Elf32_Half compat_class; // Compatibility of archs at VM's sense
- char elf_class; // 32 or 64 bit
- char endianess; // MSB or LSB
- char* name; // String representation
+ Elf32_Half code; // Actual value as defined in elf.h
+ Elf32_Half compat_class; // Compatibility of archs in the VM's sense
+ unsigned char elf_class; // 32 or 64 bit
+ unsigned char endianess; // MSB or LSB
+ char* name; // String representation
} arch_t;
#ifndef EM_486
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1320,36 +1320,8 @@
}
bool os::supports_vtime() { return true; }
-
-bool os::enable_vtime() {
- int fd = ::open("/proc/self/ctl", O_WRONLY);
- if (fd == -1) {
- return false;
- }
-
- long cmd[] = { PCSET, PR_MSACCT };
- int res = ::write(fd, cmd, sizeof(long) * 2);
- ::close(fd);
- if (res != sizeof(long) * 2) {
- return false;
- }
- return true;
-}
-
-bool os::vtime_enabled() {
- int fd = ::open("/proc/self/status", O_RDONLY);
- if (fd == -1) {
- return false;
- }
-
- pstatus_t status;
- int res = os::read(fd, (void*) &status, sizeof(pstatus_t));
- ::close(fd);
- if (res != sizeof(pstatus_t)) {
- return false;
- }
- return status.pr_flags & PR_MSACCT;
-}
+bool os::enable_vtime() { return false; }
+bool os::vtime_enabled() { return false; }
double os::elapsedVTime() {
return (double)gethrvtime() / (double)hrtime_hz;
--- a/hotspot/src/share/vm/classfile/altHashing.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/altHashing.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -224,7 +224,7 @@
static const jbyte THREE_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82};
static const jbyte FOUR_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83};
static const jchar TWO_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382};
-static const jint ONE_INT[] = { 0x83828180};
+static const jint ONE_INT[] = { (jint)0x83828180};
static const jbyte SIX_BYTE[] = { (jbyte) 0x80, (jbyte) 0x81, (jbyte) 0x82, (jbyte) 0x83, (jbyte) 0x84, (jbyte) 0x85};
static const jchar THREE_CHAR[] = { (jchar) 0x8180, (jchar) 0x8382, (jchar) 0x8584};
static const jbyte EIGHT_BYTE[] = {
@@ -235,7 +235,7 @@
(jchar) 0x8180, (jchar) 0x8382,
(jchar) 0x8584, (jchar) 0x8786};
-static const jint TWO_INT[] = { 0x83828180, 0x87868584};
+static const jint TWO_INT[] = { (jint)0x83828180, (jint)0x87868584};
static const juint MURMUR3_32_X86_CHECK_VALUE = 0xB0F57EE3;
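For context on the (jint) casts added above: 0x83828180 and 0x87868584 exceed INT_MAX, so as hexadecimal literals they take an unsigned type and initializing a signed jint from them narrows. The casts keep the intended bit pattern while silencing the conversion warning. A minimal standalone C++ sketch (not part of the patch) of the same narrowing:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 0x83828180 > INT32_MAX, so the literal is unsigned; without an explicit
      // cast some compilers warn that the implicit conversion changes the value.
      int32_t one_int = (int32_t)0x83828180u;   // same bit pattern, now signed
      std::printf("%d (0x%08x)\n", one_int, (uint32_t)one_int);
      return 0;
    }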
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -142,7 +142,9 @@
f->do_oop(&_class_loader);
_dependencies.oops_do(f);
- _handles->oops_do(f);
+ if (_handles != NULL) {
+ _handles->oops_do(f);
+ }
if (klass_closure != NULL) {
classes_do(klass_closure);
}
--- a/hotspot/src/share/vm/classfile/compactHashtable.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/compactHashtable.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -270,6 +270,10 @@
// For reading from/writing to the CDS archive
void serialize(SerializeClosure* soc);
+
+ uintx base_address() {
+ return (uintx) _base_address;
+ }
};
////////////////////////////////////////////////////////////////////////
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -871,12 +871,17 @@
int java_lang_Class::oop_size(oop java_class) {
assert(_oop_size_offset != 0, "must be set");
- return java_class->int_field(_oop_size_offset);
-}
+ int size = java_class->int_field(_oop_size_offset);
+ assert(size > 0, "Oop size must be greater than zero, not %d", size);
+ return size;
+}
+
void java_lang_Class::set_oop_size(oop java_class, int size) {
assert(_oop_size_offset != 0, "must be set");
+ assert(size > 0, "Oop size must be greater than zero, not %d", size);
java_class->int_field_put(_oop_size_offset, size);
}
+
int java_lang_Class::static_oop_field_count(oop java_class) {
assert(_static_oop_field_count_offset != 0, "must be set");
return java_class->int_field(_static_oop_field_count_offset);
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -275,7 +275,6 @@
static int static_oop_field_count(oop java_class);
static void set_static_oop_field_count(oop java_class, int size);
-
static GrowableArray<Klass*>* fixup_mirror_list() {
return _fixup_mirror_list;
}
--- a/hotspot/src/share/vm/classfile/modules.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/modules.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -820,6 +820,28 @@
}
+jobject Modules::get_named_module(Handle h_loader, const char* package_str, TRAPS) {
+ assert(ModuleEntryTable::javabase_defined(),
+ "Attempt to call get_named_module before java.base is defined");
+ assert(h_loader.is_null() || java_lang_ClassLoader::is_subclass(h_loader->klass()),
+ "Class loader is not a subclass of java.lang.ClassLoader");
+ assert(package_str != NULL, "the package_str should not be NULL");
+
+ if (strlen(package_str) == 0) {
+ return NULL;
+ }
+ TempNewSymbol package_sym = SymbolTable::new_symbol(package_str, CHECK_NULL);
+ const PackageEntry* const pkg_entry =
+ get_package_entry_by_name(package_sym, h_loader, THREAD);
+ const ModuleEntry* const module_entry = (pkg_entry != NULL ? pkg_entry->module() : NULL);
+
+ if (module_entry != NULL && module_entry->module() != NULL && module_entry->is_named()) {
+ return JNIHandles::make_local(THREAD, JNIHandles::resolve(module_entry->module()));
+ }
+ return NULL;
+}
+
+
// This method is called by JFR and by the above method.
jobject Modules::get_module(Symbol* package_name, Handle h_loader, TRAPS) {
const PackageEntry* const pkg_entry =
--- a/hotspot/src/share/vm/classfile/modules.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/modules.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -121,6 +121,7 @@
// IllegalArgumentException is thrown if loader is neither null nor a subtype of
// java/lang/ClassLoader.
static jobject get_module_by_package_name(jobject loader, jstring package, TRAPS);
+ static jobject get_named_module(Handle h_loader, const char* package, TRAPS);
// If package is defined by loader, return the
// java.lang.reflect.Module object for the module in which the package is defined.
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -238,6 +238,29 @@
}
}
+u4 SymbolTable::encode_shared(Symbol* sym) {
+ assert(DumpSharedSpaces, "called only during dump time");
+ uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
+ uintx offset = uintx(sym) - base_address;
+ assert(offset < 0x7fffffff, "sanity");
+ return u4(offset);
+}
+
+Symbol* SymbolTable::decode_shared(u4 offset) {
+ assert(!DumpSharedSpaces, "called only during runtime");
+ uintx base_address = _shared_table.base_address();
+ Symbol* sym = (Symbol*)(base_address + offset);
+
+#ifndef PRODUCT
+ const char* s = (const char*)sym->bytes();
+ int len = sym->utf8_length();
+ unsigned int hash = hash_symbol(s, len);
+ assert(sym == lookup_shared(s, len, hash), "must be shared symbol");
+#endif
+
+ return sym;
+}
+
// Pick hashing algorithm.
unsigned int SymbolTable::hash_symbol(const char* s, int len) {
return use_alternate_hashcode() ?
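The new SymbolTable::encode_shared()/decode_shared() pair above stores a shared Symbol* as a 32-bit offset from the base of the mapped CDS region instead of a full pointer, which is also why CompactHashtable::base_address() is exposed in this changeset. A minimal sketch of that round trip, assuming a single contiguous mapped region well below 2 GB (the names here are made up, not HotSpot API):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t u4;

    // 'base' is the start of one contiguous mapped region assumed to be < 2 GB.
    u4 encode(const void* p, const char* base) {
      uintptr_t offset = (uintptr_t)p - (uintptr_t)base;
      assert(offset < 0x7fffffff);
      return (u4)offset;           // fits in 32 bits because the region is small
    }

    void* decode(u4 offset, char* base) {
      // Valid as long as the region is mapped at the same base used to encode.
      return base + offset;
    }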
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -253,6 +253,8 @@
// Sharing
static void serialize(SerializeClosure* soc);
+ static u4 encode_shared(Symbol* sym);
+ static Symbol* decode_shared(u4 offset);
// Rehash the symbol table if it gets out of balance
static void rehash_table();
--- a/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -78,7 +78,19 @@
TRAPS) {
return NULL;
}
+
static void serialize(SerializeClosure* soc) {}
+
+ // The (non-application) CDS implementation supports only classes in the boot
+ // class loader, which ensures that the verification constraints are the same
+ // during archive creation time and runtime. Thus we can do the constraint checks
+ // entirely during archive creation time.
+ static bool add_verification_constraint(Klass* k, Symbol* name,
+ Symbol* from_name, bool from_field_is_protected,
+ bool from_is_array, bool from_is_object) { return false; }
+ static void finalize_verification_constraints() {}
+ static void check_verification_constraints(instanceKlassHandle klass,
+ TRAPS) {}
};
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
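The comment block above captures the design: with only boot-loader classes in the archive, the assignability constraints seen at dump time are the same ones the verifier would see at run time, so this stub can check everything while dumping and do nothing later. Implementations that do defer the check would follow a record-now/check-later pattern; a rough, illustrative-only sketch with invented names (not the HotSpot data structures):

    #include <string>
    #include <utility>
    #include <vector>

    struct VerificationConstraint {
      std::string name;        // type the verifier needs to assign to
      std::string from_name;   // type being assigned from
    };

    static std::vector<VerificationConstraint> recorded;

    // Dump time: remember the pair instead of resolving the classes eagerly.
    void record_constraint(std::string name, std::string from_name) {
      recorded.push_back({std::move(name), std::move(from_name)});
    }

    // Run time: replay the recorded pairs against the real assignability check.
    template <typename Checker>
    bool check_all(Checker is_assignable) {
      for (const VerificationConstraint& c : recorded) {
        if (!is_assignable(c.name, c.from_name)) return false;
      }
      return true;
    }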
--- a/hotspot/src/share/vm/classfile/verificationType.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/verificationType.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionaryShared.hpp"
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
@@ -41,6 +42,39 @@
}
}
+bool VerificationType::resolve_and_check_assignability(instanceKlassHandle klass, Symbol* name,
+ Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object, TRAPS) {
+ Klass* obj = SystemDictionary::resolve_or_fail(
+ name, Handle(THREAD, klass->class_loader()),
+ Handle(THREAD, klass->protection_domain()), true, CHECK_false);
+ if (log_is_enabled(Debug, class, resolve)) {
+ Verifier::trace_class_resolution(obj, klass());
+ }
+
+ KlassHandle this_class(THREAD, obj);
+
+ if (this_class->is_interface() && (!from_field_is_protected ||
+ from_name != vmSymbols::java_lang_Object())) {
+ // If we are not trying to access a protected field or method in
+ // java.lang.Object then, for arrays, we only allow assignability
+ // to interfaces java.lang.Cloneable and java.io.Serializable.
+ // Otherwise, we treat interfaces as java.lang.Object.
+ return !from_is_array ||
+ this_class == SystemDictionary::Cloneable_klass() ||
+ this_class == SystemDictionary::Serializable_klass();
+ } else if (from_is_object) {
+ Klass* from_class = SystemDictionary::resolve_or_fail(
+ from_name, Handle(THREAD, klass->class_loader()),
+ Handle(THREAD, klass->protection_domain()), true, CHECK_false);
+ if (log_is_enabled(Debug, class, resolve)) {
+ Verifier::trace_class_resolution(from_class, klass());
+ }
+ return InstanceKlass::cast(from_class)->is_subclass_of(this_class());
+ }
+
+ return false;
+}
+
bool VerificationType::is_reference_assignable_from(
const VerificationType& from, ClassVerifier* context,
bool from_field_is_protected, TRAPS) const {
@@ -58,33 +92,17 @@
// any object or array is assignable to java.lang.Object
return true;
}
- Klass* obj = SystemDictionary::resolve_or_fail(
- name(), Handle(THREAD, klass->class_loader()),
- Handle(THREAD, klass->protection_domain()), true, CHECK_false);
- if (log_is_enabled(Debug, class, resolve)) {
- Verifier::trace_class_resolution(obj, klass());
+
+ if (DumpSharedSpaces && SystemDictionaryShared::add_verification_constraint(klass(),
+ name(), from.name(), from_field_is_protected, from.is_array(),
+ from.is_object())) {
+ // If add_verification_constraint() returns true, the resolution/check should be
+ // delayed until runtime.
+ return true;
}
- KlassHandle this_class(THREAD, obj);
-
- if (this_class->is_interface() && (!from_field_is_protected ||
- from.name() != vmSymbols::java_lang_Object())) {
- // If we are not trying to access a protected field or method in
- // java.lang.Object then, for arrays, we only allow assignability
- // to interfaces java.lang.Cloneable and java.io.Serializable.
- // Otherwise, we treat interfaces as java.lang.Object.
- return !from.is_array() ||
- this_class == SystemDictionary::Cloneable_klass() ||
- this_class == SystemDictionary::Serializable_klass();
- } else if (from.is_object()) {
- Klass* from_class = SystemDictionary::resolve_or_fail(
- from.name(), Handle(THREAD, klass->class_loader()),
- Handle(THREAD, klass->protection_domain()), true, CHECK_false);
- if (log_is_enabled(Debug, class, resolve)) {
- Verifier::trace_class_resolution(from_class, klass());
- }
- return InstanceKlass::cast(from_class)->is_subclass_of(this_class());
- }
+ return resolve_and_check_assignability(klass(), name(), from.name(),
+ from_field_is_protected, from.is_array(), from.is_object(), THREAD);
} else if (is_array() && from.is_array()) {
VerificationType comp_this = get_component(context, CHECK_false);
VerificationType comp_from = from.get_component(context, CHECK_false);
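The refactored resolve_and_check_assignability() above keeps the verifier's interface rule: unless a protected member is being accessed through java.lang.Object, an interface target accepts any non-array value (interfaces are treated like java.lang.Object), while an array value is only assignable to java.lang.Cloneable or java.io.Serializable. A simplified, illustrative-only restatement of just that array/interface branch, with plain strings standing in for Symbol*/Klass* handles:

    #include <string>

    // True if a value of array type may be assigned to the named interface.
    bool array_assignable_to_interface(const std::string& interface_name) {
      // Arrays implement exactly these two interfaces; any other interface
      // target rejects an array value in this branch of the check.
      return interface_name == "java/lang/Cloneable" ||
             interface_name == "java/io/Serializable";
    }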
--- a/hotspot/src/share/vm/classfile/verificationType.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/verificationType.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -333,6 +333,12 @@
bool is_reference_assignable_from(
const VerificationType&, ClassVerifier*, bool from_field_is_protected,
TRAPS) const;
+
+ public:
+ static bool resolve_and_check_assignability(instanceKlassHandle klass, Symbol* name,
+ Symbol* from_name, bool from_field_is_protected,
+ bool from_is_array, bool from_is_object,
+ TRAPS);
};
#endif // SHARE_VM_CLASSFILE_VERIFICATIONTYPE_HPP
--- a/hotspot/src/share/vm/classfile/verifier.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/classfile/verifier.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -2377,9 +2377,17 @@
case Bytecodes::_ifnonnull:
target = bcs.dest();
if (visited_branches->contains(bci)) {
- if (bci_stack->is_empty()) return true;
- // Pop a bytecode starting offset and scan from there.
- bcs.set_start(bci_stack->pop());
+ if (bci_stack->is_empty()) {
+ if (handler_stack->is_empty()) {
+ return true;
+ } else {
+ // Parse the catch handlers for try blocks containing athrow.
+ bcs.set_start(handler_stack->pop());
+ }
+ } else {
+ // Pop a bytecode starting offset and scan from there.
+ bcs.set_start(bci_stack->pop());
+ }
} else {
if (target > bci) { // forward branch
if (target >= code_length) return false;
@@ -2402,9 +2410,17 @@
case Bytecodes::_goto_w:
target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
if (visited_branches->contains(bci)) {
- if (bci_stack->is_empty()) return true;
- // Been here before, pop new starting offset from stack.
- bcs.set_start(bci_stack->pop());
+ if (bci_stack->is_empty()) {
+ if (handler_stack->is_empty()) {
+ return true;
+ } else {
+ // Parse the catch handlers for try blocks containing athrow.
+ bcs.set_start(handler_stack->pop());
+ }
+ } else {
+ // Been here before, pop new starting offset from stack.
+ bcs.set_start(bci_stack->pop());
+ }
} else {
if (target >= code_length) return false;
// Continue scanning from the target onward.
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1256,9 +1256,7 @@
// set between the last GC or pause and now. We need to clear the
// incremental collection set and then start rebuilding it afresh
// after this full GC.
- abandon_collection_set(collection_set()->inc_head());
- collection_set()->clear_incremental();
- collection_set()->stop_incremental_building();
+ abandon_collection_set(collection_set());
tear_down_region_sets(false /* free_list_only */);
collector_state()->set_gcs_are_young(true);
@@ -1379,7 +1377,6 @@
_verifier->check_bitmaps("Full GC End");
// Start a new incremental collection set for the next pause
- assert(collection_set()->head() == NULL, "must be");
collection_set()->start_incremental_building();
clear_cset_fast_test();
@@ -1724,8 +1721,6 @@
_old_marking_cycles_started(0),
_old_marking_cycles_completed(0),
_in_cset_fast_test(),
- _worker_cset_start_region(NULL),
- _worker_cset_start_region_time_stamp(NULL),
_gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
_gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()) {
@@ -1748,8 +1743,6 @@
uint n_queues = ParallelGCThreads;
_task_queues = new RefToScanQueueSet(n_queues);
- _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
- _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (uint i = 0; i < n_queues; i++) {
@@ -1758,7 +1751,6 @@
_task_queues->register_queue(i, q);
::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
}
- clear_cset_start_regions();
// Initialize the G1EvacuationFailureALot counters and flags.
NOT_PRODUCT(reset_evacuation_should_fail();)
@@ -1987,6 +1979,8 @@
_preserved_marks_set.init(ParallelGCThreads);
+ _collection_set.initialize(max_regions());
+
return JNI_OK;
}
@@ -2420,117 +2414,12 @@
_hrm.par_iterate(cl, worker_id, hrclaimer, concurrent);
}
-// Clear the cached CSet starting regions and (more importantly)
-// the time stamps. Called when we reset the GC time stamp.
-void G1CollectedHeap::clear_cset_start_regions() {
- assert(_worker_cset_start_region != NULL, "sanity");
- assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
-
- for (uint i = 0; i < ParallelGCThreads; i++) {
- _worker_cset_start_region[i] = NULL;
- _worker_cset_start_region_time_stamp[i] = 0;
- }
+void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
+ _collection_set.iterate(cl);
}
-// Given the id of a worker, obtain or calculate a suitable
-// starting region for iterating over the current collection set.
-HeapRegion* G1CollectedHeap::start_cset_region_for_worker(uint worker_i) {
- assert(get_gc_time_stamp() > 0, "should have been updated by now");
-
- HeapRegion* result = NULL;
- unsigned gc_time_stamp = get_gc_time_stamp();
-
- if (_worker_cset_start_region_time_stamp[worker_i] == gc_time_stamp) {
- // Cached starting region for current worker was set
- // during the current pause - so it's valid.
- // Note: the cached starting heap region may be NULL
- // (when the collection set is empty).
- result = _worker_cset_start_region[worker_i];
- assert(result == NULL || result->in_collection_set(), "sanity");
- return result;
- }
-
- // The cached entry was not valid so let's calculate
- // a suitable starting heap region for this worker.
-
- // We want the parallel threads to start their collection
- // set iteration at different collection set regions to
- // avoid contention.
- // If we have:
- // n collection set regions
- // p threads
- // Then thread t will start at region floor ((t * n) / p)
-
- result = collection_set()->head();
- uint cs_size = collection_set()->region_length();
- uint active_workers = workers()->active_workers();
-
- uint end_ind = (cs_size * worker_i) / active_workers;
- uint start_ind = 0;
-
- if (worker_i > 0 &&
- _worker_cset_start_region_time_stamp[worker_i - 1] == gc_time_stamp) {
- // Previous workers starting region is valid
- // so let's iterate from there
- start_ind = (cs_size * (worker_i - 1)) / active_workers;
- OrderAccess::loadload();
- result = _worker_cset_start_region[worker_i - 1];
- }
-
- for (uint i = start_ind; i < end_ind; i++) {
- result = result->next_in_collection_set();
- }
-
- // Note: the calculated starting heap region may be NULL
- // (when the collection set is empty).
- assert(result == NULL || result->in_collection_set(), "sanity");
- assert(_worker_cset_start_region_time_stamp[worker_i] != gc_time_stamp,
- "should be updated only once per pause");
- _worker_cset_start_region[worker_i] = result;
- OrderAccess::storestore();
- _worker_cset_start_region_time_stamp[worker_i] = gc_time_stamp;
- return result;
-}
-
-void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
- HeapRegion* r = collection_set()->head();
- while (r != NULL) {
- HeapRegion* next = r->next_in_collection_set();
- if (cl->doHeapRegion(r)) {
- cl->incomplete();
- return;
- }
- r = next;
- }
-}
-
-void G1CollectedHeap::collection_set_iterate_from(HeapRegion* r,
- HeapRegionClosure *cl) {
- if (r == NULL) {
- // The CSet is empty so there's nothing to do.
- return;
- }
-
- assert(r->in_collection_set(),
- "Start region must be a member of the collection set.");
- HeapRegion* cur = r;
- while (cur != NULL) {
- HeapRegion* next = cur->next_in_collection_set();
- if (cl->doHeapRegion(cur) && false) {
- cl->incomplete();
- return;
- }
- cur = next;
- }
- cur = collection_set()->head();
- while (cur != r) {
- HeapRegion* next = cur->next_in_collection_set();
- if (cl->doHeapRegion(cur) && false) {
- cl->incomplete();
- return;
- }
- cur = next;
- }
+void G1CollectedHeap::collection_set_iterate_from(HeapRegionClosure *cl, uint worker_id) {
+ _collection_set.iterate_from(cl, worker_id, workers()->active_workers());
}
HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
@@ -3090,6 +2979,18 @@
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}
+class G1PrintCollectionSetClosure : public HeapRegionClosure {
+private:
+ G1HRPrinter* _hr_printer;
+public:
+ G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }
+
+ virtual bool doHeapRegion(HeapRegion* r) {
+ _hr_printer->cset(r);
+ return false;
+ }
+};
+
bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
assert_at_safepoint(true /* should_be_vm_thread */);
@@ -3268,11 +3169,8 @@
_cm->verify_no_cset_oops();
if (_hr_printer.is_active()) {
- HeapRegion* hr = collection_set()->head();
- while (hr != NULL) {
- _hr_printer.cset(hr);
- hr = hr->next_in_collection_set();
- }
+ G1PrintCollectionSetClosure cl(&_hr_printer);
+ _collection_set.iterate(&cl);
}
// Initialize the GC alloc regions.
@@ -3287,12 +3185,10 @@
post_evacuate_collection_set(evacuation_info, &per_thread_states);
const size_t* surviving_young_words = per_thread_states.surviving_young_words();
- free_collection_set(collection_set()->head(), evacuation_info, surviving_young_words);
+ free_collection_set(&_collection_set, evacuation_info, surviving_young_words);
eagerly_reclaim_humongous_regions();
- collection_set()->clear_head();
-
record_obj_copy_mem_stats();
_survivor_evac_stats.adjust_desired_plab_sz();
_old_evac_stats.adjust_desired_plab_sz();
@@ -4704,120 +4600,139 @@
workers()->run_task(&g1_par_scrub_rs_task);
}
-void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
- size_t pre_used = 0;
- FreeRegionList local_free_list("Local List for CSet Freeing");
-
- double young_time_ms = 0.0;
- double non_young_time_ms = 0.0;
-
- _eden.clear();
-
- G1Policy* policy = g1_policy();
-
- double start_sec = os::elapsedTime();
- bool non_young = true;
-
- HeapRegion* cur = cs_head;
- int age_bound = -1;
- size_t rs_lengths = 0;
-
- while (cur != NULL) {
- assert(!is_on_master_free_list(cur), "sanity");
- if (non_young) {
- if (cur->is_young()) {
- double end_sec = os::elapsedTime();
- double elapsed_ms = (end_sec - start_sec) * 1000.0;
- non_young_time_ms += elapsed_ms;
-
- start_sec = os::elapsedTime();
- non_young = false;
- }
+class G1FreeCollectionSetClosure : public HeapRegionClosure {
+private:
+ const size_t* _surviving_young_words;
+
+ FreeRegionList _local_free_list;
+ size_t _rs_lengths;
+ // Bytes used in successfully evacuated regions before the evacuation.
+ size_t _before_used_bytes;
+ // Bytes used in unsuccessfully evacuated regions before the evacuation.
+ size_t _after_used_bytes;
+
+ size_t _bytes_allocated_in_old_since_last_gc;
+
+ size_t _failure_used_words;
+ size_t _failure_waste_words;
+
+ double _young_time;
+ double _non_young_time;
+public:
+ G1FreeCollectionSetClosure(const size_t* surviving_young_words) :
+ HeapRegionClosure(),
+ _surviving_young_words(surviving_young_words),
+ _local_free_list("Local Region List for CSet Freeing"),
+ _rs_lengths(0),
+ _before_used_bytes(0),
+ _after_used_bytes(0),
+ _bytes_allocated_in_old_since_last_gc(0),
+ _failure_used_words(0),
+ _failure_waste_words(0),
+ _young_time(0.0),
+ _non_young_time(0.0) {
+ }
+
+ virtual bool doHeapRegion(HeapRegion* r) {
+ double start_time = os::elapsedTime();
+
+ bool is_young = r->is_young();
+
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ assert(!g1h->is_on_master_free_list(r), "sanity");
+
+ _rs_lengths += r->rem_set()->occupied_locked();
+
+ assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
+ g1h->clear_in_cset(r);
+
+ if (is_young) {
+ int index = r->young_index_in_cset();
+ assert(index != -1, "Young index in collection set must not be -1 for region %u", r->hrm_index());
+ assert((uint) index < g1h->collection_set()->young_region_length(), "invariant");
+ size_t words_survived = _surviving_young_words[index];
+ r->record_surv_words_in_group(words_survived);
} else {
- if (!cur->is_young()) {
- double end_sec = os::elapsedTime();
- double elapsed_ms = (end_sec - start_sec) * 1000.0;
- young_time_ms += elapsed_ms;
-
- start_sec = os::elapsedTime();
- non_young = true;
- }
+ assert(r->young_index_in_cset() == -1, "Young index for old region %u in collection set must be -1", r->hrm_index());
}
- rs_lengths += cur->rem_set()->occupied_locked();
-
- HeapRegion* next = cur->next_in_collection_set();
- assert(cur->in_collection_set(), "bad CS");
- cur->set_next_in_collection_set(NULL);
- clear_in_cset(cur);
-
- if (cur->is_young()) {
- int index = cur->young_index_in_cset();
- assert(index != -1, "invariant");
- assert((uint) index < collection_set()->young_region_length(), "invariant");
- size_t words_survived = surviving_young_words[index];
- cur->record_surv_words_in_group(words_survived);
-
+ if (!r->evacuation_failed()) {
+ assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
+ _before_used_bytes += r->used();
+ g1h->free_region(r, &_local_free_list, false /* par */, true /* locked */);
} else {
- int index = cur->young_index_in_cset();
- assert(index == -1, "invariant");
- }
-
- assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
- (!cur->is_young() && cur->young_index_in_cset() == -1),
- "invariant" );
-
- if (!cur->evacuation_failed()) {
- MemRegion used_mr = cur->used_region();
-
- // And the region is empty.
- assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
- pre_used += cur->used();
- free_region(cur, &local_free_list, false /* par */, true /* locked */);
- } else {
- cur->uninstall_surv_rate_group();
- if (cur->is_young()) {
- cur->set_young_index_in_cset(-1);
- }
- cur->set_evacuation_failed(false);
+ r->uninstall_surv_rate_group();
+ r->set_young_index_in_cset(-1);
+ r->set_evacuation_failed(false);
// When moving a young gen region to old gen, we "allocate" that whole region
// there. This is in addition to any already evacuated objects. Notify the
// policy about that.
// Old gen regions do not cause an additional allocation: both the objects
// still in the region and the ones already moved are accounted for elsewhere.
- if (cur->is_young()) {
- policy->add_bytes_allocated_in_old_since_last_gc(HeapRegion::GrainBytes);
+ if (is_young) {
+ _bytes_allocated_in_old_since_last_gc += HeapRegion::GrainBytes;
}
// The region is now considered to be old.
- cur->set_old();
+ r->set_old();
// Do some allocation statistics accounting. Regions that failed evacuation
// are always made old, so there is no need to update anything in the young
// gen statistics, but we need to update old gen statistics.
- size_t used_words = cur->marked_bytes() / HeapWordSize;
- _old_evac_stats.add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
- _old_set.add(cur);
- evacuation_info.increment_collectionset_used_after(cur->used());
+ size_t used_words = r->marked_bytes() / HeapWordSize;
+
+ _failure_used_words += used_words;
+ _failure_waste_words += HeapRegion::GrainWords - used_words;
+
+ g1h->old_set_add(r);
+ _after_used_bytes += r->used();
+ }
+
+ if (is_young) {
+ _young_time += os::elapsedTime() - start_time;
+ } else {
+ _non_young_time += os::elapsedTime() - start_time;
}
- cur = next;
+ return false;
}
- evacuation_info.set_regions_freed(local_free_list.length());
- policy->record_max_rs_lengths(rs_lengths);
+ FreeRegionList* local_free_list() { return &_local_free_list; }
+ size_t rs_lengths() const { return _rs_lengths; }
+ size_t before_used_bytes() const { return _before_used_bytes; }
+ size_t after_used_bytes() const { return _after_used_bytes; }
+
+ size_t bytes_allocated_in_old_since_last_gc() const { return _bytes_allocated_in_old_since_last_gc; }
+
+ size_t failure_used_words() const { return _failure_used_words; }
+ size_t failure_waste_words() const { return _failure_waste_words; }
+
+ double young_time() const { return _young_time; }
+ double non_young_time() const { return _non_young_time; }
+};
+
+void G1CollectedHeap::free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words) {
+ _eden.clear();
+
+ G1FreeCollectionSetClosure cl(surviving_young_words);
+ collection_set_iterate(&cl);
+
+ evacuation_info.set_regions_freed(cl.local_free_list()->length());
+ evacuation_info.increment_collectionset_used_after(cl.after_used_bytes());
+
+ G1Policy* policy = g1_policy();
+
+ policy->record_max_rs_lengths(cl.rs_lengths());
policy->cset_regions_freed();
- double end_sec = os::elapsedTime();
- double elapsed_ms = (end_sec - start_sec) * 1000.0;
-
- if (non_young) {
- non_young_time_ms += elapsed_ms;
- } else {
- young_time_ms += elapsed_ms;
- }
-
- prepend_to_freelist(&local_free_list);
- decrement_summary_bytes(pre_used);
- policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
- policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
+ prepend_to_freelist(cl.local_free_list());
+ decrement_summary_bytes(cl.before_used_bytes());
+
+ policy->add_bytes_allocated_in_old_since_last_gc(cl.bytes_allocated_in_old_since_last_gc());
+
+ _old_evac_stats.add_failure_used_and_waste(cl.failure_used_words(), cl.failure_waste_words());
+
+ policy->phase_times()->record_young_free_cset_time_ms(cl.young_time() * 1000.0);
+ policy->phase_times()->record_non_young_free_cset_time_ms(cl.non_young_time() * 1000.0);
+
+ collection_set->clear();
}
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
@@ -4960,25 +4875,22 @@
cl.humongous_free_count());
}
-// This routine is similar to the above but does not record
-// any policy statistics or update free lists; we are abandoning
-// the current incremental collection set in preparation of a
-// full collection. After the full GC we will start to build up
-// the incremental collection set again.
-// This is only called when we're doing a full collection
-// and is immediately followed by the tearing down of the young list.
-
-void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
- HeapRegion* cur = cs_head;
-
- while (cur != NULL) {
- HeapRegion* next = cur->next_in_collection_set();
- assert(cur->in_collection_set(), "bad CS");
- cur->set_next_in_collection_set(NULL);
- clear_in_cset(cur);
- cur->set_young_index_in_cset(-1);
- cur = next;
+class G1AbandonCollectionSetClosure : public HeapRegionClosure {
+public:
+ virtual bool doHeapRegion(HeapRegion* r) {
+ assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
+ G1CollectedHeap::heap()->clear_in_cset(r);
+ r->set_young_index_in_cset(-1);
+ return false;
}
+};
+
+void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
+ G1AbandonCollectionSetClosure cl;
+ collection_set->iterate(&cl);
+
+ collection_set->clear();
+ collection_set->stop_incremental_building();
}
void G1CollectedHeap::set_free_regions_coming() {
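With the rewrite above, free_collection_set() no longer walks a linked list itself: G1FreeCollectionSetClosure accumulates the per-region statistics during a single collection_set_iterate() pass, and the caller then reads the totals through accessors. A generic sketch of that accumulate-in-a-closure shape (made-up types, not the G1 classes):

    #include <cstddef>
    #include <cstdio>

    struct Region { size_t used_bytes; bool evacuation_failed; };

    class FreeStatsClosure {
      size_t _freed_bytes;
      size_t _failed_bytes;
    public:
      FreeStatsClosure() : _freed_bytes(0), _failed_bytes(0) {}
      // Returning false keeps the iteration going, mirroring doHeapRegion().
      bool do_region(const Region& r) {
        if (r.evacuation_failed) _failed_bytes += r.used_bytes;
        else                     _freed_bytes  += r.used_bytes;
        return false;
      }
      size_t freed_bytes() const  { return _freed_bytes; }
      size_t failed_bytes() const { return _failed_bytes; }
    };

    int main() {
      Region cset[] = { {1024, false}, {2048, true}, {512, false} };
      FreeStatsClosure cl;
      for (const Region& r : cset) {
        if (cl.do_region(r)) break;   // iteration aborts when the closure asks
      }
      std::printf("freed=%zu failed=%zu\n", cl.freed_bytes(), cl.failed_bytes());
      return 0;
    }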
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -778,13 +778,13 @@
// The closure used to refine a single card.
RefineCardTableEntryClosure* _refine_cte_cl;
- // After a collection pause, make the regions in the CS into free
+ // After a collection pause, convert the regions in the collection set into free
// regions.
- void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
+ void free_collection_set(G1CollectionSet* collection_set, EvacuationInfo& evacuation_info, const size_t* surviving_young_words);
// Abandon the current collection set without recording policy
// statistics or updating free lists.
- void abandon_collection_set(HeapRegion* cs_head);
+ void abandon_collection_set(G1CollectionSet* collection_set);
// The concurrent marker (and the thread it runs in.)
G1ConcurrentMark* _cm;
@@ -930,16 +930,6 @@
// discovery.
G1CMIsAliveClosure _is_alive_closure_cm;
- // Cache used by G1CollectedHeap::start_cset_region_for_worker().
- HeapRegion** _worker_cset_start_region;
-
- // Time stamp to validate the regions recorded in the cache
- // used by G1CollectedHeap::start_cset_region_for_worker().
- // The heap region entry for a given worker is valid iff
- // the associated time stamp value matches the current value
- // of G1CollectedHeap::_gc_time_stamp.
- uint* _worker_cset_start_region_time_stamp;
-
volatile bool _free_regions_coming;
public:
@@ -1211,19 +1201,14 @@
HeapRegionClaimer* hrclaimer,
bool concurrent = false) const;
- // Clear the cached cset start regions and (more importantly)
- // the time stamps. Called when we reset the GC time stamp.
- void clear_cset_start_regions();
-
- // Given the id of a worker, obtain or calculate a suitable
- // starting region for iterating over the current collection set.
- HeapRegion* start_cset_region_for_worker(uint worker_i);
-
// Iterate over the regions (if any) in the current collection set.
void collection_set_iterate(HeapRegionClosure* blk);
- // As above but starting from region r
- void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
+ // Iterate over the regions (if any) in the current collection set. The start
+ // region for the given worker_id is chosen so that the starting positions of
+ // all active workers are evenly spread across the collection set regions.
+ void collection_set_iterate_from(HeapRegionClosure *blk, uint worker_id);
HeapRegion* next_compaction_region(const HeapRegion* from) const;
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.inline.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -89,16 +89,13 @@
}
inline void G1CollectedHeap::reset_gc_time_stamp() {
+ assert_at_safepoint(true);
_gc_time_stamp = 0;
- OrderAccess::fence();
- // Clear the cached CSet starting regions and time stamps.
- // Their validity is dependent on the GC timestamp.
- clear_cset_start_regions();
}
inline void G1CollectedHeap::increment_gc_time_stamp() {
+ assert_at_safepoint(true);
++_gc_time_stamp;
- OrderAccess::fence();
}
inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -30,6 +30,7 @@
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.hpp"
+#include "logging/logStream.hpp"
#include "utilities/debug.hpp"
G1CollectorState* G1CollectionSet::collector_state() {
@@ -55,48 +56,63 @@
_eden_region_length(0),
_survivor_region_length(0),
_old_region_length(0),
-
- _head(NULL),
_bytes_used_before(0),
_recorded_rs_lengths(0),
+ _collection_set_regions(NULL),
+ _collection_set_cur_length(0),
+ _collection_set_max_length(0),
// Incremental CSet attributes
_inc_build_state(Inactive),
- _inc_head(NULL),
- _inc_tail(NULL),
_inc_bytes_used_before(0),
_inc_recorded_rs_lengths(0),
_inc_recorded_rs_lengths_diffs(0),
_inc_predicted_elapsed_time_ms(0.0),
- _inc_predicted_elapsed_time_ms_diffs(0.0),
- _inc_region_length(0) {}
+ _inc_predicted_elapsed_time_ms_diffs(0.0) {
+}
G1CollectionSet::~G1CollectionSet() {
+ if (_collection_set_regions != NULL) {
+ FREE_C_HEAP_ARRAY(uint, _collection_set_regions);
+ }
delete _cset_chooser;
}
void G1CollectionSet::init_region_lengths(uint eden_cset_region_length,
uint survivor_cset_region_length) {
+ assert_at_safepoint(true);
+
_eden_region_length = eden_cset_region_length;
_survivor_region_length = survivor_cset_region_length;
- assert(young_region_length() == _inc_region_length, "should match %u == %u", young_region_length(), _inc_region_length);
+ assert((size_t) young_region_length() == _collection_set_cur_length,
+ "Young region length %u should match collection set length " SIZE_FORMAT, young_region_length(), _collection_set_cur_length);
_old_region_length = 0;
}
+void G1CollectionSet::initialize(uint max_region_length) {
+ guarantee(_collection_set_regions == NULL, "Must only initialize once.");
+ _collection_set_max_length = max_region_length;
+ _collection_set_regions = NEW_C_HEAP_ARRAY(uint, max_region_length, mtGC);
+}
+
void G1CollectionSet::set_recorded_rs_lengths(size_t rs_lengths) {
_recorded_rs_lengths = rs_lengths;
}
// Add the heap region at the head of the non-incremental collection set
void G1CollectionSet::add_old_region(HeapRegion* hr) {
+ assert_at_safepoint(true);
+
assert(_inc_build_state == Active, "Precondition");
assert(hr->is_old(), "the region should be old");
assert(!hr->in_collection_set(), "should not already be in the CSet");
_g1->register_old_region_with_cset(hr);
- hr->set_next_in_collection_set(_head);
- _head = hr;
+
+ _collection_set_regions[_collection_set_cur_length++] = hr->hrm_index();
+ assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set now larger than maximum size.");
+
_bytes_used_before += hr->used();
size_t rs_length = hr->rem_set()->occupied();
_recorded_rs_lengths += rs_length;
@@ -105,12 +121,10 @@
// Initialize the per-collection-set information
void G1CollectionSet::start_incremental_building() {
+ assert(_collection_set_cur_length == 0, "Collection set must be empty before starting a new collection set.");
assert(_inc_build_state == Inactive, "Precondition");
- _inc_head = NULL;
- _inc_tail = NULL;
_inc_bytes_used_before = 0;
- _inc_region_length = 0;
_inc_recorded_rs_lengths = 0;
_inc_recorded_rs_lengths_diffs = 0;
@@ -151,6 +165,38 @@
_inc_predicted_elapsed_time_ms_diffs = 0.0;
}
+void G1CollectionSet::clear() {
+ assert_at_safepoint(true);
+ _collection_set_cur_length = 0;
+}
+
+void G1CollectionSet::iterate(HeapRegionClosure* cl) const {
+ iterate_from(cl, 0, 1);
+}
+
+void G1CollectionSet::iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const {
+ size_t len = _collection_set_cur_length;
+ OrderAccess::loadload();
+ if (len == 0) {
+ return;
+ }
+ size_t start_pos = (worker_id * len) / total_workers;
+ size_t cur_pos = start_pos;
+
+ do {
+ HeapRegion* r = G1CollectedHeap::heap()->region_at(_collection_set_regions[cur_pos]);
+ bool result = cl->doHeapRegion(r);
+ if (result) {
+ cl->incomplete();
+ return;
+ }
+ cur_pos++;
+ if (cur_pos == len) {
+ cur_pos = 0;
+ }
+ } while (cur_pos != start_pos);
+}
+
void G1CollectionSet::update_young_region_prediction(HeapRegion* hr,
size_t new_rs_length) {
// Update the CSet information that is dependent on the new RS length
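G1CollectionSet::iterate_from() above starts worker worker_id at index (worker_id * len) / total_workers and wraps around until it is back at its starting position, so every worker visits each region exactly once while the workers begin on evenly spaced parts of the array. A minimal standalone sketch of that spreading (not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    void iterate_from(const int* regions, size_t len,
                      unsigned worker_id, unsigned total_workers) {
      if (len == 0) return;
      size_t start = ((size_t)worker_id * len) / total_workers;
      size_t cur = start;
      do {
        std::printf("worker %u visits region %d\n", worker_id, regions[cur]);
        if (++cur == len) cur = 0;    // wrap around to the beginning
      } while (cur != start);         // stop after one full lap
    }

    int main() {
      int regions[] = { 10, 11, 12, 13, 14, 15 };
      for (unsigned w = 0; w < 3; w++) {
        iterate_from(regions, 6, w, 3);   // workers start at indices 0, 2, 4
      }
      return 0;
    }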
@@ -183,8 +229,16 @@
assert(hr->is_young(), "invariant");
assert(_inc_build_state == Active, "Precondition");
- hr->set_young_index_in_cset(_inc_region_length);
- _inc_region_length++;
+ size_t collection_set_length = _collection_set_cur_length;
+ assert(collection_set_length <= INT_MAX, "Collection set is too large with %d entries", (int)collection_set_length);
+ hr->set_young_index_in_cset((int)collection_set_length);
+
+ _collection_set_regions[collection_set_length] = hr->hrm_index();
+ // Concurrent readers must observe the store of the value in the array before an
+ // update to the length field.
+ OrderAccess::storestore();
+ _collection_set_cur_length++;
+ assert(_collection_set_cur_length <= _collection_set_max_length, "Collection set larger than maximum allowed.");
// This routine is used when:
// * adding survivor regions to the incremental cset at the end of an
@@ -218,59 +272,81 @@
assert(!hr->in_collection_set(), "invariant");
_g1->register_young_region_with_cset(hr);
- assert(hr->next_in_collection_set() == NULL, "invariant");
}
-// Add the region at the RHS of the incremental cset
void G1CollectionSet::add_survivor_regions(HeapRegion* hr) {
- // We should only ever be appending survivors at the end of a pause
- assert(hr->is_survivor(), "Logic");
-
- // Do the 'common' stuff
+ assert(hr->is_survivor(), "Must only add survivor regions, but is %s", hr->get_type_str());
add_young_region_common(hr);
-
- // Now add the region at the right hand side
- if (_inc_tail == NULL) {
- assert(_inc_head == NULL, "invariant");
- _inc_head = hr;
- } else {
- _inc_tail->set_next_in_collection_set(hr);
- }
- _inc_tail = hr;
}
-// Add the region to the LHS of the incremental cset
void G1CollectionSet::add_eden_region(HeapRegion* hr) {
- // Survivors should be added to the RHS at the end of a pause
- assert(hr->is_eden(), "Logic");
-
- // Do the 'common' stuff
+ assert(hr->is_eden(), "Must only add eden regions, but is %s", hr->get_type_str());
add_young_region_common(hr);
-
- // Add the region at the left hand side
- hr->set_next_in_collection_set(_inc_head);
- if (_inc_head == NULL) {
- assert(_inc_tail == NULL, "Invariant");
- _inc_tail = hr;
- }
- _inc_head = hr;
}
#ifndef PRODUCT
-void G1CollectionSet::print(HeapRegion* list_head, outputStream* st) {
- assert(list_head == inc_head() || list_head == head(), "must be");
+class G1VerifyYoungAgesClosure : public HeapRegionClosure {
+public:
+ bool _valid;
+public:
+ G1VerifyYoungAgesClosure() : HeapRegionClosure(), _valid(true) { }
+
+ virtual bool doHeapRegion(HeapRegion* r) {
+ guarantee(r->is_young(), "Region must be young but is %s", r->get_type_str());
+
+ SurvRateGroup* group = r->surv_rate_group();
+
+ if (group == NULL) {
+ log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
+ _valid = false;
+ }
+
+ if (r->age_in_surv_rate_group() < 0) {
+ log_error(gc, verify)("## encountered negative age in young region");
+ _valid = false;
+ }
+
+ return false;
+ }
+
+ bool valid() const { return _valid; }
+};
+bool G1CollectionSet::verify_young_ages() {
+ assert_at_safepoint(true);
+
+ G1VerifyYoungAgesClosure cl;
+ iterate(&cl);
+
+ if (!cl.valid()) {
+ LogStreamHandle(Error, gc, verify) log;
+ print(&log);
+ }
+
+ return cl.valid();
+}
+
+class G1PrintCollectionSetClosure : public HeapRegionClosure {
+ outputStream* _st;
+public:
+ G1PrintCollectionSetClosure(outputStream* st) : HeapRegionClosure(), _st(st) { }
+
+ virtual bool doHeapRegion(HeapRegion* r) {
+ assert(r->in_collection_set(), "Region %u should be in collection set", r->hrm_index());
+ _st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT " N: " PTR_FORMAT ", age: %4d",
+ HR_FORMAT_PARAMS(r),
+ p2i(r->prev_top_at_mark_start()),
+ p2i(r->next_top_at_mark_start()),
+ r->age_in_surv_rate_group_cond());
+ return false;
+ }
+};
+
+void G1CollectionSet::print(outputStream* st) {
st->print_cr("\nCollection_set:");
- HeapRegion* csr = list_head;
- while (csr != NULL) {
- HeapRegion* next = csr->next_in_collection_set();
- assert(csr->in_collection_set(), "bad CS");
- st->print_cr(" " HR_FORMAT ", P: " PTR_FORMAT "N: " PTR_FORMAT ", age: %4d",
- HR_FORMAT_PARAMS(csr),
- p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
- csr->age_in_surv_rate_group_cond());
- csr = next;
- }
+
+ G1PrintCollectionSetClosure cl(st);
+ iterate(&cl);
}
#endif // !PRODUCT
@@ -281,7 +357,6 @@
guarantee(target_pause_time_ms > 0.0,
"target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
- guarantee(_head == NULL, "Precondition");
size_t pending_cards = _policy->pending_cards();
double base_time_ms = _policy->predict_base_elapsed_time_ms(pending_cards);
@@ -305,7 +380,6 @@
// Clear the fields that point to the survivor list - they are all young now.
survivors->convert_to_eden();
- _head = _inc_head;
_bytes_used_before = _inc_bytes_used_before;
time_remaining_ms = MAX2(time_remaining_ms - _inc_predicted_elapsed_time_ms, 0.0);
@@ -422,23 +496,41 @@
}
#ifdef ASSERT
-void G1CollectionSet::verify_young_cset_indices() const {
- ResourceMark rm;
- uint* heap_region_indices = NEW_RESOURCE_ARRAY(uint, young_region_length());
- for (uint i = 0; i < young_region_length(); ++i) {
- heap_region_indices[i] = (uint)-1;
+class G1VerifyYoungCSetIndicesClosure : public HeapRegionClosure {
+private:
+ size_t _young_length;
+ int* _heap_region_indices;
+public:
+ G1VerifyYoungCSetIndicesClosure(size_t young_length) : HeapRegionClosure(), _young_length(young_length) {
+ _heap_region_indices = NEW_C_HEAP_ARRAY(int, young_length, mtGC);
+ for (size_t i = 0; i < young_length; i++) {
+ _heap_region_indices[i] = -1;
+ }
+ }
+ ~G1VerifyYoungCSetIndicesClosure() {
+ FREE_C_HEAP_ARRAY(int, _heap_region_indices);
}
- for (HeapRegion* hr = _inc_head; hr != NULL; hr = hr->next_in_collection_set()) {
- const int idx = hr->young_index_in_cset();
- assert(idx > -1, "must be set for all inc cset regions");
- assert((uint)idx < young_region_length(), "young cset index too large");
+ virtual bool doHeapRegion(HeapRegion* r) {
+ const int idx = r->young_index_in_cset();
+
+ assert(idx > -1, "Young index must be set for all regions in the incremental collection set but is not for region %u.", r->hrm_index());
+ assert((size_t)idx < _young_length, "Young cset index too large for region %u", r->hrm_index());
+
+ assert(_heap_region_indices[idx] == -1,
+ "Index %d used by multiple regions, first use by region %u, second by region %u",
+ idx, _heap_region_indices[idx], r->hrm_index());
- assert(heap_region_indices[idx] == (uint)-1,
- "index %d used by multiple regions, first use by %u, second by %u",
- idx, heap_region_indices[idx], hr->hrm_index());
+ _heap_region_indices[idx] = r->hrm_index();
+
+ return false;
+ }
+};
- heap_region_indices[idx] = hr->hrm_index();
- }
+void G1CollectionSet::verify_young_cset_indices() const {
+ assert_at_safepoint(true);
+
+ G1VerifyYoungCSetIndicesClosure cl(_collection_set_cur_length);
+ iterate(&cl);
}
#endif
--- a/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1CollectionSet.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -47,10 +47,15 @@
uint _survivor_region_length;
uint _old_region_length;
- // The head of the list (via "next_in_collection_set()") representing the
- // current collection set. Set from the incrementally built collection
- // set at the start of the pause.
- HeapRegion* _head;
+ // The actual collection set as a set of region indices.
+ // All entries in _collection_set_regions below _collection_set_cur_length are
+ // assumed to be valid entries.
+ // We assume that at any time there is at most one writer and (one or more)
+ // concurrent readers, so a storestore barrier on the writer side and a loadload
+ // barrier on the reader side are sufficient.
+ uint* _collection_set_regions;
+ volatile size_t _collection_set_cur_length;
+ size_t _collection_set_max_length;
// The number of bytes in the collection set before the pause. Set from
// the incrementally built collection set at the start of an evacuation
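The comment above describes single-writer publication: the writer stores the new region index, issues a storestore barrier, and only then bumps _collection_set_cur_length, while readers load the length, issue a loadload barrier, and touch only entries below it. A sketch of the same protocol using C++11 release/acquire atomics as stand-ins for HotSpot's OrderAccess barriers (illustrative only, not the VM code):

    #include <atomic>
    #include <cstddef>

    static const size_t MAX_REGIONS = 1024;
    static unsigned regions[MAX_REGIONS];        // written by a single thread
    static std::atomic<size_t> cur_length(0);    // published length

    // Single writer: store the element first, then publish the new length.
    void add_region(unsigned region_index) {
      size_t len = cur_length.load(std::memory_order_relaxed);
      regions[len] = region_index;                           // plain store
      cur_length.store(len + 1, std::memory_order_release);  // "storestore"
    }

    // Any reader: read the length once, then only touch entries below it.
    template <typename Visitor>
    void iterate(Visitor visit) {
      size_t len = cur_length.load(std::memory_order_acquire);  // "loadload"
      for (size_t i = 0; i < len; i++) {
        visit(regions[i]);
      }
    }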
@@ -71,12 +76,6 @@
CSetBuildType _inc_build_state;
- // The head of the incrementally built collection set.
- HeapRegion* _inc_head;
-
- // The tail of the incrementally built collection set.
- HeapRegion* _inc_tail;
-
// The number of bytes in the incrementally built collection set.
// Used to set _collection_set_bytes_used_before at the start of
// an evacuation pause.
@@ -105,8 +104,6 @@
// See the comment for _inc_recorded_rs_lengths_diffs.
double _inc_predicted_elapsed_time_ms_diffs;
- uint _inc_region_length;
-
G1CollectorState* collector_state();
G1GCPhaseTimes* phase_times();
@@ -117,6 +114,9 @@
G1CollectionSet(G1CollectedHeap* g1h, G1Policy* policy);
~G1CollectionSet();
+ // Initializes the collection set giving the maximum possible length of the collection set.
+ void initialize(uint max_region_length);
+
CollectionSetChooser* cset_chooser();
void init_region_lengths(uint eden_cset_region_length,
@@ -133,35 +133,30 @@
uint survivor_region_length() const { return _survivor_region_length; }
uint old_region_length() const { return _old_region_length; }
- // Incremental CSet Support
-
- // The head of the incrementally built collection set.
- HeapRegion* inc_head() { return _inc_head; }
-
- // The tail of the incrementally built collection set.
- HeapRegion* inc_tail() { return _inc_tail; }
+ // Incremental collection set support
// Initialize incremental collection set info.
void start_incremental_building();
- // Perform any final calculations on the incremental CSet fields
+ // Perform any final calculations on the incremental collection set fields
// before we can use them.
void finalize_incremental_building();
- void clear_incremental() {
- _inc_head = NULL;
- _inc_tail = NULL;
- _inc_region_length = 0;
- }
+ // Reset the contents of the collection set.
+ void clear();
+
+ // Iterate over the collection set, applying the given HeapRegionClosure on all of them.
+ // Iteration is aborted early if the closure's doHeapRegion() returns true for
+ // a region.
+ void iterate(HeapRegionClosure* cl) const;
- // Stop adding regions to the incremental collection set
- void stop_incremental_building() { _inc_build_state = Inactive; }
+ // Iterate over the collection set, applying the given HeapRegionClosure on all of them,
+ // choosing each worker's starting position from its worker_id so that the
+ // total_workers starting positions are evenly spread across the collection set.
+ void iterate_from(HeapRegionClosure* cl, uint worker_id, uint total_workers) const;
- // The head of the list (via "next_in_collection_set()") representing the
- // current collection set.
- HeapRegion* head() { return _head; }
-
- void clear_head() { _head = NULL; }
+ // Stop adding regions to the incremental collection set.
+ void stop_incremental_building() { _inc_build_state = Inactive; }
size_t recorded_rs_lengths() { return _recorded_rs_lengths; }
@@ -174,33 +169,32 @@
}
// Choose a new collection set. Marks the chosen regions as being
- // "in_collection_set", and links them together. The head and number of
- // the collection set are available via access methods.
+ // "in_collection_set".
double finalize_young_part(double target_pause_time_ms, G1SurvivorRegions* survivors);
void finalize_old_part(double time_remaining_ms);
- // Add old region "hr" to the CSet.
+ // Add old region "hr" to the collection set.
void add_old_region(HeapRegion* hr);
// Update information about hr in the aggregated information for
// the incrementally built collection set.
void update_young_region_prediction(HeapRegion* hr, size_t new_rs_length);
- // Add hr to the LHS of the incremental collection set.
+ // Add eden region to the collection set.
void add_eden_region(HeapRegion* hr);
- // Add hr to the RHS of the incremental collection set.
+ // Add survivor region to the collection set.
void add_survivor_regions(HeapRegion* hr);
#ifndef PRODUCT
- void print(HeapRegion* list_head, outputStream* st);
+ bool verify_young_ages();
+
+ void print(outputStream* st);
#endif // !PRODUCT
private:
- // Update the incremental cset information when adding a region
- // (should not be called directly).
+ // Update the incremental collection set information when adding a region.
void add_young_region_common(HeapRegion* hr);
-
};
#endif // SHARE_VM_GC_G1_G1COLLECTIONSET_HPP
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -394,37 +394,6 @@
}
}
-#ifndef PRODUCT
-bool G1DefaultPolicy::verify_young_ages() {
- bool ret = true;
-
- for (HeapRegion* curr = _collection_set->inc_head();
- curr != NULL;
- curr = curr->next_in_collection_set()) {
- guarantee(curr->is_young(), "Region must be young");
-
- SurvRateGroup* group = curr->surv_rate_group();
-
- if (group == NULL) {
- log_error(gc, verify)("## encountered NULL surv_rate_group in young region");
- ret = false;
- }
-
- if (curr->age_in_surv_rate_group() < 0) {
- log_error(gc, verify)("## encountered negative age in young region");
- ret = false;
- }
- }
-
- if (!ret) {
- LogStreamHandle(Error, gc, verify) log;
- _collection_set->print(_collection_set->inc_head(), &log);
- }
-
- return ret;
-}
-#endif // PRODUCT
-
void G1DefaultPolicy::record_full_collection_start() {
_full_collection_start_sec = os::elapsedTime();
// Release the future to-space so that it is available for compaction into.
@@ -488,7 +457,7 @@
_short_lived_surv_rate_group->stop_adding_regions();
_survivors_age_table.clear();
- assert( verify_young_ages(), "region age verification" );
+ assert(_g1->collection_set()->verify_young_ages(), "region age verification failed");
}
void G1DefaultPolicy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -89,10 +89,6 @@
size_t _rs_lengths_prediction;
-#ifndef PRODUCT
- bool verify_young_ages(HeapRegion* head, SurvRateGroup *surv_rate_group);
-#endif // PRODUCT
-
size_t _pending_cards;
// The amount of allocated bytes in old gen during the last mutator and the following
@@ -116,10 +112,6 @@
hr->install_surv_rate_group(_survivor_surv_rate_group);
}
-#ifndef PRODUCT
- bool verify_young_ages();
-#endif // PRODUCT
-
void record_max_rs_lengths(size_t rs_lengths) {
_max_rs_lengths = rs_lengths;
}
--- a/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1EvacFailure.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -251,6 +251,5 @@
void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, &_hrclaimer);
- HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
- _g1h->collection_set_iterate_from(hr, &rsfp_cl);
+ _g1h->collection_set_iterate_from(&rsfp_cl, worker_id);
}
--- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -580,15 +580,20 @@
}
}
-void G1HeapVerifier::verify_dirty_young_list(HeapRegion* head) {
- G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
- for (HeapRegion* hr = head; hr != NULL; hr = hr->next_in_collection_set()) {
- verify_dirty_region(hr);
+class G1VerifyDirtyYoungListClosure : public HeapRegionClosure {
+private:
+ G1HeapVerifier* _verifier;
+public:
+ G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { }
+ virtual bool doHeapRegion(HeapRegion* r) {
+ _verifier->verify_dirty_region(r);
+ return false;
}
-}
+};
void G1HeapVerifier::verify_dirty_young_regions() {
- verify_dirty_young_list(_g1h->collection_set()->inc_head());
+ G1VerifyDirtyYoungListClosure cl(this);
+ _g1h->collection_set()->iterate(&cl);
}
bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, G1CMBitMapRO* bitmap,
--- a/hotspot/src/share/vm/gc/g1/g1HeapVerifier.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1HeapVerifier.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -108,7 +108,6 @@
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
- void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
};
--- a/hotspot/src/share/vm/gc/g1/g1RemSet.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1RemSet.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -382,10 +382,8 @@
uint worker_i) {
double rs_time_start = os::elapsedTime();
- HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
-
G1ScanRSClosure cl(_scan_state, oops_in_heap_closure, heap_region_codeblobs, worker_i);
- _g1->collection_set_iterate_from(startRegion, &cl);
+ _g1->collection_set_iterate_from(&cl, worker_i);
double scan_rs_time_sec = (os::elapsedTime() - rs_time_start) -
cl.strong_code_root_scan_time_sec();
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupQueue.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -154,8 +154,8 @@
}
void G1StringDedupQueue::print_statistics() {
- log_debug(gc, stringdedup)(" [Queue]");
- log_debug(gc, stringdedup)(" [Dropped: " UINTX_FORMAT "]", _queue->_dropped);
+ log_debug(gc, stringdedup)(" Queue");
+ log_debug(gc, stringdedup)(" Dropped: " UINTX_FORMAT, _queue->_dropped);
}
void G1StringDedupQueue::verify() {
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupStat.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupStat.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -42,7 +42,9 @@
_idle(0),
_exec(0),
_block(0),
- _start(0.0),
+ _start_concurrent(0.0),
+ _end_concurrent(0.0),
+ _start_phase(0.0),
_idle_elapsed(0.0),
_exec_elapsed(0.0),
_block_elapsed(0.0) {
@@ -69,7 +71,13 @@
_block_elapsed += stat._block_elapsed;
}
-void G1StringDedupStat::print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
+void G1StringDedupStat::print_start(const G1StringDedupStat& last_stat) {
+ log_info(gc, stringdedup)(
+ "Concurrent String Deduplication (" G1_STRDEDUP_TIME_FORMAT ")",
+ G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent));
+}
+
+void G1StringDedupStat::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
double total_deduped_bytes_percent = 0.0;
if (total_stat._new_bytes > 0) {
@@ -79,13 +87,16 @@
log_info(gc, stringdedup)(
"Concurrent String Deduplication "
- G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS "), avg "
- G1_STRDEDUP_PERCENT_FORMAT_NS ", " G1_STRDEDUP_TIME_FORMAT,
+ G1_STRDEDUP_BYTES_FORMAT_NS "->" G1_STRDEDUP_BYTES_FORMAT_NS "(" G1_STRDEDUP_BYTES_FORMAT_NS ") "
+ "avg " G1_STRDEDUP_PERCENT_FORMAT_NS " "
+ "(" G1_STRDEDUP_TIME_FORMAT ", " G1_STRDEDUP_TIME_FORMAT ") " G1_STRDEDUP_TIME_FORMAT_MS,
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._new_bytes - last_stat._deduped_bytes),
G1_STRDEDUP_BYTES_PARAM(last_stat._deduped_bytes),
total_deduped_bytes_percent,
- last_stat._exec_elapsed);
+ G1_STRDEDUP_TIME_PARAM(last_stat._start_concurrent),
+ G1_STRDEDUP_TIME_PARAM(last_stat._end_concurrent),
+ G1_STRDEDUP_TIME_PARAM_MS(last_stat._exec_elapsed));
}
void G1StringDedupStat::print_statistics(const G1StringDedupStat& stat, bool total) {
@@ -134,23 +145,31 @@
if (total) {
log_debug(gc, stringdedup)(
- " [Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
- stat._exec, stat._exec_elapsed, stat._idle, stat._idle_elapsed, stat._block, stat._block_elapsed);
+ " Total Exec: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
+ ", Idle: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS
+ ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
+ stat._exec, G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
+ stat._idle, G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
+ stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
} else {
log_debug(gc, stringdedup)(
- " [Last Exec: " G1_STRDEDUP_TIME_FORMAT ", Idle: " G1_STRDEDUP_TIME_FORMAT ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT "]",
- stat._exec_elapsed, stat._idle_elapsed, stat._block, stat._block_elapsed);
+ " Last Exec: " G1_STRDEDUP_TIME_FORMAT_MS
+ ", Idle: " G1_STRDEDUP_TIME_FORMAT_MS
+ ", Blocked: " UINTX_FORMAT "/" G1_STRDEDUP_TIME_FORMAT_MS,
+ G1_STRDEDUP_TIME_PARAM_MS(stat._exec_elapsed),
+ G1_STRDEDUP_TIME_PARAM_MS(stat._idle_elapsed),
+ stat._block, G1_STRDEDUP_TIME_PARAM_MS(stat._block_elapsed));
}
- log_debug(gc, stringdedup)(" [Inspected: " G1_STRDEDUP_OBJECTS_FORMAT "]", stat._inspected);
- log_debug(gc, stringdedup)(" [Skipped: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._skipped, skipped_percent);
- log_debug(gc, stringdedup)(" [Hashed: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._hashed, hashed_percent);
- log_debug(gc, stringdedup)(" [Known: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]", stat._known, known_percent);
- log_debug(gc, stringdedup)(" [New: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "]",
+ log_debug(gc, stringdedup)(" Inspected: " G1_STRDEDUP_OBJECTS_FORMAT, stat._inspected);
+ log_debug(gc, stringdedup)(" Skipped: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._skipped, skipped_percent);
+ log_debug(gc, stringdedup)(" Hashed: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._hashed, hashed_percent);
+ log_debug(gc, stringdedup)(" Known: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")", stat._known, known_percent);
+ log_debug(gc, stringdedup)(" New: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT,
stat._new, new_percent, G1_STRDEDUP_BYTES_PARAM(stat._new_bytes));
- log_debug(gc, stringdedup)(" [Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+ log_debug(gc, stringdedup)(" Deduplicated: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
stat._deduped, deduped_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_bytes), deduped_bytes_percent);
- log_debug(gc, stringdedup)(" [Young: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+ log_debug(gc, stringdedup)(" Young: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
stat._deduped_young, deduped_young_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_young_bytes), deduped_young_bytes_percent);
- log_debug(gc, stringdedup)(" [Old: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")]",
+ log_debug(gc, stringdedup)(" Old: " G1_STRDEDUP_OBJECTS_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ") " G1_STRDEDUP_BYTES_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT ")",
stat._deduped_old, deduped_old_percent, G1_STRDEDUP_BYTES_PARAM(stat._deduped_old_bytes), deduped_old_bytes_percent);
}
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupStat.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupStat.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,11 +30,14 @@
// Macros for GC log output formating
#define G1_STRDEDUP_OBJECTS_FORMAT UINTX_FORMAT_W(12)
-#define G1_STRDEDUP_TIME_FORMAT "%1.7lf secs"
-#define G1_STRDEDUP_PERCENT_FORMAT "%5.1lf%%"
-#define G1_STRDEDUP_PERCENT_FORMAT_NS "%.1lf%%"
-#define G1_STRDEDUP_BYTES_FORMAT "%8.1lf%s"
-#define G1_STRDEDUP_BYTES_FORMAT_NS "%.1lf%s"
+#define G1_STRDEDUP_TIME_FORMAT "%.3fs"
+#define G1_STRDEDUP_TIME_PARAM(time) (time)
+#define G1_STRDEDUP_TIME_FORMAT_MS "%.3fms"
+#define G1_STRDEDUP_TIME_PARAM_MS(time) ((time) * MILLIUNITS)
+#define G1_STRDEDUP_PERCENT_FORMAT "%5.1f%%"
+#define G1_STRDEDUP_PERCENT_FORMAT_NS "%.1f%%"
+#define G1_STRDEDUP_BYTES_FORMAT "%8.1f%s"
+#define G1_STRDEDUP_BYTES_FORMAT_NS "%.1f%s"
#define G1_STRDEDUP_BYTES_PARAM(bytes) byte_size_in_proper_unit((double)(bytes)), proper_unit_for_byte_size((bytes))
//
@@ -60,7 +63,9 @@
uintx _block;
// Time spent by the deduplication thread in different phases
- double _start;
+ double _start_concurrent;
+ double _end_concurrent;
+ double _start_phase;
double _idle_elapsed;
double _exec_elapsed;
double _block_elapsed;
@@ -104,38 +109,41 @@
}
void mark_idle() {
- _start = os::elapsedTime();
+ _start_phase = os::elapsedTime();
_idle++;
}
void mark_exec() {
double now = os::elapsedTime();
- _idle_elapsed = now - _start;
- _start = now;
+ _idle_elapsed = now - _start_phase;
+ _start_phase = now;
+ _start_concurrent = now;
_exec++;
}
void mark_block() {
double now = os::elapsedTime();
- _exec_elapsed += now - _start;
- _start = now;
+ _exec_elapsed += now - _start_phase;
+ _start_phase = now;
_block++;
}
void mark_unblock() {
double now = os::elapsedTime();
- _block_elapsed += now - _start;
- _start = now;
+ _block_elapsed += now - _start_phase;
+ _start_phase = now;
}
void mark_done() {
double now = os::elapsedTime();
- _exec_elapsed += now - _start;
+ _exec_elapsed += now - _start_phase;
+ _end_concurrent = now;
}
void add(const G1StringDedupStat& stat);
- static void print_summary(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
+ static void print_start(const G1StringDedupStat& last_stat);
+ static void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
static void print_statistics(const G1StringDedupStat& stat, bool total);
};
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupTable.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -196,7 +196,8 @@
}
double end = os::elapsedTime();
- log_trace(gc, stringdedup)("Deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT, count, end - start);
+ log_trace(gc, stringdedup)("Deleted " UINTX_FORMAT " entries, " G1_STRDEDUP_TIME_FORMAT_MS,
+ count, G1_STRDEDUP_TIME_PARAM_MS(end - start));
}
G1StringDedupTable* G1StringDedupTable::_table = NULL;
@@ -610,14 +611,14 @@
void G1StringDedupTable::print_statistics() {
Log(gc, stringdedup) log;
- log.debug(" [Table]");
- log.debug(" [Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS "]",
+ log.debug(" Table");
+ log.debug(" Memory Usage: " G1_STRDEDUP_BYTES_FORMAT_NS,
G1_STRDEDUP_BYTES_PARAM(_table->_size * sizeof(G1StringDedupEntry*) + (_table->_entries + _entry_cache->size()) * sizeof(G1StringDedupEntry)));
- log.debug(" [Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT "]", _table->_size, _min_size, _max_size);
- log.debug(" [Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT "]",
+ log.debug(" Size: " SIZE_FORMAT ", Min: " SIZE_FORMAT ", Max: " SIZE_FORMAT, _table->_size, _min_size, _max_size);
+ log.debug(" Entries: " UINTX_FORMAT ", Load: " G1_STRDEDUP_PERCENT_FORMAT_NS ", Cached: " UINTX_FORMAT ", Added: " UINTX_FORMAT ", Removed: " UINTX_FORMAT,
_table->_entries, (double)_table->_entries / (double)_table->_size * 100.0, _entry_cache->size(), _entries_added, _entries_removed);
- log.debug(" [Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")]",
+ log.debug(" Resize Count: " UINTX_FORMAT ", Shrink Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS "), Grow Threshold: " UINTX_FORMAT "(" G1_STRDEDUP_PERCENT_FORMAT_NS ")",
_resize_count, _table->_shrink_threshold, _shrink_load_factor * 100.0, _table->_grow_threshold, _grow_load_factor * 100.0);
- log.debug(" [Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x]", _rehash_count, _rehash_threshold, _table->_hash_seed);
- log.debug(" [Age Threshold: " UINTX_FORMAT "]", StringDeduplicationAgeThreshold);
+ log.debug(" Rehash Count: " UINTX_FORMAT ", Rehash Threshold: " UINTX_FORMAT ", Hash Seed: 0x%x", _rehash_count, _rehash_threshold, _table->_hash_seed);
+ log.debug(" Age Threshold: " UINTX_FORMAT, StringDeduplicationAgeThreshold);
}
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupThread.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupThread.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -103,6 +103,7 @@
SuspendibleThreadSetJoiner sts_join;
stat.mark_exec();
+ print_start(stat);
// Process the queue
for (;;) {
@@ -123,9 +124,8 @@
stat.mark_done();
- // Print statistics
total_stat.add(stat);
- print(stat, total_stat);
+ print_end(stat, total_stat);
}
G1StringDedupTable::clean_entry_cache();
@@ -136,14 +136,16 @@
G1StringDedupQueue::cancel_wait();
}
-void G1StringDedupThread::print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
- if (log_is_enabled(Info, gc, stringdedup)) {
- G1StringDedupStat::print_summary(last_stat, total_stat);
- if (log_is_enabled(Debug, gc, stringdedup)) {
- G1StringDedupStat::print_statistics(last_stat, false);
- G1StringDedupStat::print_statistics(total_stat, true);
- G1StringDedupTable::print_statistics();
- G1StringDedupQueue::print_statistics();
- }
+void G1StringDedupThread::print_start(const G1StringDedupStat& last_stat) {
+ G1StringDedupStat::print_start(last_stat);
+}
+
+void G1StringDedupThread::print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
+ G1StringDedupStat::print_end(last_stat, total_stat);
+ if (log_is_enabled(Debug, gc, stringdedup)) {
+ G1StringDedupStat::print_statistics(last_stat, false);
+ G1StringDedupStat::print_statistics(total_stat, true);
+ G1StringDedupTable::print_statistics();
+ G1StringDedupQueue::print_statistics();
}
}
--- a/hotspot/src/share/vm/gc/g1/g1StringDedupThread.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1StringDedupThread.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -43,7 +43,8 @@
G1StringDedupThread();
~G1StringDedupThread();
- void print(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
+ void print_start(const G1StringDedupStat& last_stat);
+ void print_end(const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat);
void run_service();
void stop_service();
--- a/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/g1YoungRemSetSamplingThread.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -71,38 +71,51 @@
_monitor.notify();
}
+class G1YoungRemSetSamplingClosure : public HeapRegionClosure {
+ SuspendibleThreadSetJoiner* _sts;
+ size_t _regions_visited;
+ size_t _sampled_rs_lengths;
+public:
+ G1YoungRemSetSamplingClosure(SuspendibleThreadSetJoiner* sts) :
+ HeapRegionClosure(), _sts(sts), _regions_visited(0), _sampled_rs_lengths(0) { }
+
+ virtual bool doHeapRegion(HeapRegion* r) {
+ size_t rs_length = r->rem_set()->occupied();
+ _sampled_rs_lengths += rs_length;
+
+ // Update the collection set policy information for this region
+ G1CollectedHeap::heap()->collection_set()->update_young_region_prediction(r, rs_length);
+
+ _regions_visited++;
+
+ if (_regions_visited == 10) {
+ if (_sts->should_yield()) {
+ _sts->yield();
+ // A gc may have occurred and our sampling data is stale and further
+ // traversal of the collection set is unsafe
+ return true;
+ }
+ _regions_visited = 0;
+ }
+ return false;
+ }
+
+ size_t sampled_rs_lengths() const { return _sampled_rs_lengths; }
+};
+
void G1YoungRemSetSamplingThread::sample_young_list_rs_lengths() {
SuspendibleThreadSetJoiner sts;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1Policy* g1p = g1h->g1_policy();
- G1CollectionSet* g1cs = g1h->collection_set();
+
if (g1p->adaptive_young_list_length()) {
- int regions_visited = 0;
- HeapRegion* hr = g1cs->inc_head();
- size_t sampled_rs_lengths = 0;
-
- while (hr != NULL) {
- size_t rs_length = hr->rem_set()->occupied();
- sampled_rs_lengths += rs_length;
-
- // Update the collection set policy information for this region
- g1cs->update_young_region_prediction(hr, rs_length);
-
- ++regions_visited;
+ G1YoungRemSetSamplingClosure cl(&sts);
- // we try to yield every time we visit 10 regions
- if (regions_visited == 10) {
- if (sts.should_yield()) {
- sts.yield();
- // A gc may have occurred and our sampling data is stale and further
- // traversal of the collection set is unsafe
- return;
- }
- regions_visited = 0;
- }
- assert(hr == g1cs->inc_tail() || hr->next_in_collection_set() != NULL, "next should only be null at tail of icset");
- hr = hr->next_in_collection_set();
+ G1CollectionSet* g1cs = g1h->collection_set();
+ g1cs->iterate(&cl);
+
+ if (cl.complete()) {
+ g1p->revise_young_list_target_length_if_necessary(cl.sampled_rs_lengths());
}
- g1p->revise_young_list_target_length_if_necessary(sampled_rs_lengths);
}
}
--- a/hotspot/src/share/vm/gc/g1/heapRegion.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -284,7 +284,6 @@
_hrm_index(hrm_index),
_allocation_context(AllocationContext::system()),
_humongous_start_region(NULL),
- _next_in_special_set(NULL),
_evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _gc_efficiency(0.0),
_next(NULL), _prev(NULL),
--- a/hotspot/src/share/vm/gc/g1/heapRegion.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -261,12 +261,6 @@
// True iff an attempt to evacuate an object in the region failed.
bool _evacuation_failed;
- // A heap region may be a member one of a number of special subsets, each
- // represented as linked lists through the field below. Currently, there
- // is only one set:
- // The collection set.
- HeapRegion* _next_in_special_set;
-
// Fields used by the HeapRegionSetBase class and subclasses.
HeapRegion* _next;
HeapRegion* _prev;
@@ -476,9 +470,6 @@
inline bool in_collection_set() const;
- inline HeapRegion* next_in_collection_set() const;
- inline void set_next_in_collection_set(HeapRegion* r);
-
void set_allocation_context(AllocationContext_t context) {
_allocation_context = context;
}
@@ -744,7 +735,7 @@
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
friend class HeapRegionManager;
- friend class G1CollectedHeap;
+ friend class G1CollectionSet;
bool _complete;
void incomplete() { _complete = false; }
--- a/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/g1/heapRegion.inline.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -230,18 +230,4 @@
return G1CollectedHeap::heap()->is_in_cset(this);
}
-inline HeapRegion* HeapRegion::next_in_collection_set() const {
- assert(in_collection_set(), "should only invoke on member of CS.");
- assert(_next_in_special_set == NULL ||
- _next_in_special_set->in_collection_set(),
- "Malformed CS.");
- return _next_in_special_set;
-}
-
-void HeapRegion::set_next_in_collection_set(HeapRegion* r) {
- assert(in_collection_set(), "should only invoke on member of CS.");
- assert(r == NULL || r->in_collection_set(), "Malformed CS.");
- _next_in_special_set = r;
-}
-
#endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP
--- a/hotspot/src/share/vm/gc/parallel/gcTaskManager.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/gcTaskManager.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -386,13 +386,21 @@
void GCTaskManager::add_workers(bool initializing) {
os::ThreadType worker_type = os::pgc_thread;
+ uint previous_created_workers = _created_workers;
+
_created_workers = WorkerManager::add_workers(this,
_active_workers,
- (uint) _workers,
+ _workers,
_created_workers,
worker_type,
initializing);
_active_workers = MIN2(_created_workers, _active_workers);
+
+ WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
+}
+
+const char* GCTaskManager::group_name() {
+ return "ParGC Thread";
}
void GCTaskManager::initialize() {
--- a/hotspot/src/share/vm/gc/parallel/gcTaskManager.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/gcTaskManager.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -556,6 +556,8 @@
GCTaskThread* install_worker(uint worker_id);
// Add GC workers as needed.
void add_workers(bool initializing);
+ // Base name (without worker id #) of threads.
+ const char* group_name();
};
//
--- a/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/gcTaskThread.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -45,7 +45,7 @@
_time_stamp_index(0)
{
set_id(which);
- set_name("ParGC Thread#%d", which);
+ set_name("%s#%d", manager->group_name(), which);
}
GCTaskThread::~GCTaskThread() {
--- a/hotspot/src/share/vm/gc/parallel/gcTaskThread.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/parallel/gcTaskThread.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -55,6 +55,7 @@
return new GCTaskThread(manager, which, processor_id);
}
public:
+
static void destroy(GCTaskThread* manager) {
if (manager != NULL) {
delete manager;
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -159,6 +159,8 @@
inline static void post_allocation_setup_array(KlassHandle klass,
HeapWord* obj, int length);
+ inline static void post_allocation_setup_class(KlassHandle klass, HeapWord* obj, int size);
+
// Clears an allocated object.
inline static void init_obj(HeapWord* obj, size_t size);
@@ -300,6 +302,7 @@
inline static oop obj_allocate(KlassHandle klass, int size, TRAPS);
inline static oop array_allocate(KlassHandle klass, int size, int length, TRAPS);
inline static oop array_allocate_nozero(KlassHandle klass, int size, int length, TRAPS);
+ inline static oop class_allocate(KlassHandle klass, int size, TRAPS);
inline static void post_allocation_install_obj_klass(KlassHandle klass,
oop obj);
--- a/hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/collectedHeap.inline.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_SHARED_COLLECTEDHEAP_INLINE_HPP
+#include "classfile/javaClasses.hpp"
#include "gc/shared/allocTracer.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/threadLocalAllocBuffer.inline.hpp"
@@ -96,6 +97,22 @@
post_allocation_notify(klass, (oop)obj, size);
}
+void CollectedHeap::post_allocation_setup_class(KlassHandle klass,
+ HeapWord* obj,
+ int size) {
+ // Set oop_size field before setting the _klass field
+ // in post_allocation_setup_common() because the klass field
+ // indicates that the object is parsable by concurrent GC.
+ oop new_cls = (oop)obj;
+ assert(size > 0, "oop_size must be positive.");
+ java_lang_Class::set_oop_size(new_cls, size);
+ post_allocation_setup_common(klass, obj);
+ assert(Universe::is_bootstrapping() ||
+ !new_cls->is_array(), "must not be an array");
+ // notify jvmti and dtrace
+ post_allocation_notify(klass, new_cls, size);
+}
+
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
HeapWord* obj,
int length) {
@@ -207,6 +224,16 @@
return (oop)obj;
}
+oop CollectedHeap::class_allocate(KlassHandle klass, int size, TRAPS) {
+ debug_only(check_for_valid_allocation_state());
+ assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
+ assert(size >= 0, "int won't convert to size_t");
+ HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
+ post_allocation_setup_class(klass, obj, size); // set oop_size
+ NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
+ return (oop)obj;
+}
+
oop CollectedHeap::array_allocate(KlassHandle klass,
int size,
int length,
--- a/hotspot/src/share/vm/gc/shared/workerManager.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/workerManager.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -47,18 +47,18 @@
// threads and a failure would not be optimal but should not be fatal.
template <class WorkerType>
static uint add_workers (WorkerType* holder,
- uint active_workers,
- uint total_workers,
- uint created_workers,
- os::ThreadType worker_type,
- bool initializing) {
+ uint active_workers,
+ uint total_workers,
+ uint created_workers,
+ os::ThreadType worker_type,
+ bool initializing) {
uint start = created_workers;
uint end = MIN2(active_workers, total_workers);
for (uint worker_id = start; worker_id < end; worker_id += 1) {
WorkerThread* new_worker = holder->install_worker(worker_id);
assert(new_worker != NULL, "Failed to allocate GangWorker");
if (new_worker == NULL || !os::create_thread(new_worker, worker_type)) {
- if(initializing) {
+ if (initializing) {
vm_exit_out_of_memory(0, OOM_MALLOC_ERROR,
"Cannot create worker GC thread. Out of system resources.");
}
@@ -67,11 +67,21 @@
os::start_thread(new_worker);
}
- log_trace(gc, task)("AdaptiveSizePolicy::add_workers() : "
- "active_workers: %u created_workers: %u",
- active_workers, created_workers);
+ return created_workers;
+ }
- return created_workers;
+ // Log (at trace level) a change in the number of created workers.
+ template <class WorkerType>
+ static void log_worker_creation(WorkerType* holder,
+ uint previous_created_workers,
+ uint active_workers,
+ uint created_workers,
+ bool initializing) {
+ if (previous_created_workers < created_workers) {
+ const char* initializing_msg = initializing ? "Adding initial" : "Creating additional";
+ log_trace(gc, task)("%s %s(s) previously created workers %u active workers %u total created workers %u",
+ initializing_msg, holder->group_name(), previous_created_workers, active_workers, created_workers);
+ }
}
};
#endif // SHARE_VM_GC_SHARED_WORKERMANAGER_HPP
--- a/hotspot/src/share/vm/gc/shared/workgroup.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/workgroup.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -66,6 +66,7 @@
} else {
worker_type = os::pgc_thread;
}
+ uint previous_created_workers = _created_workers;
_created_workers = WorkerManager::add_workers(this,
active_workers,
@@ -74,6 +75,8 @@
worker_type,
initializing);
_active_workers = MIN2(_created_workers, _active_workers);
+
+ WorkerManager::log_worker_creation(this, previous_created_workers, _active_workers, _created_workers, initializing);
}
AbstractGangWorker* AbstractWorkGang::worker(uint i) const {
--- a/hotspot/src/share/vm/gc/shared/workgroup.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/gc/shared/workgroup.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -176,6 +176,9 @@
// Return the Ith worker.
AbstractGangWorker* worker(uint i) const;
+ // Base name (without worker id #) of threads.
+ const char* group_name() { return name(); }
+
void threads_do(ThreadClosure* tc) const;
// Create a GC worker and install it into the work gang.
--- a/hotspot/src/share/vm/interpreter/bytecodeStream.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeStream.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,12 +31,12 @@
// set next bytecode position
address bcp = RawBytecodeStream::bcp();
address end = method()->code_base() + end_bci();
- int l = Bytecodes::raw_special_length_at(bcp, end);
- if (l <= 0 || (_bci + l) > _end_bci) {
+ int len = Bytecodes::raw_special_length_at(bcp, end);
+ // Very large tableswitch or lookupswitch size can cause _next_bci to overflow.
+ if (len <= 0 || (_bci > _end_bci - len) || (_bci - len >= _next_bci)) {
code = Bytecodes::_illegal;
} else {
- _next_bci += l;
- assert(_bci < _next_bci, "length must be > 0");
+ _next_bci += len;
// set attributes
_is_wide = false;
// check for special (uncommon) cases
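A self-contained illustration (values invented for the example) of the overflow the rewritten bound check guards against: with a huge length from a malformed tableswitch, the old form "(_bci + len) > _end_bci" wraps in 32-bit arithmetic and passes, while "_bci > _end_bci - len" stays in range and rejects the bytecode.

    #include <cstdint>
    #include <iostream>

    int main() {
      int32_t bci = 10, end_bci = 100;
      int32_t len = INT32_MAX - 5;               // absurd length from a corrupt switch
      int64_t true_sum = int64_t(bci) + len;     // 2147483652, clearly past end_bci
      // What 32-bit addition would produce: the sum wraps to a negative value,
      // so "sum > end_bci" is false and the old check would accept the bytecode.
      int32_t wrapped = int32_t(uint32_t(bci) + uint32_t(len));
      std::cout << "true sum " << true_sum << " vs wrapped " << wrapped << "\n";
      // The rewritten check compares 10 > (100 - len), which is true, so the
      // stream reports Bytecodes::_illegal instead of letting _next_bci overflow.
      return 0;
    }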
--- a/hotspot/src/share/vm/interpreter/bytecodeStream.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/interpreter/bytecodeStream.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -135,12 +135,15 @@
code = Bytecodes::code_or_bp_at(bcp);
// set next bytecode position
- int l = Bytecodes::length_for(code);
- if (l > 0 && (_bci + l) <= _end_bci) {
+ int len = Bytecodes::length_for(code);
+ if (len > 0 && (_bci <= _end_bci - len)) {
assert(code != Bytecodes::_wide && code != Bytecodes::_tableswitch
&& code != Bytecodes::_lookupswitch, "can't be special bytecode");
_is_wide = false;
- _next_bci += l;
+ _next_bci += len;
+ if (_next_bci <= _bci) { // Check for integer overflow
+ code = Bytecodes::_illegal;
+ }
_raw_code = code;
return code;
} else {
@@ -189,19 +192,23 @@
// note that we cannot advance before having the
// tty bytecode otherwise the stepping is wrong!
// (carefull: length_for(...) must be used first!)
- int l = Bytecodes::length_for(code);
- if (l == 0) l = Bytecodes::length_at(_method(), bcp);
- _next_bci += l;
- assert(_bci < _next_bci, "length must be > 0");
- // set attributes
- _is_wide = false;
- // check for special (uncommon) cases
- if (code == Bytecodes::_wide) {
- raw_code = (Bytecodes::Code)bcp[1];
- code = raw_code; // wide BCs are always Java-normal
- _is_wide = true;
+ int len = Bytecodes::length_for(code);
+ if (len == 0) len = Bytecodes::length_at(_method(), bcp);
+ if (len <= 0 || (_bci > _end_bci - len) || (_bci - len >= _next_bci)) {
+ raw_code = code = Bytecodes::_illegal;
+ } else {
+ _next_bci += len;
+ assert(_bci < _next_bci, "length must be > 0");
+ // set attributes
+ _is_wide = false;
+ // check for special (uncommon) cases
+ if (code == Bytecodes::_wide) {
+ raw_code = (Bytecodes::Code)bcp[1];
+ code = raw_code; // wide BCs are always Java-normal
+ _is_wide = true;
+ }
+ assert(Bytecodes::is_java_code(code), "sanity check");
}
- assert(Bytecodes::is_java_code(code), "sanity check");
}
_raw_code = raw_code;
_code = code;
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -576,27 +576,27 @@
// compute auxiliary field attributes
TosState state = as_TosState(info.field_type());
- // We need to delay resolving put instructions on final fields
- // until we actually invoke one. This is required so we throw
- // exceptions at the correct place. If we do not resolve completely
- // in the current pass, leaving the put_code set to zero will
- // cause the next put instruction to reresolve.
- Bytecodes::Code put_code = (Bytecodes::Code)0;
-
- // We also need to delay resolving getstatic instructions until the
- // class is intitialized. This is required so that access to the static
+ // Put instructions on final fields are not resolved. This is required so we throw
+ // exceptions at the correct place (when the instruction is actually invoked).
+ // If we do not resolve an instruction in the current pass, leaving the put_code
+ // set to zero will cause the next put instruction to the same field to reresolve.
+ //
+ // Also, we need to delay resolving getstatic and putstatic instructions until the
+ // class is initialized. This is required so that access to the static
// field will call the initialization function every time until the class
// is completely initialized ala. in 2.17.5 in JVM Specification.
InstanceKlass* klass = InstanceKlass::cast(info.field_holder());
bool uninitialized_static = ((bytecode == Bytecodes::_getstatic || bytecode == Bytecodes::_putstatic) &&
!klass->is_initialized());
+
+ Bytecodes::Code put_code = (Bytecodes::Code)0;
+ if (is_put && !info.access_flags().is_final() && !uninitialized_static) {
+ put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
+ }
+
Bytecodes::Code get_code = (Bytecodes::Code)0;
-
if (!uninitialized_static) {
get_code = ((is_static) ? Bytecodes::_getstatic : Bytecodes::_getfield);
- if (is_put || !info.access_flags().is_final()) {
- put_code = ((is_static) ? Bytecodes::_putstatic : Bytecodes::_putfield);
- }
}
cp_cache_entry->set_field(
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -970,7 +970,7 @@
if (is_initialized_static_final_update || is_initialized_instance_final_update) {
ss.print("Update to %s final field %s.%s attempted from a different method (%s) than the initializer method %s ",
is_static ? "static" : "non-static", resolved_klass()->external_name(), fd.name()->as_C_string(),
- current_klass()->external_name(),
+ m()->name()->as_C_string(),
is_static ? "<clinit>" : "<init>");
THROW_MSG(vmSymbols::java_lang_IllegalAccessError(), ss.as_string());
}
--- a/hotspot/src/share/vm/logging/logConfiguration.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/logging/logConfiguration.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -415,17 +415,8 @@
void LogConfiguration::describe_current_configuration(outputStream* out){
out->print_cr("Log output configuration:");
for (size_t i = 0; i < _n_outputs; i++) {
- out->print("#" SIZE_FORMAT ": %s ", i, _outputs[i]->name());
- out->print_raw(_outputs[i]->config_string());
- out->print(" ");
- char delimiter[2] = {0};
- for (size_t d = 0; d < LogDecorators::Count; d++) {
- LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
- if (_outputs[i]->decorators().is_decorator(decorator)) {
- out->print("%s%s", delimiter, LogDecorators::name(decorator));
- *delimiter = ',';
- }
- }
+ out->print("#" SIZE_FORMAT ": ", i);
+ _outputs[i]->describe(out);
out->cr();
}
}
--- a/hotspot/src/share/vm/logging/logFileOutput.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/logging/logFileOutput.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -428,3 +428,13 @@
result[result_len] = '\0';
return result;
}
+
+void LogFileOutput::describe(outputStream *out) {
+ LogOutput::describe(out);
+ out->print(" ");
+
+ out->print("filecount=%u,filesize=" SIZE_FORMAT "%s", _file_count,
+ byte_size_in_proper_unit(_rotate_size),
+ proper_unit_for_byte_size(_rotate_size));
+}
+
--- a/hotspot/src/share/vm/logging/logFileOutput.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/logging/logFileOutput.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -85,6 +85,7 @@
virtual int write(const LogDecorations& decorations, const char* msg);
virtual int write(LogMessageBuffer::Iterator msg_iterator);
virtual void force_rotate();
+ virtual void describe(outputStream *out);
virtual const char* name() const {
return _name;
--- a/hotspot/src/share/vm/logging/logOutput.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/logging/logOutput.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -83,3 +83,18 @@
break;
}
}
+
+void LogOutput::describe(outputStream *out) {
+ out->print("%s ", name());
+ out->print_raw(config_string());
+ out->print(" ");
+ char delimiter[2] = {0};
+ for (size_t d = 0; d < LogDecorators::Count; d++) {
+ LogDecorators::Decorator decorator = static_cast<LogDecorators::Decorator>(d);
+ if (decorators().is_decorator(decorator)) {
+ out->print("%s%s", delimiter, LogDecorators::name(decorator));
+ *delimiter = ',';
+ }
+ }
+}
+
--- a/hotspot/src/share/vm/logging/logOutput.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/logging/logOutput.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -83,6 +83,8 @@
// Do nothing by default.
}
+ virtual void describe(outputStream *out);
+
virtual const char* name() const = 0;
virtual bool initialize(const char* options, outputStream* errstream) = 0;
virtual int write(const LogDecorations& decorations, const char* msg) = 0;
--- a/hotspot/src/share/vm/logging/logPrefix.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/logging/logPrefix.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -74,6 +74,7 @@
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, ref, start)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \
+ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, stringtable)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, sweep)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task)) \
LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, start)) \
--- a/hotspot/src/share/vm/memory/metaspace.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -3106,10 +3106,6 @@
assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
- if (MetaspaceSize < 256*K) {
- vm_exit_during_initialization("Too small initial Metaspace size");
- }
-
MinMetaspaceExpansion = align_size_down_bounded(MinMetaspaceExpansion, _commit_alignment);
MaxMetaspaceExpansion = align_size_down_bounded(MaxMetaspaceExpansion, _commit_alignment);
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -60,6 +60,7 @@
bool MetaspaceShared::_check_classes_made_progress;
bool MetaspaceShared::_has_error_classes;
bool MetaspaceShared::_archive_loading_failed = false;
+bool MetaspaceShared::_remapped_readwrite = false;
address MetaspaceShared::_cds_i2i_entry_code_buffers = NULL;
size_t MetaspaceShared::_cds_i2i_entry_code_buffers_size = 0;
SharedMiscRegion MetaspaceShared::_mc;
@@ -806,6 +807,10 @@
exit(1);
}
}
+
+ // Copy the verification constraints from C_HEAP-alloced GrowableArrays to RO-alloced
+ // Arrays
+ SystemDictionaryShared::finalize_verification_constraints();
}
void MetaspaceShared::prepare_for_dumping() {
@@ -1181,6 +1186,7 @@
if (!mapinfo->remap_shared_readonly_as_readwrite()) {
return false;
}
+ _remapped_readwrite = true;
}
return true;
}
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -125,6 +125,7 @@
static bool _check_classes_made_progress;
static bool _has_error_classes;
static bool _archive_loading_failed;
+ static bool _remapped_readwrite;
static address _cds_i2i_entry_code_buffers;
static size_t _cds_i2i_entry_code_buffers_size;
@@ -205,6 +206,10 @@
// sharing is enabled. Simply returns true if sharing is not enabled
// or if the remapping has already been done by a prior call.
static bool remap_shared_readonly_as_readwrite() NOT_CDS_RETURN_(true);
+ static bool remapped_readwrite() {
+ CDS_ONLY(return _remapped_readwrite);
+ NOT_CDS(return false);
+ }
static void print_shared_spaces();
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -27,6 +27,7 @@
#include "classfile/classFileStream.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
+#include "classfile/systemDictionaryShared.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/dependencyContext.hpp"
@@ -597,6 +598,8 @@
// also sets rewritten
this_k->rewrite_class(CHECK_false);
+ } else if (this_k->is_shared()) {
+ SystemDictionaryShared::check_verification_constraints(this_k, CHECK_false);
}
// relocate jsrs and link methods after they are all rewritten
@@ -606,7 +609,12 @@
// methods have been rewritten since rewrite may
// fabricate new Method*s.
// also does loader constraint checking
- if (!this_k()->is_shared()) {
+ //
+ // initialize_vtable and initialize_itable need to be rerun for
+ // a shared class if the class is not loaded by the NULL classloader.
+ ClassLoaderData * loader_data = this_k->class_loader_data();
+ if (!(this_k->is_shared() &&
+ loader_data->is_the_null_class_loader_data())) {
ResourceMark rm(THREAD);
this_k->vtable()->initialize_vtable(true, CHECK_false);
this_k->itable()->initialize_itable(true, CHECK_false);
--- a/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/oops/instanceMirrorKlass.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,13 +50,12 @@
// Query before forming handle.
int size = instance_size(k);
KlassHandle h_k(THREAD, this);
- instanceOop i = (instanceOop)CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
+
+ assert(size > 0, "total object size must be positive: %d", size);
// Since mirrors can be variable sized because of the static fields, store
// the size in the mirror itself.
- java_lang_Class::set_oop_size(i, size);
-
- return i;
+ return (instanceOop)CollectedHeap::class_allocate(h_k, size, CHECK_NULL);
}
int InstanceMirrorKlass::oop_size(oop obj) const {
--- a/hotspot/src/share/vm/oops/klassVtable.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -27,6 +27,7 @@
#include "classfile/vmSymbols.hpp"
#include "gc/shared/gcLocker.hpp"
#include "logging/log.hpp"
+#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/instanceKlass.hpp"
@@ -42,6 +43,10 @@
return InstanceKlass::cast(_klass());
}
+bool klassVtable::is_preinitialized_vtable() {
+ return _klass->is_shared() && !MetaspaceShared::remapped_readwrite();
+}
+
// this function computes the vtable size (including the size needed for miranda
// methods) and the number of miranda methods in this class.
@@ -126,6 +131,12 @@
int klassVtable::initialize_from_super(KlassHandle super) {
if (super.is_null()) {
return 0;
+ } else if (is_preinitialized_vtable()) {
+    // A shared class' vtable is preinitialized at dump time. There is no need to copy
+    // methods from the super class for a shared class, as that was already done
+    // at archive time. However, if JVMTI has redefined a class,
+    // copy the super class's vtable in case the super class has changed.
+ return super->vtable()->length();
} else {
// copy methods from superKlass
klassVtable* superVtable = super->vtable();
@@ -152,6 +163,8 @@
KlassHandle super (THREAD, klass()->java_super());
int nofNewEntries = 0;
+ bool is_shared = _klass->is_shared();
+
if (!klass()->is_array_klass()) {
ResourceMark rm(THREAD);
log_develop_debug(vtables)("Initializing: %s", _klass->name()->as_C_string());
@@ -164,6 +177,7 @@
#endif
if (Universe::is_bootstrapping()) {
+ assert(!is_shared, "sanity");
// just clear everything
for (int i = 0; i < _length; i++) table()[i].clear();
return;
@@ -203,6 +217,7 @@
if (len > 0) {
Array<int>* def_vtable_indices = NULL;
if ((def_vtable_indices = ik()->default_vtable_indices()) == NULL) {
+ assert(!is_shared, "shared class def_vtable_indices does not exist");
def_vtable_indices = ik()->create_new_default_vtable_indices(len, CHECK);
} else {
assert(def_vtable_indices->length() == len, "reinit vtable len?");
@@ -217,7 +232,15 @@
// needs new entry
if (needs_new_entry) {
put_method_at(mh(), initialized);
- def_vtable_indices->at_put(i, initialized); //set vtable index
+ if (is_preinitialized_vtable()) {
+ // At runtime initialize_vtable is rerun for a shared class
+ // (loaded by the non-boot loader) as part of link_class_impl().
+ // The dumptime vtable index should be the same as the runtime index.
+ assert(def_vtable_indices->at(i) == initialized,
+ "dump time vtable index is different from runtime index");
+ } else {
+ def_vtable_indices->at_put(i, initialized); //set vtable index
+ }
initialized++;
}
}
@@ -378,7 +401,8 @@
}
// we need a new entry if there is no superclass
- if (klass->super() == NULL) {
+ Klass* super = klass->super();
+ if (super == NULL) {
return allocate_new;
}
@@ -407,7 +431,15 @@
Symbol* target_classname = target_klass->name();
for(int i = 0; i < super_vtable_len; i++) {
- Method* super_method = method_at(i);
+ Method* super_method;
+ if (is_preinitialized_vtable()) {
+ // If this is a shared class, the vtable is already in the final state (fully
+ // initialized). Need to look at the super's vtable.
+ klassVtable* superVtable = super->vtable();
+ super_method = superVtable->method_at(i);
+ } else {
+ super_method = method_at(i);
+ }
// Check if method name matches
if (super_method->name() == name && super_method->signature() == signature) {
@@ -475,7 +507,15 @@
target_method()->set_vtable_index(i);
} else {
if (def_vtable_indices != NULL) {
- def_vtable_indices->at_put(default_index, i);
+ if (is_preinitialized_vtable()) {
+ // At runtime initialize_vtable is rerun as part of link_class_impl()
+ // for a shared class loaded by the non-boot loader.
+ // The dumptime vtable index should be the same as the runtime index.
+ assert(def_vtable_indices->at(default_index) == i,
+ "dump time vtable index is different from runtime index");
+ } else {
+ def_vtable_indices->at_put(default_index, i);
+ }
}
assert(super_method->is_default_method() || super_method->is_overpass()
|| super_method->is_abstract(), "default override error");
@@ -490,17 +530,26 @@
}
void klassVtable::put_method_at(Method* m, int index) {
- if (log_develop_is_enabled(Trace, vtables)) {
- ResourceMark rm;
- outputStream* logst = Log(vtables)::trace_stream();
- const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
- logst->print("adding %s at index %d, flags: ", sig, index);
- if (m != NULL) {
- m->print_linkage_flags(logst);
+ if (is_preinitialized_vtable()) {
+ // At runtime initialize_vtable is rerun as part of link_class_impl()
+ // for shared class loaded by the non-boot loader to obtain the loader
+ // constraints based on the runtime classloaders' context. The dumptime
+ // method at the vtable index should be the same as the runtime method.
+ assert(table()[index].method() == m,
+ "archived method is different from the runtime method");
+ } else {
+ if (log_develop_is_enabled(Trace, vtables)) {
+ ResourceMark rm;
+ outputStream* logst = Log(vtables)::trace_stream();
+ const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
+ logst->print("adding %s at index %d, flags: ", sig, index);
+ if (m != NULL) {
+ m->print_linkage_flags(logst);
+ }
+ logst->cr();
}
- logst->cr();
+ table()[index].set(m);
}
- table()[index].set(m);
}
// Find out if a method "m" with superclass "super", loader "classloader" and
@@ -950,7 +999,15 @@
void itableMethodEntry::initialize(Method* m) {
if (m == NULL) return;
- _method = m;
+ if (MetaspaceShared::is_in_shared_space((void*)&_method) &&
+ !MetaspaceShared::remapped_readwrite()) {
+ // At runtime initialize_itable is rerun as part of link_class_impl()
+ // for a shared class loaded by the non-boot loader.
+ // The dumptime itable method entry should be the same as the runtime entry.
+ assert(_method == m, "sanity");
+ } else {
+ _method = m;
+ }
}
klassItable::klassItable(instanceKlassHandle klass) {
@@ -1054,7 +1111,11 @@
logst->cr();
}
if (!m->has_vtable_index()) {
- assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
+ // A shared method could have an initialized itable_index that
+ // is < 0.
+ assert(m->vtable_index() == Method::pending_itable_index ||
+ m->is_shared(),
+ "set by initialize_vtable");
m->set_itable_index(ime_num);
// Progress to next itable entry
ime_num++;
@@ -1248,7 +1309,6 @@
}
#endif // INCLUDE_JVMTI
-
// Setup
class InterfaceVisiterClosure : public StackObj {
public:
--- a/hotspot/src/share/vm/oops/klassVtable.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/oops/klassVtable.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -153,6 +153,19 @@
Array<Klass*>* local_interfaces);
void verify_against(outputStream* st, klassVtable* vt, int index);
inline InstanceKlass* ik() const;
+  // When loading a class from the CDS archive at run time, and no class redefinition
+ // has happened, it is expected that the class's itable/vtables are
+ // laid out exactly the same way as they had been during dump time.
+ // Therefore, in klassVtable::initialize_[iv]table, we do not layout the
+ // tables again. Instead, we only rerun the process to create/check
+ // the class loader constraints. In non-product builds, we add asserts to
+ // guarantee that the table's layout would be the same as at dump time.
+ //
+  // If JVMTI redefines any class, the read-only shared memory is remapped
+  // as read-write. A shared class' vtable/itable are re-initialized and
+  // might have a different layout due to class redefinition of the shared class
+ // or its super types.
+ bool is_preinitialized_vtable();
};
--- a/hotspot/src/share/vm/oops/method.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/oops/method.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -313,6 +313,33 @@
unlink_method();
}
+void Method::set_vtable_index(int index) {
+ if (is_shared() && !MetaspaceShared::remapped_readwrite()) {
+ // At runtime initialize_vtable is rerun as part of link_class_impl()
+ // for a shared class loaded by the non-boot loader to obtain the loader
+ // constraints based on the runtime classloaders' context.
+ return; // don't write into the shared class
+ } else {
+ _vtable_index = index;
+ }
+}
+
+void Method::set_itable_index(int index) {
+ if (is_shared() && !MetaspaceShared::remapped_readwrite()) {
+ // At runtime initialize_itable is rerun as part of link_class_impl()
+ // for a shared class loaded by the non-boot loader to obtain the loader
+ // constraints based on the runtime classloaders' context. The dumptime
+ // itable index should be the same as the runtime index.
+ assert(_vtable_index == itable_index_max - index,
+ "archived itable index is different from runtime index");
+    return; // don't write into the shared class
+ } else {
+ _vtable_index = itable_index_max - index;
+ }
+ assert(valid_itable_index(), "");
+}
+
+
bool Method::was_executed_more_than(int n) {
// Invocation counter is reset when the Method* is compiled.
--- a/hotspot/src/share/vm/oops/method.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/oops/method.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -470,12 +470,12 @@
DEBUG_ONLY(bool valid_vtable_index() const { return _vtable_index >= nonvirtual_vtable_index; })
bool has_vtable_index() const { return _vtable_index >= 0; }
int vtable_index() const { return _vtable_index; }
- void set_vtable_index(int index) { _vtable_index = index; }
+ void set_vtable_index(int index);
DEBUG_ONLY(bool valid_itable_index() const { return _vtable_index <= pending_itable_index; })
bool has_itable_index() const { return _vtable_index <= itable_index_max; }
int itable_index() const { assert(valid_itable_index(), "");
return itable_index_max - _vtable_index; }
- void set_itable_index(int index) { _vtable_index = itable_index_max - index; assert(valid_itable_index(), ""); }
+ void set_itable_index(int index);
// interpreter entry
address interpreter_entry() const { return _i2i_entry; }
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -258,8 +258,8 @@
}
}
- assert(s % MinObjAlignment == 0, "alignment check");
- assert(s > 0, "Bad size calculated");
+ assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
+ assert(s > 0, "Oop size must be greater than zero, not %d", s);
return s;
}
--- a/hotspot/src/share/vm/opto/library_call.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -2405,8 +2405,13 @@
Compile::AliasType* alias_type = C->alias_type(adr_type);
assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
- assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
- alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
+ // Only field, array element or unknown locations are supported.
+ if (alias_type->adr_type() != TypeRawPtr::BOTTOM &&
+ alias_type->adr_type() != TypeOopPtr::BOTTOM &&
+ alias_type->basic_type() == T_ILLEGAL) {
+ return false;
+ }
+
bool mismatched = false;
BasicType bt = alias_type->basic_type();
if (bt != T_ILLEGAL) {
@@ -2782,12 +2787,6 @@
ShouldNotReachHere();
}
- // Null check receiver.
- receiver = null_check(receiver);
- if (stopped()) {
- return true;
- }
-
// Build field offset expression.
// We currently rely on the cookies produced by Unsafe.xxxFieldOffset
// to be plain byte offsets, which are also the same as those accepted
@@ -2799,8 +2798,6 @@
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
Compile::AliasType* alias_type = C->alias_type(adr_type);
- assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
- alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
BasicType bt = alias_type->basic_type();
if (bt != T_ILLEGAL &&
((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
@@ -2832,6 +2829,12 @@
ShouldNotReachHere();
}
+ // Null check receiver.
+ receiver = null_check(receiver);
+ if (stopped()) {
+ return true;
+ }
+
int alias_idx = C->get_alias_index(adr_type);
// Memory-model-wise, a LoadStore acts like a little synchronized
--- a/hotspot/src/share/vm/prims/jvmti.xml Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/prims/jvmti.xml Wed Jul 05 21:59:15 2017 +0200
@@ -6509,6 +6509,59 @@
<errors>
</errors>
</function>
+
+ <function id="GetNamedModule" num="40" since="9">
+ <synopsis>Get Named Module</synopsis>
+ <description>
+ Return the <code>java.lang.reflect.Module</code> object for a named
+ module that is defined to the given class loader and that contains the
+ given package. The module is returned via <code>module_ptr</code>.
+ <p/>
+ If a named module defined to the class loader contains the package,
+ then that named module is returned;
+ otherwise <code>NULL</code> is returned.
+ <p/>
+ </description>
+ <origin>new</origin>
+ <capabilities>
+ </capabilities>
+ <parameters>
+ <param id="class_loader">
+ <ptrtype>
+ <jobject/>
+ <nullok>the bootstrap loader is assumed</nullok>
+ </ptrtype>
+ <description>
+ A class loader.
+ If the <code>class_loader</code> is not <code>NULL</code>
+ and is not an instance of <code>java.lang.ClassLoader</code>,
+ this function returns
+ <errorlink id="JVMTI_ERROR_ILLEGAL_ARGUMENT"></errorlink>.
+ </description>
+ </param>
+ <param id="package_name">
+ <inbuf><char/></inbuf>
+ <description>
+ The name of the package, encoded as a
+ <internallink id="mUTF">modified UTF-8</internallink> string.
+ The package name is in internal form (JVMS 4.2.1);
+ identifiers are separated by forward slashes rather than periods.
+ </description>
+ </param>
+ <param id="module_ptr">
+ <outptr><jobject/></outptr>
+ <description>
+ On return, points to a <code>java.lang.reflect.Module</code> object
+ or points to <code>NULL</code>.
+ </description>
+ </param>
+ </parameters>
+ <errors>
+ <error id="JVMTI_ERROR_ILLEGAL_ARGUMENT">
+ If class loader is not <code>NULL</code> and is not a class loader object.
+ </error>
+ </errors>
+ </function>
</category>
<category id="class" label="Class">
@@ -12462,6 +12515,14 @@
<code>new_class_data</code> has been set, it becomes the
<code>class_data</code> for the next agent.
<p/>
+ When handling a class load in the live phase, the
+ <functionlink id="GetNamedModule"></functionlink>
+ function can be used to map a class loader and a package name to a module.
+ When a class is being redefined or retransformed,
+ <code>class_being_redefined</code> is non-<code>NULL</code>, so
+ the JNI <code>GetModule</code> function can also be used
+ to obtain the Module.
+ <p/>
The order that this event is sent to each environment differs
from other events.
This event is sent to environments in the following order:
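To make the live-phase guidance above concrete, here is a hedged sketch of a ClassFileLoadHook callback that derives the package from the class name and maps it, together with the loader passed to the event, to a module via GetNamedModule; the package extraction and the omitted error handling are simplifications, not required usage.

#include <stdlib.h>
#include <string.h>
#include "jvmti.h"

// Sketch only: resolve the module for the class being loaded or redefined.
static void JNICALL
OnClassFileLoadHook(jvmtiEnv *jvmti, JNIEnv *jni,
                    jclass class_being_redefined, jobject loader,
                    const char *name, jobject protection_domain,
                    jint class_data_len, const unsigned char *class_data,
                    jint *new_class_data_len, unsigned char **new_class_data) {
  if (name == NULL) return;                      // no name, nothing to map
  const char *slash = strrchr(name, '/');
  size_t pkg_len = (slash == NULL) ? 0 : (size_t)(slash - name);
  char *pkg = (char *)malloc(pkg_len + 1);
  if (pkg == NULL) return;
  memcpy(pkg, name, pkg_len);
  pkg[pkg_len] = '\0';                           // internal form, e.g. "java/lang"

  jobject module = NULL;
  if (jvmti->GetNamedModule(loader, pkg, &module) == JVMTI_ERROR_NONE &&
      module != NULL) {
    // "module" is the java.lang.reflect.Module that contains the package;
    // for a redefine/retransform, class_being_redefined plus JNI GetModule
    // would give the same information, as noted above.
  }
  free(pkg);
}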
@@ -14427,20 +14488,15 @@
<change date="19 June 2013" version="1.2.3">
Added support for statically linked agents.
</change>
- <change date="20 January 2016" version="9.0.0">
+ <change date="5 July 2016" version="9.0.0">
Support for modules:
- The majorversion is 9 now
- The ClassFileLoadHook events are not sent during the primordial phase anymore.
- Add new function GetAllModules
- </change>
- <change date="17 February 2016" version="9.0.0">
- Support for modules:
- Add new capability can_generate_early_vmstart
- Allow CompiledMethodLoad events at start phase
- </change>
- <change date="14 April 2016" version="9.0.0">
- Support for modules:
- Add new capability can_generate_early_class_hook_events
+ - Add new function GetNamedModule
</change>
</changehistory>
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/classLoaderExt.hpp"
+#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/bytecodeStream.hpp"
@@ -201,6 +202,28 @@
} /* end GetAllModules */
+// class_loader - NULL is a valid value, must be pre-checked
+// package_name - pre-checked for NULL
+// module_ptr - pre-checked for NULL
+jvmtiError
+JvmtiEnv::GetNamedModule(jobject class_loader, const char* package_name, jobject* module_ptr) {
+ JavaThread* THREAD = JavaThread::current(); // pass to macros
+ ResourceMark rm(THREAD);
+
+ Handle h_loader (THREAD, JNIHandles::resolve(class_loader));
+ // Check that loader is a subclass of java.lang.ClassLoader.
+ if (h_loader.not_null() && !java_lang_ClassLoader::is_subclass(h_loader->klass())) {
+ return JVMTI_ERROR_ILLEGAL_ARGUMENT;
+ }
+ jobject module = Modules::get_named_module(h_loader, package_name, THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ CLEAR_PENDING_EXCEPTION;
+ return JVMTI_ERROR_INTERNAL; // unexpected exception
+ }
+ *module_ptr = module;
+ return JVMTI_ERROR_NONE;
+} /* end GetNamedModule */
+
//
// Class functions
//
--- a/hotspot/src/share/vm/runtime/objectMonitor.cpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.cpp Wed Jul 05 21:59:15 2017 +0200
@@ -131,8 +131,6 @@
static int Knob_QMode = 0; // EntryList-cxq policy - queue discipline
static volatile int InitDone = 0;
-#define TrySpin TrySpin_VaryDuration
-
// -----------------------------------------------------------------------------
// Theory of operations -- Monitors lists, thread residency, etc:
//
@@ -1848,13 +1846,8 @@
// hysteresis control to damp the transition rate between spinning and
// not spinning.
-intptr_t ObjectMonitor::SpinCallbackArgument = 0;
-int (*ObjectMonitor::SpinCallbackFunction)(intptr_t, int) = NULL;
-
// Spinning: Fixed frequency (100%), vary duration
-
-
-int ObjectMonitor::TrySpin_VaryDuration(Thread * Self) {
+int ObjectMonitor::TrySpin(Thread * Self) {
// Dumb, brutal spin. Good for comparative measurements against adaptive spinning.
int ctr = Knob_FixedSpin;
if (ctr != 0) {
@@ -1948,11 +1941,6 @@
goto Abort; // abrupt spin egress
}
if (Knob_UsePause & 1) SpinPause();
-
- int (*scb)(intptr_t,int) = SpinCallbackFunction;
- if (hits > 50 && scb != NULL) {
- int abend = (*scb)(SpinCallbackArgument, 0);
- }
}
if (Knob_UsePause & 2) SpinPause();
--- a/hotspot/src/share/vm/runtime/objectMonitor.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/runtime/objectMonitor.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -161,9 +161,6 @@
Thread * volatile _Responsible;
volatile int _Spinner; // for exit->spinner handoff optimization
- volatile int _SpinFreq; // Spin 1-out-of-N attempts: success rate
- volatile int _SpinClock;
- volatile intptr_t _SpinState; // MCS/CLH list of spinners
volatile int _SpinDuration;
volatile jint _count; // reference count to prevent reclamation/deflation
@@ -238,10 +235,6 @@
static int cxq_offset_in_bytes() { return offset_of(ObjectMonitor, _cxq); }
static int succ_offset_in_bytes() { return offset_of(ObjectMonitor, _succ); }
static int EntryList_offset_in_bytes() { return offset_of(ObjectMonitor, _EntryList); }
- static int FreeNext_offset_in_bytes() { return offset_of(ObjectMonitor, FreeNext); }
- static int WaitSet_offset_in_bytes() { return offset_of(ObjectMonitor, _WaitSet); }
- static int Responsible_offset_in_bytes() { return offset_of(ObjectMonitor, _Responsible); }
- static int Spinner_offset_in_bytes() { return offset_of(ObjectMonitor, _Spinner); }
// ObjectMonitor references can be ORed with markOopDesc::monitor_value
// as part of the ObjectMonitor tagging mechanism. When we combine an
@@ -257,11 +250,6 @@
#define OM_OFFSET_NO_MONITOR_VALUE_TAG(f) \
((ObjectMonitor::f ## _offset_in_bytes()) - markOopDesc::monitor_value)
- // Eventually we'll make provisions for multiple callbacks, but
- // now one will suffice.
- static int (*SpinCallbackFunction)(intptr_t, int);
- static intptr_t SpinCallbackArgument;
-
markOop header() const;
void set_header(markOop hdr);
@@ -312,8 +300,6 @@
_cxq = NULL;
_WaitSet = NULL;
_recursions = 0;
- _SpinFreq = 0;
- _SpinClock = 0;
}
public:
@@ -353,9 +339,7 @@
void UnlinkAfterAcquire(Thread * Self, ObjectWaiter * SelfNode);
int TryLock(Thread * Self);
int NotRunnable(Thread * Self, Thread * Owner);
- int TrySpin_Fixed(Thread * Self);
- int TrySpin_VaryFrequency(Thread * Self);
- int TrySpin_VaryDuration(Thread * Self);
+ int TrySpin(Thread * Self);
void ExitEpilog(Thread * Self, ObjectWaiter * Wakee);
bool ExitSuspendEquivalent(JavaThread * Self);
void post_monitor_wait_event(EventJavaMonitorWait * event,
--- a/hotspot/src/share/vm/runtime/synchronizer.hpp Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/src/share/vm/runtime/synchronizer.hpp Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -144,8 +144,6 @@
static void verify() PRODUCT_RETURN;
static int verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
- static void RegisterSpinCallback(int(*)(intptr_t, int), intptr_t);
-
private:
enum { _BLOCKSIZE = 128 };
// global list of blocks of monitors
--- a/hotspot/test/compiler/ciReplay/TestVM_no_comp_level.sh Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/compiler/ciReplay/TestVM_no_comp_level.sh Wed Jul 05 21:59:15 2017 +0200
@@ -29,6 +29,7 @@
## @summary testing of ciReplay with using generated by VM replay.txt w/o comp_level
## @author igor.ignatyev@oracle.com
## @requires vm.flightRecorder != true
+## @ignore 8157984
## @run shell TestVM_no_comp_level.sh
##
--- a/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaType.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/compiler/jvmci/jdk.vm.ci.runtime.test/src/jdk/vm/ci/runtime/test/TestResolvedJavaType.java Wed Jul 05 21:59:15 2017 +0200
@@ -25,6 +25,7 @@
* @test
* @requires (vm.simpleArch == "x64" | vm.simpleArch == "sparcv9" | vm.simpleArch == "aarch64")
* @library ../../../../../
+ * @ignore 8161550
* @modules java.base/jdk.internal.reflect
* jdk.vm.ci/jdk.vm.ci.meta
* jdk.vm.ci/jdk.vm.ci.runtime
--- a/hotspot/test/compiler/rangechecks/TestRangeCheckSmearing.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/compiler/rangechecks/TestRangeCheckSmearing.java Wed Jul 05 21:59:15 2017 +0200
@@ -28,6 +28,7 @@
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* java.management
+ * @ignore 8157984
* @build TestRangeCheckSmearing
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* jdk.test.lib.Platform
--- a/hotspot/test/compiler/tiered/NonTieredLevelsTest.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/compiler/tiered/NonTieredLevelsTest.java Wed Jul 05 21:59:15 2017 +0200
@@ -26,6 +26,7 @@
* @library /testlibrary /test/lib /
* @modules java.base/jdk.internal.misc
* @modules java.management
+ * @ignore 8157984
* @build NonTieredLevelsTest
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/unsafe/OpaqueAccesses.java Wed Jul 05 21:59:15 2017 +0200
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Oracle designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8155781
+ * @modules java.base/jdk.internal.misc
+ *
+ * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
+ * -XX:-TieredCompilation -Xbatch
+ * -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test*
+ * compiler.unsafe.OpaqueAccesses
+ */
+package compiler.unsafe;
+
+import jdk.internal.misc.Unsafe;
+
+import java.lang.reflect.Field;
+
+public class OpaqueAccesses {
+ private static final Unsafe UNSAFE = Unsafe.getUnsafe();
+
+ private static final Object INSTANCE = new OpaqueAccesses();
+
+ private static final Object[] ARRAY = new Object[10];
+
+ private static final long F_OFFSET;
+ private static final long E_OFFSET;
+
+ static {
+ try {
+ Field field = OpaqueAccesses.class.getDeclaredField("f");
+ F_OFFSET = UNSAFE.objectFieldOffset(field);
+
+ E_OFFSET = UNSAFE.arrayBaseOffset(ARRAY.getClass());
+ } catch (NoSuchFieldException e) {
+ throw new Error(e);
+ }
+ }
+
+ private Object f = new Object();
+
+ static Object testFixedOffsetField(Object o) {
+ return UNSAFE.getObject(o, F_OFFSET);
+ }
+
+ static int testFixedOffsetHeader0(Object o) {
+ return UNSAFE.getInt(o, 0);
+ }
+
+ static int testFixedOffsetHeader4(Object o) {
+ return UNSAFE.getInt(o, 4);
+ }
+
+ static Object testFixedBase(long off) {
+ return UNSAFE.getObject(INSTANCE, off);
+ }
+
+ static Object testOpaque(Object o, long off) {
+ return UNSAFE.getObject(o, off);
+ }
+
+ static int testFixedOffsetHeaderArray0(Object[] arr) {
+ return UNSAFE.getInt(arr, 0);
+ }
+
+ static int testFixedOffsetHeaderArray4(Object[] arr) {
+ return UNSAFE.getInt(arr, 4);
+ }
+
+ static Object testFixedOffsetArray(Object[] arr) {
+ return UNSAFE.getObject(arr, E_OFFSET);
+ }
+
+ static Object testFixedBaseArray(long off) {
+ return UNSAFE.getObject(ARRAY, off);
+ }
+
+ static Object testOpaqueArray(Object[] o, long off) {
+ return UNSAFE.getObject(o, off);
+ }
+
+ static final long ADDR = UNSAFE.allocateMemory(10);
+ static boolean flag;
+
+ static int testMixedAccess() {
+ flag = !flag;
+ Object o = (flag ? INSTANCE : null);
+ long off = (flag ? F_OFFSET : ADDR);
+ return UNSAFE.getInt(o, off);
+ }
+
+ public static void main(String[] args) {
+ for (int i = 0; i < 20_000; i++) {
+ // Instance
+ testFixedOffsetField(INSTANCE);
+ testFixedOffsetHeader0(INSTANCE);
+ testFixedOffsetHeader4(INSTANCE);
+ testFixedBase(F_OFFSET);
+ testOpaque(INSTANCE, F_OFFSET);
+ testMixedAccess();
+
+ // Array
+ testFixedOffsetHeaderArray0(ARRAY);
+ testFixedOffsetHeaderArray4(ARRAY);
+ testFixedOffsetArray(ARRAY);
+ testFixedBaseArray(E_OFFSET);
+ testOpaqueArray(ARRAY, E_OFFSET);
+ }
+ System.out.println("TEST PASSED");
+ }
+}
--- a/hotspot/test/gc/TestSmallHeap.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/gc/TestSmallHeap.java Wed Jul 05 21:59:15 2017 +0200
@@ -27,6 +27,7 @@
* @requires vm.gc=="null"
* @summary Verify that starting the VM with a small heap works
* @library /testlibrary /test/lib /test/lib/share/classes
+ * @ignore 8161552
* @modules java.base/jdk.internal.misc
* @modules java.management/sun.management
* @build TestSmallHeap
--- a/hotspot/test/gc/arguments/TestParallelHeapSizeFlags.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/gc/arguments/TestParallelHeapSizeFlags.java Wed Jul 05 21:59:15 2017 +0200
@@ -29,6 +29,7 @@
* parallel collectors.
* @requires vm.gc=="null"
* @library /testlibrary /test/lib
+ * @ignore 8161552
* @modules java.base/jdk.internal.misc
* java.management
* @build TestParallelHeapSizeFlags TestMaxHeapSizeTools
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/gc/ergonomics/TestInitialGCThreadLogging.java Wed Jul 05 21:59:15 2017 +0200
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test TestInitialGCThreadLogging
+ * @bug 8157240
+ * @summary Check trace logging of initial GC threads.
+ * @requires vm.gc=="null"
+ * @key gc
+ * @modules java.base/jdk.internal.misc
+ * @library /testlibrary
+ */
+
+import jdk.test.lib.ProcessTools;
+import jdk.test.lib.OutputAnalyzer;
+
+public class TestInitialGCThreadLogging {
+ public static void main(String[] args) throws Exception {
+
+ testInitialGCThreadLogging("UseConcMarkSweepGC", "GC Thread");
+
+ testInitialGCThreadLogging("UseG1GC", "GC Thread");
+
+ testInitialGCThreadLogging("UseParallelGC", "ParGC Thread");
+ }
+
+ private static void verifyDynamicNumberOfGCThreads(OutputAnalyzer output, String threadName) {
+ output.shouldHaveExitValue(0); // test should run successfully
+ output.shouldContain(threadName);
+ }
+
+ private static void testInitialGCThreadLogging(String gcFlag, String threadName) throws Exception {
+ // UseDynamicNumberOfGCThreads enabled together with gc+task trace logging
+ String[] baseArgs = {"-XX:+" + gcFlag, "-Xmx10M", "-XX:+UseDynamicNumberOfGCThreads", "-Xlog:gc+task=trace", "-version"};
+
+ // Base test with gc and +UseDynamicNumberOfGCThreads:
+ ProcessBuilder pb_enabled = ProcessTools.createJavaProcessBuilder(baseArgs);
+ verifyDynamicNumberOfGCThreads(new OutputAnalyzer(pb_enabled.start()), threadName);
+ }
+}
--- a/hotspot/test/gc/g1/TestStringSymbolTableStats.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/gc/g1/TestStringSymbolTableStats.java Wed Jul 05 21:59:15 2017 +0200
@@ -46,7 +46,7 @@
System.out.println("Output:\n" + output.getOutput());
- output.shouldContain("Cleaned string and symbol table");
+ output.shouldMatch("GC\\(\\d+\\) Cleaned string and symbol table");
output.shouldHaveExitValue(0);
}
--- a/hotspot/test/gc/metaspace/TestMetaspaceInitialization.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/gc/metaspace/TestMetaspaceInitialization.java Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,11 +24,11 @@
import java.util.ArrayList;
/* @test TestMetaspaceInitialization
- * @bug 8042933
+ * @bug 8024945
* @summary Tests to initialize metaspace with a very low MetaspaceSize
* @modules java.base/jdk.internal.misc
* @library /testlibrary
- * @run main/othervm -XX:MetaspaceSize=2m TestMetaspaceInitialization
+ * @run main/othervm -XX:MetaspaceSize=0 TestMetaspaceInitialization
*/
public class TestMetaspaceInitialization {
private class Internal {
--- a/hotspot/test/gc/metaspace/TestMetaspaceSizeFlags.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/gc/metaspace/TestMetaspaceSizeFlags.java Wed Jul 05 21:59:15 2017 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,9 +47,6 @@
// 8024650: MaxMetaspaceSize was adjusted instead of MetaspaceSize.
testMaxMetaspaceSizeLTMetaspaceSize(MAX_ALIGNMENT, MAX_ALIGNMENT * 2);
testMaxMetaspaceSizeGTMetaspaceSize(MAX_ALIGNMENT * 2, MAX_ALIGNMENT);
- testTooSmallInitialMetaspace(0, 0);
- testTooSmallInitialMetaspace(0, MAX_ALIGNMENT);
- testTooSmallInitialMetaspace(MAX_ALIGNMENT, 0);
}
private static void testMaxMetaspaceSizeEQMetaspaceSize(long maxMetaspaceSize, long metaspaceSize) throws Exception {
@@ -73,11 +70,6 @@
Asserts.assertEQ(mf.metaspaceSize, metaspaceSize);
}
- private static void testTooSmallInitialMetaspace(long maxMetaspaceSize, long metaspaceSize) throws Exception {
- OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
- output.shouldContain("Too small initial Metaspace size");
- }
-
private static MetaspaceFlags runAndGetValue(long maxMetaspaceSize, long metaspaceSize) throws Exception {
OutputAnalyzer output = run(maxMetaspaceSize, metaspaceSize);
output.shouldNotMatch("Error occurred during initialization of VM\n.*");
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/Final/TestPutField.jasm Wed Jul 05 21:59:15 2017 +0200
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+public class TestPutField
+version 53:0
+{
+
+final Field test_field:"I";
+
+
+public Method <init>:"()V"
+ stack 2 locals 1
+{
+ aload_0;
+ dup;
+ invokespecial Method java/lang/Object.<init>:"()V";
+ bipush 13;
+ putfield Field test_field:"I";
+ return;
+}
+
+public Method aMethod:"()I"
+ stack 2 locals 2
+{
+ aload_0;
+ getfield Field test_field:"I";
+ istore_1;
+ aload_0;
+ bipush 14;
+ putfield Field test_field:"I";
+ iload_1;
+ ireturn;
+}
+
+
+public static Method test:"()V"
+ stack 2 locals 2
+{
+ new class TestPutField;
+ astore_0;
+ aload_0;
+ invokespecial Method <init>:"()V";
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ astore_1;
+ aload_1;
+ aload_0;
+ invokevirtual Method aMethod:"()I";
+ invokevirtual Method java/io/PrintStream.println:"(I)V";
+ aload_1;
+ aload_0;
+ getfield Field test_field:"I";
+ invokevirtual Method java/io/PrintStream.println:"(I)V";
+ return;
+}
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/Final/TestPutMain.java Wed Jul 05 21:59:15 2017 +0200
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/*
+ * @test
+ * @bug 8160527
+ * @summary The VM does not always perform checks added by 8157181 when updating final instance fields
+ * @library /testlibrary
+ * @compile TestPutField.jasm
+ * @compile TestPutStatic.jasm
+ * @compile TestPutMain.java
+ * @run main/othervm TestPutMain
+ */
+
+import jdk.test.lib.Asserts;
+
+public class TestPutMain {
+ public static void main(String[] args) {
+ boolean exception = false;
+ try {
+ TestPutField.test();
+ } catch (java.lang.IllegalAccessError e) {
+ exception = true;
+ }
+
+ Asserts.assertTrue(exception, "FAILED: Expected IllegalAccessError for illegal update to final instance field was not thrown.");
+
+ exception = false;
+ try {
+ TestPutStatic.test();
+ } catch (java.lang.IllegalAccessError e) {
+ exception = true;
+ }
+
+ Asserts.assertTrue(exception, "FAILED: Expected IllegalAccessError for illegal update to final static field was not thrown.");
+
+ System.out.println("PASSED: Expected IllegalAccessError was thrown.");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/Final/TestPutStatic.jasm Wed Jul 05 21:59:15 2017 +0200
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+public class TestPutStatic
+version 53:0
+{
+
+final static Field test_field:"I";
+
+
+public static Method <clinit>:"()V"
+ stack 2 locals 1
+{
+ bipush 13;
+ putstatic Field test_field:"I";
+ return;
+}
+
+public static Method aMethod:"()I"
+ stack 1 locals 1
+{
+ getstatic Field test_field:"I";
+ istore_0;
+ bipush 14;
+ putstatic Field test_field:"I";
+ iload_0;
+ ireturn;
+}
+
+
+public static Method test:"()V"
+ stack 2 locals 1
+{
+ getstatic Field java/lang/System.out:"Ljava/io/PrintStream;";
+ astore_0;
+ aload_0;
+ invokestatic Method aMethod:"()I";
+ invokevirtual Method java/io/PrintStream.println:"(I)V";
+ aload_0;
+ getstatic Field test_field:"I";
+ invokevirtual Method java/io/PrintStream.println:"(I)V";
+ return;
+}
+
+}
--- a/hotspot/test/runtime/SharedArchiveFile/SASymbolTableTest.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/runtime/SharedArchiveFile/SASymbolTableTest.java Wed Jul 05 21:59:15 2017 +0200
@@ -24,6 +24,9 @@
/*
* @test SASymbolTableTest
* @summary Walk symbol table using SA, with and without CDS.
+ * Started failing on 2016.06.24 due to 8160376 on MacOS X so quarantine
+ * it on that platform:
+ * @requires os.family != "mac"
* @library /testlibrary
* @modules java.base/jdk.internal.misc
* jdk.hotspot.agent/sun.jvm.hotspot.oops
--- a/hotspot/test/runtime/Unsafe/GetUnsafe.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/runtime/Unsafe/GetUnsafe.java Wed Jul 05 21:59:15 2017 +0200
@@ -26,6 +26,7 @@
* @summary Verifies that getUnsafe() actually throws SecurityException when unsafeAccess is prohibited.
* @library /testlibrary
* @modules java.base/jdk.internal.misc
+ * @ignore 8161947
* @run main GetUnsafe
*/
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/serviceability/jvmti/GetNamedModule/MyPackage/GetNamedModuleTest.java Wed Jul 05 21:59:15 2017 +0200
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package MyPackage;
+
+/**
+ * @test
+ * @summary Verifies the JVMTI GetNamedModule API
+ * @compile GetNamedModuleTest.java
+ * @run main/othervm/native -agentlib:GetNamedModuleTest MyPackage.GetNamedModuleTest
+ */
+
+import java.io.PrintStream;
+
+public class GetNamedModuleTest {
+
+ static {
+ try {
+ System.loadLibrary("GetNamedModuleTest");
+ } catch (UnsatisfiedLinkError ule) {
+ System.err.println("Could not load GetNamedModuleTest library");
+ System.err.println("java.library.path: "
+ + System.getProperty("java.library.path"));
+ throw ule;
+ }
+ }
+
+ native static int check();
+
+ public static void main(String args[]) {
+ int status = check();
+ if (status != 0) {
+ throw new RuntimeException("Non-zero status returned from the agent: " + status);
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/serviceability/jvmti/GetNamedModule/libGetNamedModuleTest.c Wed Jul 05 21:59:15 2017 +0200
@@ -0,0 +1,405 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include "jvmti.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef JNI_ENV_ARG
+
+#ifdef __cplusplus
+#define JNI_ENV_ARG(x, y) y
+#define JNI_ENV_PTR(x) x
+#else
+#define JNI_ENV_ARG(x,y) x, y
+#define JNI_ENV_PTR(x) (*x)
+#endif
+
+#endif
+
+#define TranslateError(err) "JVMTI error"
+
+#define PASSED 0
+#define FAILED 2
+
+static const char *EXC_CNAME = "java/lang/Exception";
+static const char* MOD_CNAME = "Ljava/lang/reflect/Module;";
+
+static jvmtiEnv *jvmti = NULL;
+static jint result = PASSED;
+static jboolean printdump = JNI_FALSE;
+
+static jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved);
+
+JNIEXPORT
+jint JNICALL Agent_OnLoad(JavaVM *jvm, char *options, void *reserved) {
+ return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL Agent_OnAttach(JavaVM *jvm, char *options, void *reserved) {
+ return Agent_Initialize(jvm, options, reserved);
+}
+
+JNIEXPORT
+jint JNICALL JNI_OnLoad(JavaVM *jvm, void *reserved) {
+ return JNI_VERSION_1_8;
+}
+
+static
+jint Agent_Initialize(JavaVM *jvm, char *options, void *reserved) {
+ jint res;
+
+ if (options != NULL && strcmp(options, "printdump") == 0) {
+ printdump = JNI_TRUE;
+ }
+
+ res = JNI_ENV_PTR(jvm)->GetEnv(JNI_ENV_ARG(jvm, (void **) &jvmti),
+ JVMTI_VERSION_9);
+ if (res != JNI_OK || jvmti == NULL) {
+ printf(" Error: wrong result of a valid call to GetEnv!\n");
+ return JNI_ERR;
+ }
+
+ return JNI_OK;
+}
+
+static
+jint throw_exc(JNIEnv *env, char *msg) {
+ jclass exc_class = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, EXC_CNAME));
+
+ if (exc_class == NULL) {
+ printf("throw_exc: Error in FindClass(env, %s)\n", EXC_CNAME);
+ return -1;
+ }
+ return JNI_ENV_PTR(env)->ThrowNew(JNI_ENV_ARG(env, exc_class), msg);
+}
+
+static
+jobject get_class_loader(jclass cls) {
+ jvmtiError err = JVMTI_ERROR_NONE;
+ jobject loader = NULL;
+
+ if (printdump == JNI_TRUE) {
+ printf(">>> getting class loader ...\n");
+ }
+ err = (*jvmti)->GetClassLoader(jvmti, cls, &loader);
+ if (err != JVMTI_ERROR_NONE) {
+ printf(" Error in GetClassLoader: %s (%d)\n", TranslateError(err), err);
+ }
+ return loader;
+}
+
+static
+jclass jlrM(JNIEnv *env) {
+ jclass cls = NULL;
+
+ cls = JNI_ENV_PTR(env)->FindClass(JNI_ENV_ARG(env, MOD_CNAME));
+ if (cls == NULL) {
+ printf(" Error in JNI FindClass: %s\n", MOD_CNAME);
+ }
+ return cls;
+}
+
+jmethodID
+get_method(JNIEnv *env, jclass clazz, const char * name, const char *sig) {
+ jmethodID method = NULL;
+
+ method = JNI_ENV_PTR(env)->GetMethodID(JNI_ENV_ARG(env, clazz), name, sig);
+ if (method == NULL) {
+ printf(" Error in JNI GetMethodID %s with signature %s", name, sig);
+ }
+ return method;
+}
+
+static
+jobject get_module_loader(JNIEnv *env, jobject module) {
+ static jmethodID cl_method = NULL;
+ jobject loader = NULL;
+
+ if (cl_method == NULL) {
+ cl_method = get_method(env, jlrM(env), "getClassLoader", "()Ljava/lang/ClassLoader;");
+ }
+ loader = (jobject)JNI_ENV_PTR(env)->CallObjectMethod(JNI_ENV_ARG(env, module), cl_method);
+ return loader;
+}
+
+static
+const char* get_module_name(JNIEnv *env, jobject module) {
+ static jmethodID method = NULL;
+ jobject loader = NULL;
+ jstring jstr = NULL;
+ const char *name = NULL;
+ const char *nstr = NULL;
+
+ if (method == NULL) {
+ method = get_method(env, jlrM(env), "getName", "()Ljava/lang/String;");
+ }
+ jstr = (jstring)JNI_ENV_PTR(env)->CallObjectMethod(JNI_ENV_ARG(env, module), method);
+ if (jstr != NULL) {
+ name = JNI_ENV_PTR(env)->GetStringUTFChars(JNI_ENV_ARG(env, jstr), NULL);
+ }
+ loader = get_module_loader(env, module);
+ nstr = (name == NULL) ? "<UNNAMED>" : name;
+ printf(" loader: %p, module: %p, name: %s\n", loader, module, nstr);
+ return name;
+}
+
+static
+jvmtiError get_module(JNIEnv *env,
+ jobject loader,
+ const char* pkg_name,
+ jobject* module_ptr,
+ const char** mod_name_ptr) {
+ jvmtiError err = JVMTI_ERROR_NONE;
+ const char* name = (pkg_name == NULL) ? "<NULL>" : pkg_name;
+
+ printf(">>> getting module by loader %p and package \"%s\"\n", loader, name);
+ *mod_name_ptr = NULL;
+ err = (*jvmti)->GetNamedModule(jvmti, loader, pkg_name, module_ptr);
+ if (err != JVMTI_ERROR_NONE) {
+ printf(" Error in GetNamedModule for package \"%s\": %s (%d)\n",
+ pkg_name, TranslateError(err), err);
+ return err;
+ }
+ printf(" returned module: %p\n", *module_ptr);
+ if (*module_ptr == NULL) { // named module was not found
+ return err;
+ }
+ *mod_name_ptr = get_module_name(env, *module_ptr);
+ return err;
+}
+
+static
+jint get_all_modules(JNIEnv *env) {
+ jvmtiError err;
+ jint cnt = -1;
+ jint idx = 0;
+ jobject* modules;
+
+ printf(">>> Inspecting modules with GetAllModules\n");
+ err = (*jvmti)->GetAllModules(jvmti, &cnt, &modules);
+ if (err != JVMTI_ERROR_NONE) {
+ printf("Error in GetAllModules: %d\n", err);
+ return -1;
+ }
+ for (idx = 0; idx < cnt; ++idx) {
+ get_module_name(env, modules[idx]);
+ }
+ return cnt;
+}
+
+static
+jint check_bad_loader(JNIEnv *env, jobject loader) {
+ jvmtiError err = JVMTI_ERROR_NONE;
+ jobject module = NULL;
+ const char* mod_name = NULL;
+
+ err = get_module(env, loader, "", &module, &mod_name);
+ if (err != JVMTI_ERROR_ILLEGAL_ARGUMENT) {
+ return FAILED;
+ }
+ printf(" got expected JVMTI_ERROR_ILLEGAL_ARGUMENT for bad loader\n");
+ return PASSED;
+}
+
+static
+jint check_system_loader(JNIEnv *env, jobject loader) {
+ jvmtiError err = JVMTI_ERROR_NONE;
+ jobject module = NULL;
+ const char* exp_name = NULL;
+ const char* mod_name = NULL;
+
+ // NULL pointer for package name
+ err = get_module(env, loader, NULL, &module, &mod_name);
+ if (err != JVMTI_ERROR_NULL_POINTER) {
+ throw_exc(env, "check #SN1: failed to return JVMTI_ERROR_NULL_POINTER for NULL package");
+ return FAILED;
+ }
+
+ // NULL pointer for module_ptr
+ err = (*jvmti)->GetNamedModule(jvmti, loader, "", NULL);
+ if (err != JVMTI_ERROR_NULL_POINTER) {
+ throw_exc(env, "check #SN2: failed to return JVMTI_ERROR_NULL_POINTER for NULL module_ptr");
+ return FAILED;
+ }
+
+ // Unnamed/default package ""
+ err = get_module(env, loader, "", &module, &mod_name);
+ if (err != JVMTI_ERROR_NONE) {
+ throw_exc(env, "check #S1: failed to return JVMTI_ERROR_NONE for default package");
+ return FAILED;
+ }
+ if (module != NULL || mod_name != NULL) {
+ throw_exc(env, "check #S2: failed to return NULL-module for default package");
+ return FAILED;
+ }
+
+ // Test package: MyPackage
+ err = get_module(env, loader, "MyPackage", &module, &mod_name);
+ if (err != JVMTI_ERROR_NONE) {
+ throw_exc(env, "check #S3: failed to return JVMTI_ERROR_NONE for MyPackage");
+ return FAILED;
+ }
+ if (module != NULL || mod_name != NULL) {
+ throw_exc(env, "check #S4: failed to return NULL-module for MyPackage");
+ return FAILED;
+ }
+
+ // Package: com/sun/jdi
+ exp_name = "jdk.jdi";
+ err = get_module(env, loader, "com/sun/jdi", &module, &mod_name);
+ if (err != JVMTI_ERROR_NONE) {
+ throw_exc(env, "check #S5: failed to return JVMTI_ERROR_NONE for test package");
+ return FAILED;
+ }
+ if (module == NULL || mod_name == NULL) {
+ throw_exc(env, "check #S6: failed to return named module for com/sun/jdi package");
+ return FAILED;
+ }
+ if (strcmp(mod_name, exp_name) != 0) {
+ printf("check #S7: failed to return right module, expected: %s, returned: %s\n",
+ exp_name, mod_name);
+ throw_exc(env, "check #S7: failed to return jdk.jdi module for com/sun/jdi package");
+ return FAILED;
+ }
+
+ // Non-existing package: "bad/package/name"
+ err = get_module(env, loader, "bad/package/name", &module, &mod_name);
+ if (err != JVMTI_ERROR_NONE) {
+ throw_exc(env, "check #S8: failed to return JVMTI_ERROR_NONE for bad package");
+ return FAILED;
+ }
+ if (module != NULL || mod_name != NULL) {
+ throw_exc(env, "check #S9: failed to return NULL-module for bad package");
+ return FAILED;
+ }
+ return PASSED;
+}
+
+static
+jint check_bootstrap_loader(JNIEnv *env, jobject loader) {
+ jvmtiError err = JVMTI_ERROR_NONE;
+ jobject module = NULL;
+ const char* exp_name = NULL;
+ const char* mod_name = NULL;
+
+ // NULL pointer for package name
+ err = get_module(env, loader, NULL, &module, &mod_name);
+ if (err != JVMTI_ERROR_NULL_POINTER) {
+ throw_exc(env, "check #BN1: failed to return JVMTI_ERROR_NULL_POINTER for NULL package");
+ return FAILED;
+ }
+
+ // NULL pointer for module_ptr
+ err = (*jvmti)->GetNamedModule(jvmti, loader, "", NULL);
+ if (err != JVMTI_ERROR_NULL_POINTER) {
+ throw_exc(env, "check #BN2: failed to return JVMTI_ERROR_NULL_POINTER for NULL module_ptr");
+ return FAILED;
+ }
+
+ // Unnamed/default package ""
+ err = get_module(env, loader, "", &module, &mod_name);
+ if (err != JVMTI_ERROR_NONE) {
+ throw_exc(env, "check #B1: failed to return JVMTI_ERROR_NONE for default package");
+ return FAILED;
+ }
+ if (module != NULL || mod_name != NULL) {
+ throw_exc(env, "check #B2: failed to return NULL-module for default package");
+ return FAILED;
+ }
+
+ // Normal package from java.base module: "java/lang"
+ exp_name = "java.base";
+ err = get_module(env, loader, "java/lang", &module, &mod_name);
+ if (err != JVMTI_ERROR_NONE) {
+ throw_exc(env, "check #B3: failed to return JVMTI_ERROR_NONE for java/lang package");
+ return FAILED;
+ }
+ if (module == NULL || mod_name == NULL) {
+ throw_exc(env, "check #B4: failed to return named module for java/lang package");
+ return FAILED;
+ }
+ if (strcmp(exp_name, mod_name) != 0) {
+ printf("check #B5: failed to return right module, expected: %s, returned: %s\n",
+ exp_name, mod_name);
+ throw_exc(env, "check #B5: failed to return expected module for java/lang package");
+ return FAILED;
+ }
+
+ // Non-existing package: "bad/package/name"
+ err = get_module(env, loader, "bad/package/name", &module, &mod_name);
+ if (err != JVMTI_ERROR_NONE) {
+ throw_exc(env, "check #B6: failed to return JVMTI_ERROR_NONE for bad package");
+ return FAILED;
+ }
+ if (module != NULL || mod_name != NULL) {
+ throw_exc(env, "check #B7: failed to return NULL-module for bad package");
+ return FAILED;
+ }
+ return PASSED;
+}
+
+JNIEXPORT jint JNICALL
+Java_MyPackage_GetNamedModuleTest_check(JNIEnv *env, jclass cls) {
+ jobject loader = NULL;
+
+ if (jvmti == NULL) {
+ throw_exc(env, "JVMTI client was not properly loaded!\n");
+ return FAILED;
+ }
+
+ get_all_modules(env);
+
+ printf("\n*** Check for bad ClassLoader ***\n\n");
+ result = check_bad_loader(env, (jobject)cls);
+ if (result != PASSED) {
+ throw_exc(env, "check #L1: failed to return JVMTI_ERROR_ILLEGAL_ARGUMENT for bad loader");
+ return result;
+ }
+
+ loader = get_class_loader(cls);
+ if (loader == NULL) {
+ throw_exc(env, "check #L2: failed to return non-NULL loader for valid test class");
+ return FAILED;
+ }
+
+ printf("\n*** Checks for System ClassLoader ***\n\n");
+ result = check_system_loader(env, loader);
+ if (result != PASSED) {
+ return result;
+ }
+
+ printf("\n*** Checks for Bootstrap ClassLoader ***\n\n");
+ result = check_bootstrap_loader(env, NULL);
+
+ return result;
+}
+
+#ifdef __cplusplus
+}
+#endif
--- a/hotspot/test/serviceability/sa/TestClassLoaderStats.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/serviceability/sa/TestClassLoaderStats.java Wed Jul 05 21:59:15 2017 +0200
@@ -32,6 +32,9 @@
/*
* @test
+ * @summary Started failing on 2016.06.24 due to 8160376 on MacOS X so
+ * quarantine it on that platform:
+ * @requires os.family != "mac"
* @modules java.base/jdk.internal.misc
* @library /test/lib/share/classes
* @library /testlibrary
--- a/hotspot/test/serviceability/sa/TestStackTrace.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/serviceability/sa/TestStackTrace.java Wed Jul 05 21:59:15 2017 +0200
@@ -32,6 +32,9 @@
/*
* @test
+ * @summary Started failing on 2016.06.24 due to 8160376 on MacOS X so
+ * quarantine it on that platform:
+ * @requires os.family != "mac"
* @modules java.base/jdk.internal.misc
* @library /test/lib/share/classes
* @library /testlibrary
--- a/hotspot/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java Tue Jul 26 10:06:19 2016 -0700
+++ b/hotspot/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java Wed Jul 05 21:59:15 2017 +0200
@@ -43,6 +43,9 @@
* @bug 6313383
* @key regression
* @summary Regression test for hprof export issue due to large heaps (>2G)
+ * Started failing on 2016.06.24 due to 8160376 on MacOS X so quarantine
+ * it on that platform:
+ * @requires os.family != "mac"
* @library /testlibrary
* @modules java.base/jdk.internal.misc
* java.compiler