--- a/hotspot/agent/src/os/linux/ps_core.c Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/agent/src/os/linux/ps_core.c Thu Sep 26 13:33:01 2013 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -698,29 +698,58 @@
// read segments of a shared object
static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
- int i = 0;
- ELF_PHDR* phbuf;
- ELF_PHDR* lib_php = NULL;
+ int i = 0;
+ ELF_PHDR* phbuf;
+ ELF_PHDR* lib_php = NULL;
+
+ int page_size = sysconf(_SC_PAGE_SIZE);
- if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
- return false;
+ if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
+ return false;
+ }
+
+ // we want to process only PT_LOAD segments that are not writable.
+ // i.e., text segments. The read/write/exec (data) segments would
+ // have been already added from core file segments.
+ for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
+ if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
+
+ uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+ map_info *existing_map = core_lookup(ph, target_vaddr);
- // we want to process only PT_LOAD segments that are not writable.
- // i.e., text segments. The read/write/exec (data) segments would
- // have been already added from core file segments.
- for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
- if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
- if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
- goto err;
+ if (existing_map == NULL) {
+ if (add_map_info(ph, lib_fd, lib_php->p_offset,
+ target_vaddr, lib_php->p_filesz) == NULL) {
+ goto err;
+ }
+ } else {
+ if ((existing_map->memsz != page_size) &&
+ (existing_map->fd != lib_fd) &&
+ (existing_map->memsz != lib_php->p_filesz)) {
+
+ print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
+ target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+ goto err;
+ }
+
+ /* replace PT_LOAD segment with library segment */
+ print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
+ existing_map->memsz, lib_php->p_filesz);
+
+ existing_map->fd = lib_fd;
+ existing_map->offset = lib_php->p_offset;
+ existing_map->memsz = lib_php->p_filesz;
}
- lib_php++;
- }
+ }
+
+ lib_php++;
+ }
- free(phbuf);
- return true;
+ free(phbuf);
+ return true;
err:
- free(phbuf);
- return false;
+ free(phbuf);
+ return false;
}
// process segments from interpreter (ld.so or ld-linux.so)
--- a/hotspot/make/excludeSrc.make Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/make/excludeSrc.make Thu Sep 26 13:33:01 2013 -0700
@@ -88,7 +88,7 @@
g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
- heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
+ g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
--- a/hotspot/make/hotspot_version Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/make/hotspot_version Thu Sep 26 13:33:01 2013 -0700
@@ -35,7 +35,7 @@
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=51
+HS_BUILD_NUMBER=52
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
--- a/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/cpu/sparc/vm/vtableStubs_sparc.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -52,6 +52,11 @@
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@@ -125,6 +130,11 @@
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
const int sparc_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), sparc_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_32.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -58,6 +58,11 @@
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int i486_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@@ -132,6 +137,11 @@
// add code here, bump the code stub size returned by pd_code_size_limit!
const int i486_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), i486_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/cpu/x86/vm/vtableStubs_x86_64.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -49,6 +49,11 @@
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const int amd64_code_length = VtableStub::pd_code_size_limit(true);
VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
@@ -126,6 +131,11 @@
// returned by pd_code_size_limit!
const int amd64_code_length = VtableStub::pd_code_size_limit(false);
VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
+ // Can be NULL if there is no free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
ResourceMark rm;
CodeBuffer cb(s->entry_point(), amd64_code_length);
MacroAssembler* masm = new MacroAssembler(&cb);
--- a/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/c1/c1_GraphBuilder.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -4219,7 +4219,9 @@
}
}
- if (!PrintInlining) return;
+ if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
+ return;
+ }
CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
if (success && CIPrintMethodCodes) {
callee->print_codes();
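With this change, inlining decisions can be printed for a single method even when the global -XX:+PrintInlining flag is off, by attaching the option through CompileCommand; for example (the method pattern below is only an illustration):

    java -XX:CompileCommand=option,java/lang/String.indexOf,PrintInlining MyApp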
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -438,6 +438,29 @@
return true;
}
+bool java_lang_String::equals(oop str1, oop str2) {
+ assert(str1->klass() == SystemDictionary::String_klass(),
+ "must be java String");
+ assert(str2->klass() == SystemDictionary::String_klass(),
+ "must be java String");
+ typeArrayOop value1 = java_lang_String::value(str1);
+ int offset1 = java_lang_String::offset(str1);
+ int length1 = java_lang_String::length(str1);
+ typeArrayOop value2 = java_lang_String::value(str2);
+ int offset2 = java_lang_String::offset(str2);
+ int length2 = java_lang_String::length(str2);
+
+ if (length1 != length2) {
+ return false;
+ }
+ for (int i = 0; i < length1; i++) {
+ if (value1->char_at(i + offset1) != value2->char_at(i + offset2)) {
+ return false;
+ }
+ }
+ return true;
+}
+
void java_lang_String::print(Handle java_string, outputStream* st) {
oop obj = java_string();
assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -182,6 +182,7 @@
static unsigned int hash_string(oop java_string);
static bool equals(oop java_string, jchar* chars, int len);
+ static bool equals(oop str1, oop str2);
// Conversion between '.' and '/' formats
static Handle externalize_classname(Handle java_string, TRAPS) { return char_converter(java_string, '/', '.', THREAD); }
--- a/hotspot/src/share/vm/classfile/symbolTable.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -341,7 +341,7 @@
Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
unsigned int hashValue_arg, bool c_heap, TRAPS) {
- assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+ assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
// Don't allow symbols to be created which cannot fit in a Symbol*.
@@ -685,7 +685,7 @@
if (found_string != NULL) return found_string;
debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
- assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
+ assert(!Universe::heap()->is_in_reserved(name),
"proposed name of symbol must be stable");
Handle string;
@@ -807,6 +807,8 @@
}
}
+// This verification is part of Universe::verify() and needs to be quick.
+// See StringTable::verify_and_compare() below for exhaustive verification.
void StringTable::verify() {
for (int i = 0; i < the_table()->table_size(); ++i) {
HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
@@ -825,6 +827,162 @@
the_table()->dump_table(st, "StringTable");
}
+StringTable::VerifyRetTypes StringTable::compare_entries(
+ int bkt1, int e_cnt1,
+ HashtableEntry<oop, mtSymbol>* e_ptr1,
+ int bkt2, int e_cnt2,
+ HashtableEntry<oop, mtSymbol>* e_ptr2) {
+ // These entries are sanity checked by verify_and_compare_entries()
+ // before this function is called.
+ oop str1 = e_ptr1->literal();
+ oop str2 = e_ptr2->literal();
+
+ if (str1 == str2) {
+ tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
+ "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
+ str1, bkt1, e_cnt1, bkt2, e_cnt2);
+ return _verify_fail_continue;
+ }
+
+ if (java_lang_String::equals(str1, str2)) {
+ tty->print_cr("ERROR: identical String values in entry @ "
+ "bucket[%d][%d] and entry @ bucket[%d][%d]",
+ bkt1, e_cnt1, bkt2, e_cnt2);
+ return _verify_fail_continue;
+ }
+
+ return _verify_pass;
+}
+
+StringTable::VerifyRetTypes StringTable::verify_entry(int bkt, int e_cnt,
+ HashtableEntry<oop, mtSymbol>* e_ptr,
+ StringTable::VerifyMesgModes mesg_mode) {
+
+ VerifyRetTypes ret = _verify_pass; // be optimistic
+
+ oop str = e_ptr->literal();
+ if (str == NULL) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: NULL oop value in entry @ bucket[%d][%d]", bkt,
+ e_cnt);
+ }
+ // NULL oop means no more verifications are possible
+ return _verify_fail_done;
+ }
+
+ if (str->klass() != SystemDictionary::String_klass()) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: oop is not a String in entry @ bucket[%d][%d]",
+ bkt, e_cnt);
+ }
+ // not a String means no more verifications are possible
+ return _verify_fail_done;
+ }
+
+ unsigned int h = java_lang_String::hash_string(str);
+ if (e_ptr->hash() != h) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: broken hash value in entry @ bucket[%d][%d], "
+ "bkt_hash=%d, str_hash=%d", bkt, e_cnt, e_ptr->hash(), h);
+ }
+ ret = _verify_fail_continue;
+ }
+
+ if (the_table()->hash_to_index(h) != bkt) {
+ if (mesg_mode == _verify_with_mesgs) {
+ tty->print_cr("ERROR: wrong index value for entry @ bucket[%d][%d], "
+ "str_hash=%d, hash_to_index=%d", bkt, e_cnt, h,
+ the_table()->hash_to_index(h));
+ }
+ ret = _verify_fail_continue;
+ }
+
+ return ret;
+}
+
+// See StringTable::verify() above for the quick verification that is
+// part of Universe::verify(). This verification is exhaustive and
+// reports on every issue that is found. StringTable::verify() only
+// reports on the first issue that is found.
+//
+// StringTable::verify_entry() checks:
+// - oop value != NULL (same as verify())
+// - oop value is a String
+// - hash(String) == hash in entry (same as verify())
+// - index for hash == index of entry (same as verify())
+//
+// StringTable::compare_entries() checks:
+// - oops are unique across all entries
+// - String values are unique across all entries
+//
+int StringTable::verify_and_compare_entries() {
+ assert(StringTable_lock->is_locked(), "sanity check");
+
+ int fail_cnt = 0;
+
+ // first, verify all the entries individually:
+ for (int bkt = 0; bkt < the_table()->table_size(); bkt++) {
+ HashtableEntry<oop, mtSymbol>* e_ptr = the_table()->bucket(bkt);
+ for (int e_cnt = 0; e_ptr != NULL; e_ptr = e_ptr->next(), e_cnt++) {
+ VerifyRetTypes ret = verify_entry(bkt, e_cnt, e_ptr, _verify_with_mesgs);
+ if (ret != _verify_pass) {
+ fail_cnt++;
+ }
+ }
+ }
+
+ // Optimization: if the above check did not find any failures, then
+ // the comparison loop below does not need to call verify_entry()
+ // before calling compare_entries(). If there were failures, then we
+ // have to call verify_entry() to see if the entry can be passed to
+ // compare_entries() safely. When we call verify_entry() in the loop
+ // below, we do so quietly to avoid duplicate messages and we don't
+ // increment fail_cnt because the failures have already been counted.
+ bool need_entry_verify = (fail_cnt != 0);
+
+ // second, verify all entries relative to each other:
+ for (int bkt1 = 0; bkt1 < the_table()->table_size(); bkt1++) {
+ HashtableEntry<oop, mtSymbol>* e_ptr1 = the_table()->bucket(bkt1);
+ for (int e_cnt1 = 0; e_ptr1 != NULL; e_ptr1 = e_ptr1->next(), e_cnt1++) {
+ if (need_entry_verify) {
+ VerifyRetTypes ret = verify_entry(bkt1, e_cnt1, e_ptr1,
+ _verify_quietly);
+ if (ret == _verify_fail_done) {
+ // cannot use the current entry to compare against other entries
+ continue;
+ }
+ }
+
+ for (int bkt2 = bkt1; bkt2 < the_table()->table_size(); bkt2++) {
+ HashtableEntry<oop, mtSymbol>* e_ptr2 = the_table()->bucket(bkt2);
+ int e_cnt2;
+ for (e_cnt2 = 0; e_ptr2 != NULL; e_ptr2 = e_ptr2->next(), e_cnt2++) {
+ if (bkt1 == bkt2 && e_cnt2 <= e_cnt1) {
+ // skip the entries up to and including the one that
+ // we're comparing against
+ continue;
+ }
+
+ if (need_entry_verify) {
+ VerifyRetTypes ret = verify_entry(bkt2, e_cnt2, e_ptr2,
+ _verify_quietly);
+ if (ret == _verify_fail_done) {
+ // cannot compare against this entry
+ continue;
+ }
+ }
+
+ // compare two entries, report and count any failures:
+ if (compare_entries(bkt1, e_cnt1, e_ptr1, bkt2, e_cnt2, e_ptr2)
+ != _verify_pass) {
+ fail_cnt++;
+ }
+ }
+ }
+ }
+ }
+ return fail_cnt;
+}
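A hypothetical harness for the exhaustive check, honoring the lock assertion at the top of verify_and_compare_entries() (the surrounding code is a sketch, not part of this change):

    // Sketch: StringTable_lock must be held, per the assert above.
    {
      MutexLocker ml(StringTable_lock);
      int fail_cnt = StringTable::verify_and_compare_entries();
      if (fail_cnt != 0) {
        tty->print_cr("StringTable verification: %d failure(s) found", fail_cnt);
      }
    }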
// Create a new table and using alternate hash code, populate the new table
// with the existing strings. Set flag to use the alternate hash code afterwards.
--- a/hotspot/src/share/vm/classfile/symbolTable.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/classfile/symbolTable.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -311,6 +311,26 @@
static void verify();
static void dump(outputStream* st);
+ enum VerifyMesgModes {
+ _verify_quietly = 0,
+ _verify_with_mesgs = 1
+ };
+
+ enum VerifyRetTypes {
+ _verify_pass = 0,
+ _verify_fail_continue = 1,
+ _verify_fail_done = 2
+ };
+
+ static VerifyRetTypes compare_entries(int bkt1, int e_cnt1,
+ HashtableEntry<oop, mtSymbol>* e_ptr1,
+ int bkt2, int e_cnt2,
+ HashtableEntry<oop, mtSymbol>* e_ptr2);
+ static VerifyRetTypes verify_entry(int bkt, int e_cnt,
+ HashtableEntry<oop, mtSymbol>* e_ptr,
+ VerifyMesgModes mesg_mode);
+ static int verify_and_compare_entries();
+
// Sharing
static void copy_buckets(char** top, char*end) {
the_table()->Hashtable<oop, mtSymbol>::copy_buckets(top, end);
--- a/hotspot/src/share/vm/code/compiledIC.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/code/compiledIC.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -160,7 +160,7 @@
// High-level access to an inline cache. Guaranteed to be MT-safe.
-void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
+bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
@@ -170,8 +170,10 @@
assert(bytecode == Bytecodes::_invokeinterface, "");
int itable_index = call_info->itable_index();
entry = VtableStubs::find_itable_stub(itable_index);
+ if (entry == NULL) {
+ return false;
+ }
#ifdef ASSERT
- assert(entry != NULL, "entry not computed");
int index = call_info->resolved_method()->itable_index();
assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
@@ -184,6 +186,9 @@
int vtable_index = call_info->vtable_index();
assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
entry = VtableStubs::find_vtable_stub(vtable_index);
+ if (entry == NULL) {
+ return false;
+ }
InlineCacheBuffer::create_transition_stub(this, NULL, entry);
}
@@ -200,6 +205,7 @@
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_megamorphic(), "sanity check");
+ return true;
}
--- a/hotspot/src/share/vm/code/compiledIC.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/code/compiledIC.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -226,7 +226,10 @@
//
void set_to_clean(); // Can only be called during a safepoint operation
void set_to_monomorphic(CompiledICInfo& info);
- void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
+
+ // Returns true if successful and false otherwise. The call can fail if memory
+ // allocation in the code cache fails.
+ bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
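Call sites now have to cope with a false return when no stub could be allocated. One plausible shape for a caller, sketched under the assumption that resetting the inline cache and re-resolving on a later miss is acceptable (obtaining inline_cache, call_info and bc is elided):

    // Sketch of a call site adapted to the new bool return value.
    bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
    if (!successful) {
      // The code cache had no room for a vtable/itable stub; clean the
      // inline cache so the call is resolved again on a later IC miss.
      inline_cache->set_to_clean();
    }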
--- a/hotspot/src/share/vm/code/vtableStubs.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/code/vtableStubs.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -46,12 +46,9 @@
address VtableStub::_chunk_end = NULL;
VMReg VtableStub::_receiver_location = VMRegImpl::Bad();
-static int num_vtable_chunks = 0;
-
void* VtableStub::operator new(size_t size, int code_size) throw() {
assert(size == sizeof(VtableStub), "mismatched size");
- num_vtable_chunks++;
// compute real VtableStub size (rounded to nearest word)
const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
// malloc them in chunks to minimize header overhead
@@ -60,7 +57,7 @@
const int bytes = chunk_factor * real_size + pd_code_alignment();
BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
if (blob == NULL) {
- vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
+ return NULL;
}
_chunk = blob->content_begin();
_chunk_end = _chunk + bytes;
@@ -121,6 +118,12 @@
} else {
s = create_itable_stub(vtable_index);
}
+
+ // Creation of vtable or itable can fail if there is not enough free space in the code cache.
+ if (s == NULL) {
+ return NULL;
+ }
+
enter(is_vtable_stub, vtable_index, s);
if (PrintAdapterHandlers) {
tty->print_cr("Decoding VtableStub %s[%d]@%d",
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
+#ifndef PRODUCT
+void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
+ guarantee(_base != NULL, "Array not initialized");
+ guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
+ guarantee(_biased_base != NULL, "Array not initialized");
+ guarantee(biased_index >= bias() && biased_index < (bias() + length()),
+ err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
+ guarantee(_biased_base != NULL, "Array not initialized");
+ guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
+ err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
+}
+
+class TestMappedArray : public G1BiasedMappedArray<int> {
+protected:
+ virtual int default_value() const { return 0xBAADBABE; }
+public:
+ static void test_biasedarray() {
+ const size_t REGION_SIZE_IN_WORDS = 512;
+ const size_t NUM_REGIONS = 20;
+ HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
+
+ TestMappedArray array;
+ array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
+ REGION_SIZE_IN_WORDS * HeapWordSize);
+ // Check address calculation (bounds)
+ assert(array.bottom_address_mapped() == fake_heap,
+ err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
+ assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
+
+ int* bottom = array.address_mapped_to(fake_heap);
+ assert((void*)bottom == (void*) array.base(), "must be");
+ int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
+ assert((void*)end == (void*)(array.base() + array.length()), "must be");
+ // The entire array should contain default value elements
+ for (int* current = bottom; current < end; current++) {
+ assert(*current == array.default_value(), "must be");
+ }
+
+ // Test setting values in the table
+
+ HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
+ HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
+
+ // Set/get by address tests: invert some value; first retrieve one
+ int actual_value = array.get_by_index(NUM_REGIONS / 2);
+ array.set_by_index(NUM_REGIONS / 2, ~actual_value);
+ // Get the same value by address, should correspond to the start of the "region"
+ int value = array.get_by_address(region_start_address);
+ assert(value == ~actual_value, "must be");
+ // Get the same value by address, at one HeapWord before the start
+ value = array.get_by_address(region_start_address - 1);
+ assert(value == array.default_value(), "must be");
+ // Get the same value by address, at the end of the "region"
+ value = array.get_by_address(region_end_address);
+ assert(value == ~actual_value, "must be");
+ // Make sure the next value maps to another index
+ value = array.get_by_address(region_end_address + 1);
+ assert(value == array.default_value(), "must be");
+
+ // Reset the value in the array
+ array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
+
+ // The entire array should have the default value again
+ for (int* current = bottom; current < end; current++) {
+ assert(*current == array.default_value(), "must be");
+ }
+
+ // Set/get by index tests: invert some value
+ idx_t index = NUM_REGIONS / 2;
+ actual_value = array.get_by_index(index);
+ array.set_by_index(index, ~actual_value);
+
+ value = array.get_by_index(index);
+ assert(value == ~actual_value, "must be");
+
+ value = array.get_by_index(index - 1);
+ assert(value == array.default_value(), "must be");
+
+ value = array.get_by_index(index + 1);
+ assert(value == array.default_value(), "must be");
+
+ array.set_by_index(0, 0);
+ value = array.get_by_index(0);
+ assert(value == 0, "must be");
+
+ array.set_by_index(array.length() - 1, 0);
+ value = array.get_by_index(array.length() - 1);
+ assert(value == 0, "must be");
+
+ array.set_by_index(index, 0);
+
+ // The array should have three zeros, and default values otherwise
+ size_t num_zeros = 0;
+ for (int* current = bottom; current < end; current++) {
+ assert(*current == array.default_value() || *current == 0, "must be");
+ if (*current == 0) {
+ num_zeros++;
+ }
+ }
+ assert(num_zeros == 3, "must be");
+ }
+};
+
+void TestG1BiasedArray_test() {
+ TestMappedArray::test_biasedarray();
+}
+
+#endif
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
+
+#include "utilities/debug.hpp"
+#include "memory/allocation.inline.hpp"
+
+// Implements the common base functionality for arrays that contain provisions
+// for accessing their elements using a biased index.
+// The element type is defined by instantiating the template.
+class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
+ friend class VMStructs;
+public:
+ typedef size_t idx_t;
+protected:
+ address _base; // the real base address
+ size_t _length; // the length of the array
+ address _biased_base; // base address biased by "bias" elements
+ size_t _bias; // the bias, i.e. the number of elements by which _base is offset from _biased_base
+ uint _shift_by; // the number of bits to shift right when mapping an address to an index of the array.
+
+protected:
+
+ G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
+ _bias(0), _shift_by(0) { }
+
+ // Allocate a new array, generic version.
+ static address create_new_base_array(size_t length, size_t elem_size) {
+ assert(length > 0, "just checking");
+ assert(elem_size > 0, "just checking");
+ return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
+ }
+
+ // Initialize the members of this class. The biased start address of this array
+ // is the bias (in elements) multiplied by the element size.
+ void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
+ assert(base != NULL, "just checking");
+ assert(length > 0, "just checking");
+ assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
+ _base = base;
+ _length = length;
+ _biased_base = base - (bias * elem_size);
+ _bias = bias;
+ _shift_by = shift_by;
+ }
+
+ // Allocate and initialize this array to cover the heap addresses in the range
+ // of [bottom, end).
+ void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
+ assert(mapping_granularity_in_bytes > 0, "just checking");
+ assert(is_power_of_2(mapping_granularity_in_bytes),
+ err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
+ assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
+ err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+ mapping_granularity_in_bytes, bottom));
+ assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
+ err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
+ mapping_granularity_in_bytes, end));
+ size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
+ idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
+ address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
+ initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
+ }
+
+ size_t bias() const { return _bias; }
+ uint shift_by() const { return _shift_by; }
+
+ void verify_index(idx_t index) const PRODUCT_RETURN;
+ void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
+ void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
+
+public:
+ // Return the length of the array in elements.
+ size_t length() const { return _length; }
+};
+
+// Array that provides biased access and mapping from (valid) addresses in the
+// heap into this array.
+template<class T>
+class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
+public:
+ typedef G1BiasedMappedArrayBase::idx_t idx_t;
+
+ T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
+ // Return the element of the given array at the given index. Assume
+ // the index is valid. This is a convenience method that does sanity
+ // checking on the index.
+ T get_by_index(idx_t index) const {
+ verify_index(index);
+ return this->base()[index];
+ }
+
+ // Set the element of the given array at the given index to the
+ // given value. Assume the index is valid. This is a convenience
+ // method that does sanity checking on the index.
+ void set_by_index(idx_t index, T value) {
+ verify_index(index);
+ this->base()[index] = value;
+ }
+
+ // The raw biased base pointer.
+ T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
+
+ // Return the element of the given array that covers the given word in the
+ // heap. Assumes the index is valid.
+ T get_by_address(HeapWord* value) const {
+ idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
+ this->verify_biased_index(biased_index);
+ return biased_base()[biased_index];
+ }
+
+ // Set the value of the array entry that corresponds to the given address.
+ void set_by_address(HeapWord* address, T value) {
+ idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+ this->verify_biased_index(biased_index);
+ biased_base()[biased_index] = value;
+ }
+
+protected:
+ // Returns the address of the element the given address maps to
+ T* address_mapped_to(HeapWord* address) {
+ idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
+ this->verify_biased_index_inclusive_end(biased_index);
+ return biased_base() + biased_index;
+ }
+
+public:
+ // Return the smallest address (inclusive) in the heap that this array covers.
+ HeapWord* bottom_address_mapped() const {
+ return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
+ }
+
+ // Return the highest address (exclusive) in the heap that this array covers.
+ HeapWord* end_address_mapped() const {
+ return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
+ }
+
+protected:
+ virtual T default_value() const = 0;
+ // Set all elements of the given array to the given value.
+ void clear() {
+ T value = default_value();
+ for (idx_t i = 0; i < length(); i++) {
+ set_by_index(i, value);
+ }
+ }
+public:
+ G1BiasedMappedArray() {}
+
+ // Allocate and initialize this array to cover the heap addresses in the range
+ // of [bottom, end).
+ void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
+ G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
+ this->clear();
+ }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
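To make the biasing concrete, a worked example under assumed values (none of these numbers come from the patch): with a covered range starting at bottom = 0x80000000 and a mapping granularity of 4 KB, shift_by is 12 and bias = 0x80000000 >> 12 = 0x80000. Indexing off the biased base then maps an address to its slot with a single shift:

    // Illustrative arithmetic only.
    T* base        = (T*)_base;                      // real array start
    size_t bias    = (uintptr_t)bottom >> shift_by;  // 0x80000 in this example
    T* biased_base = base - bias;                    // == _biased_base
    // For any address addr in [bottom, end):
    //   biased_base[(uintptr_t)addr >> shift_by]
    //     == base[((uintptr_t)addr >> shift_by) - bias]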
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -2069,8 +2069,10 @@
_g1_storage.initialize(g1_rs, 0);
_g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
_hrs.initialize((HeapWord*) _g1_reserved.start(),
- (HeapWord*) _g1_reserved.end(),
- _expansion_regions);
+ (HeapWord*) _g1_reserved.end());
+ assert(_hrs.max_length() == _expansion_regions,
+ err_msg("max length: %u expansion regions: %u",
+ _hrs.max_length(), _expansion_regions));
// Do later initialization work for concurrent refinement.
_cg1r->init();
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -71,27 +71,16 @@
// Public
-void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
- uint max_length) {
+void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
"bottom should be heap region aligned");
assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
"end should be heap region aligned");
- _length = 0;
- _heap_bottom = bottom;
- _heap_end = end;
- _region_shift = HeapRegion::LogOfHRGrainBytes;
_next_search_index = 0;
_allocated_length = 0;
- _max_length = max_length;
- _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
- memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
- _regions_biased = _regions - ((uintx) bottom >> _region_shift);
-
- assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
- "bottom should be included in the region with index 0");
+ _regions.initialize(bottom, end, HeapRegion::GrainBytes);
}
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
@@ -101,15 +90,15 @@
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapWord* next_bottom = old_end;
- assert(_heap_bottom <= next_bottom, "invariant");
+ assert(heap_bottom() <= next_bottom, "invariant");
while (next_bottom < new_end) {
- assert(next_bottom < _heap_end, "invariant");
+ assert(next_bottom < heap_end(), "invariant");
uint index = length();
- assert(index < _max_length, "otherwise we cannot expand further");
+ assert(index < max_length(), "otherwise we cannot expand further");
if (index == 0) {
// We have not allocated any regions so far
- assert(next_bottom == _heap_bottom, "invariant");
+ assert(next_bottom == heap_bottom(), "invariant");
} else {
// next_bottom should match the end of the last/previous region
assert(next_bottom == at(index - 1)->end(), "invariant");
@@ -122,8 +111,8 @@
// allocation failed, we bail out and return what we have done so far
return MemRegion(old_end, next_bottom);
}
- assert(_regions[index] == NULL, "invariant");
- _regions[index] = new_hr;
+ assert(_regions.get_by_index(index) == NULL, "invariant");
+ _regions.set_by_index(index, new_hr);
increment_allocated_length();
}
// Have to increment the length first, otherwise we will get an
@@ -228,26 +217,26 @@
#ifndef PRODUCT
void HeapRegionSeq::verify_optional() {
- guarantee(_length <= _allocated_length,
+ guarantee(length() <= _allocated_length,
err_msg("invariant: _length: %u _allocated_length: %u",
- _length, _allocated_length));
- guarantee(_allocated_length <= _max_length,
+ length(), _allocated_length));
+ guarantee(_allocated_length <= max_length(),
err_msg("invariant: _allocated_length: %u _max_length: %u",
- _allocated_length, _max_length));
- guarantee(_next_search_index <= _length,
+ _allocated_length, max_length()));
+ guarantee(_next_search_index <= length(),
err_msg("invariant: _next_search_index: %u _length: %u",
- _next_search_index, _length));
+ _next_search_index, length()));
- HeapWord* prev_end = _heap_bottom;
+ HeapWord* prev_end = heap_bottom();
for (uint i = 0; i < _allocated_length; i += 1) {
- HeapRegion* hr = _regions[i];
+ HeapRegion* hr = _regions.get_by_index(i);
guarantee(hr != NULL, err_msg("invariant: i: %u", i));
guarantee(hr->bottom() == prev_end,
err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
i, HR_FORMAT_PARAMS(hr), prev_end));
guarantee(hr->hrs_index() == i,
err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
- if (i < _length) {
+ if (i < length()) {
// Asserts will fire if i is >= _length
HeapWord* addr = hr->bottom();
guarantee(addr_to_region(addr) == hr, "sanity");
@@ -265,8 +254,8 @@
prev_end = hr->end();
}
}
- for (uint i = _allocated_length; i < _max_length; i += 1) {
- guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
+ for (uint i = _allocated_length; i < max_length(); i += 1) {
+ guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
}
}
#endif // PRODUCT
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -25,10 +25,17 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+
class HeapRegion;
class HeapRegionClosure;
class FreeRegionList;
+class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
+ protected:
+ virtual HeapRegion* default_value() const { return NULL; }
+};
+
// This class keeps track of the region metadata (i.e., HeapRegion
// instances). They are kept in the _regions array in address
// order. A region's index in the array corresponds to its index in
@@ -44,35 +51,21 @@
//
// We keep track of three lengths:
//
-// * _length (returned by length()) is the number of currently
+// * _committed_length (returned by length()) is the number of currently
// committed regions.
// * _allocated_length (not exposed outside this class) is the
// number of regions for which we have HeapRegions.
-// * _max_length (returned by max_length()) is the maximum number of
-// regions the heap can have.
+// * max_length() returns the maximum number of regions the heap can have.
//
-// and maintain that: _length <= _allocated_length <= _max_length
+// and maintain that: _committed_length <= _allocated_length <= max_length()
class HeapRegionSeq: public CHeapObj<mtGC> {
friend class VMStructs;
- // The array that holds the HeapRegions.
- HeapRegion** _regions;
-
- // Version of _regions biased to address 0
- HeapRegion** _regions_biased;
+ G1HeapRegionTable _regions;
// The number of regions committed in the heap.
- uint _length;
-
- // The address of the first reserved word in the heap.
- HeapWord* _heap_bottom;
-
- // The address of the last reserved word in the heap - 1.
- HeapWord* _heap_end;
-
- // The log of the region byte size.
- uint _region_shift;
+ uint _committed_length;
// A hint for which index to start searching from for humongous
// allocations.
@@ -81,37 +74,33 @@
// The number of regions for which we have allocated HeapRegions for.
uint _allocated_length;
- // The maximum number of regions in the heap.
- uint _max_length;
-
// Find a contiguous set of empty regions of length num, starting
// from the given index.
uint find_contiguous_from(uint from, uint num);
- // Map a heap address to a biased region index. Assume that the
- // address is valid.
- inline uintx addr_to_index_biased(HeapWord* addr) const;
-
void increment_allocated_length() {
- assert(_allocated_length < _max_length, "pre-condition");
+ assert(_allocated_length < max_length(), "pre-condition");
_allocated_length++;
}
void increment_length() {
- assert(_length < _max_length, "pre-condition");
- _length++;
+ assert(length() < max_length(), "pre-condition");
+ _committed_length++;
}
void decrement_length() {
- assert(_length > 0, "pre-condition");
- _length--;
+ assert(length() > 0, "pre-condition");
+ _committed_length--;
}
+ HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
+ HeapWord* heap_end() const { return _regions.end_address_mapped(); }
+
public:
// Empty constructor, we'll initialize it with the initialize() method.
- HeapRegionSeq() { }
+ HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
- void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
+ void initialize(HeapWord* bottom, HeapWord* end);
// Return the HeapRegion at the given index. Assume that the index
// is valid.
@@ -126,10 +115,10 @@
inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
// Return the number of regions that have been committed in the heap.
- uint length() const { return _length; }
+ uint length() const { return _committed_length; }
// Return the maximum number of regions in the heap.
- uint max_length() const { return _max_length; }
+ uint max_length() const { return (uint)_regions.length(); }
// Expand the sequence to reflect that the heap has grown from
// old_end to new_end. Either create new HeapRegions, or re-use
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -28,28 +28,16 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
-inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
- assert(_heap_bottom <= addr && addr < _heap_end,
- err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
- addr, _heap_bottom, _heap_end));
- uintx index = (uintx) addr >> _region_shift;
- return index;
-}
-
inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
- assert(_heap_bottom <= addr && addr < _heap_end,
- err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
- addr, _heap_bottom, _heap_end));
- uintx index_biased = addr_to_index_biased(addr);
- HeapRegion* hr = _regions_biased[index_biased];
+ HeapRegion* hr = _regions.get_by_address(addr);
assert(hr != NULL, "invariant");
return hr;
}
inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
- if (addr != NULL && addr < _heap_end) {
- assert(addr >= _heap_bottom,
- err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
+ if (addr != NULL && addr < heap_end()) {
+ assert(addr >= heap_bottom(),
+ err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
return addr_to_region_unsafe(addr);
}
return NULL;
@@ -57,7 +45,7 @@
inline HeapRegion* HeapRegionSeq::at(uint index) const {
assert(index < length(), "pre-condition");
- HeapRegion* hr = _regions[index];
+ HeapRegion* hr = _regions.get_by_index(index);
assert(hr != NULL, "sanity");
assert(hr->hrs_index() == index, "sanity");
return hr;
--- a/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -34,8 +34,14 @@
static_field(HeapRegion, GrainBytes, size_t) \
static_field(HeapRegion, LogOfHRGrainBytes, int) \
\
- nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
- nonstatic_field(HeapRegionSeq, _length, uint) \
+ nonstatic_field(G1HeapRegionTable, _base, address) \
+ nonstatic_field(G1HeapRegionTable, _length, size_t) \
+ nonstatic_field(G1HeapRegionTable, _biased_base, address) \
+ nonstatic_field(G1HeapRegionTable, _bias, size_t) \
+ nonstatic_field(G1HeapRegionTable, _shift_by, uint) \
+ \
+ nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \
+ nonstatic_field(HeapRegionSeq, _committed_length, uint) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
@@ -58,6 +64,8 @@
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
\
+ declare_toplevel_type(G1HeapRegionTable) \
+ \
declare_type(G1CollectedHeap, SharedHeap) \
\
declare_type(HeapRegion, ContiguousSpace) \
--- a/hotspot/src/share/vm/memory/gcLocker.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/memory/gcLocker.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -122,7 +122,7 @@
// strictly needed. It's added here to make it clear that
// the GC will NOT be performed if any other caller
// of GC_locker::lock() still needs GC locked.
- if (!is_active()) {
+ if (!is_active_internal()) {
_doing_gc = true;
{
// Must give up the lock while at a safepoint
--- a/hotspot/src/share/vm/memory/gcLocker.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/memory/gcLocker.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -88,7 +88,7 @@
public:
// Accessors
static bool is_active() {
- assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
+ assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
return is_active_internal();
}
static bool needs_gc() { return _needs_gc; }
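The two gcLocker hunks are linked: once is_active() unconditionally asserts being at a safepoint, jni_unlock() (which runs in a JavaThread outside any safepoint) can no longer go through it, so it reads the raw state via is_active_internal(). A condensed sketch of the resulting split (simplified, not the full gcLocker code):

    // Checked accessor: only legal at a safepoint.
    static bool is_active() {
      assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
      return is_active_internal();
    }

    // Unchecked accessor: reads the raw lock state without the assert,
    // for callers such as jni_unlock() that run outside a safepoint.
    static bool is_active_internal();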
--- a/hotspot/src/share/vm/memory/metaspace.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "gc_interface/collectedHeap.hpp"
+#include "memory/allocation.hpp"
#include "memory/binaryTreeDictionary.hpp"
#include "memory/freeList.hpp"
#include "memory/collectorPolicy.hpp"
@@ -111,7 +112,7 @@
// Has three lists of free chunks, and a total size and
// count that includes all three
-class ChunkManager VALUE_OBJ_CLASS_SPEC {
+class ChunkManager : public CHeapObj<mtInternal> {
// Free list of chunks of different sizes.
// SpecializedChunk
@@ -158,7 +159,12 @@
public:
- ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
+ ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
+ : _free_chunks_total(0), _free_chunks_count(0) {
+ _free_chunks[SpecializedIndex].set_size(specialized_size);
+ _free_chunks[SmallIndex].set_size(small_size);
+ _free_chunks[MediumIndex].set_size(medium_size);
+ }
// add or delete (return) a chunk to the global freelist.
Metachunk* chunk_freelist_allocate(size_t word_size);
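Given the free_chunks(...)->set_size(...) calls removed from the VirtualSpaceList constructors later in this diff, the sized ChunkManager constructor is presumably invoked once per metadata type, roughly like this (the field names are assumptions for illustration):

    // Sketch: one ChunkManager per metadata type, sized at creation.
    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
    _chunk_manager_class    = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);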
@@ -219,7 +225,7 @@
void locked_print_free_chunks(outputStream* st);
void locked_print_sum_free_chunks(outputStream* st);
- void print_on(outputStream* st);
+ void print_on(outputStream* st) const;
};
// Used to manage the free list of Metablocks (a block corresponds
@@ -276,11 +282,6 @@
// VirtualSpace
Metachunk* first_chunk() { return (Metachunk*) bottom(); }
- void inc_container_count();
-#ifdef ASSERT
- uint container_count_slow();
-#endif
-
public:
VirtualSpaceNode(size_t byte_size);
@@ -314,8 +315,10 @@
void inc_top(size_t word_size) { _top += word_size; }
uintx container_count() { return _container_count; }
+ void inc_container_count();
void dec_container_count();
#ifdef ASSERT
+ uint container_count_slow();
void verify_container_count();
#endif
@@ -421,8 +424,6 @@
VirtualSpaceNode* _virtual_space_list;
// virtual space currently being used for allocations
VirtualSpaceNode* _current_virtual_space;
- // Free chunk list for all other metadata
- ChunkManager _chunk_manager;
// Can this virtual list allocate >1 spaces? Also, used to determine
// whether to allocate unlimited small chunks in this virtual space
@@ -475,7 +476,6 @@
return _current_virtual_space;
}
- ChunkManager* chunk_manager() { return &_chunk_manager; }
bool is_class() const { return _is_class; }
// Allocate the first virtualspace.
@@ -494,14 +494,7 @@
void dec_virtual_space_count();
// Unlink empty VirtualSpaceNodes and free it.
- void purge();
-
- // Used and capacity in the entire list of virtual spaces.
- // These are global values shared by all Metaspaces
- size_t capacity_words_sum();
- size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
- size_t used_words_sum();
- size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
+ void purge(ChunkManager* chunk_manager);
bool contains(const void *ptr);
@@ -582,18 +575,12 @@
// Type of metadata allocated.
Metaspace::MetadataType _mdtype;
- // Chunk related size
- size_t _medium_chunk_bunch;
-
// List of chunks in use by this SpaceManager. Allocations
// are done from the current chunk. The list is used for deallocating
// chunks when the SpaceManager is freed.
Metachunk* _chunks_in_use[NumberOfInUseLists];
Metachunk* _current_chunk;
- // Virtual space where allocation comes from.
- VirtualSpaceList* _vs_list;
-
// Number of small chunks to allocate to a manager
// If class space manager, small chunks are unlimited
static uint const _small_chunk_limit;
@@ -626,7 +613,9 @@
}
Metaspace::MetadataType mdtype() { return _mdtype; }
- VirtualSpaceList* vs_list() const { return _vs_list; }
+
+ VirtualSpaceList* vs_list() const { return Metaspace::get_space_list(_mdtype); }
+ ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
Metachunk* current_chunk() const { return _current_chunk; }
void set_current_chunk(Metachunk* v) {
@@ -648,18 +637,19 @@
public:
SpaceManager(Metaspace::MetadataType mdtype,
- Mutex* lock,
- VirtualSpaceList* vs_list);
+ Mutex* lock);
~SpaceManager();
enum ChunkMultiples {
MediumChunkMultiple = 4
};
+ bool is_class() { return _mdtype == Metaspace::ClassType; }
+
// Accessors
size_t specialized_chunk_size() { return SpecializedChunk; }
- size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
- size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
+ size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
+ size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
size_t allocated_blocks_words() const { return _allocated_blocks_words; }
@@ -762,7 +752,7 @@
_container_count++;
assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
- "container_count_slow() " SIZE_FORMAT,
+ " container_count_slow() " SIZE_FORMAT,
_container_count, container_count_slow()));
}
@@ -775,7 +765,7 @@
void VirtualSpaceNode::verify_container_count() {
assert(_container_count == container_count_slow(),
err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
- "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
+ " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
}
#endif
@@ -1020,7 +1010,7 @@
// Walk the list of VirtualSpaceNodes and delete
// nodes with a 0 container_count. Remove Metachunks in
// the node from their respective freelists.
-void VirtualSpaceList::purge() {
+void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
assert_lock_strong(SpaceManager::expand_lock());
// Don't use a VirtualSpaceListIterator because this
// list is being changed and a straightforward use of an iterator is not safe.
@@ -1042,7 +1032,7 @@
prev_vsl->set_next(vsl->next());
}
- vsl->purge(chunk_manager());
+ vsl->purge(chunk_manager);
dec_reserved_words(vsl->reserved_words());
dec_committed_words(vsl->committed_words());
dec_virtual_space_count();
@@ -1064,36 +1054,6 @@
#endif
}
-size_t VirtualSpaceList::used_words_sum() {
- size_t allocated_by_vs = 0;
- VirtualSpaceListIterator iter(virtual_space_list());
- while (iter.repeat()) {
- VirtualSpaceNode* vsl = iter.get_next();
- // Sum used region [bottom, top) in each virtualspace
- allocated_by_vs += vsl->used_words_in_vs();
- }
- assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
- err_msg("Total in free chunks " SIZE_FORMAT
- " greater than total from virtual_spaces " SIZE_FORMAT,
- allocated_by_vs, chunk_manager()->free_chunks_total_words()));
- size_t used =
- allocated_by_vs - chunk_manager()->free_chunks_total_words();
- return used;
-}
-
-// Space available in all MetadataVirtualspaces allocated
-// for metadata. This is the upper limit on the capacity
-// of chunks allocated out of all the MetadataVirtualspaces.
-size_t VirtualSpaceList::capacity_words_sum() {
- size_t capacity = 0;
- VirtualSpaceListIterator iter(virtual_space_list());
- while (iter.repeat()) {
- VirtualSpaceNode* vsl = iter.get_next();
- capacity += vsl->capacity_words_in_vs();
- }
- return capacity;
-}
-
VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
_is_class(false),
_virtual_space_list(NULL),
@@ -1104,10 +1064,6 @@
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
bool initialization_succeeded = grow_vs(word_size);
-
- _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
- _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
- _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
assert(initialization_succeeded,
" VirtualSpaceList initialization should not fail");
}
@@ -1123,9 +1079,6 @@
Mutex::_no_safepoint_check_flag);
VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
bool succeeded = class_entry->initialize();
- _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
- _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
- _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
assert(succeeded, " VirtualSpaceList initialization should not fail");
link_vs(class_entry);
}
@@ -1142,7 +1095,7 @@
}
// Reserve the space
size_t vs_byte_size = vs_word_size * BytesPerWord;
- assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
+ assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
// Allocate the meta virtual space and initialize it.
VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@@ -1195,15 +1148,8 @@
size_t grow_chunks_by_words,
size_t medium_chunk_bunch) {
- // Get a chunk from the chunk freelist
- Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
-
- if (next != NULL) {
- next->container()->inc_container_count();
- } else {
- // Allocate a chunk out of the current virtual space.
- next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
- }
+ // Allocate a chunk out of the current virtual space.
+ Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
if (next == NULL) {
// Not enough room in current virtual space. Try to commit
@@ -1221,12 +1167,14 @@
// being used for CompressedHeaders, don't allocate a new virtualspace.
if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
// Get another virtual space.
- size_t grow_vs_words =
- MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
+ size_t allocation_aligned_expand_words =
+ align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
+ size_t grow_vs_words =
+ MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
if (grow_vs(grow_vs_words)) {
// Got it. It's on the list now. Get a chunk from it.
assert(current_virtual_space()->expanded_words() == 0,
- "New virtuals space nodes should not have expanded");
+ "New virtual space nodes should not have expanded");
size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
page_size_words);
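
[Review note] The alignment change above matters on Windows, where the 4K page
size is smaller than the 64K allocation granularity, so a merely page-aligned
expansion request can fail to reserve. A minimal sketch of the arithmetic,
assuming typical values (4K pages, 64K granularity, 8-byte words); the helper
mirrors HotSpot's align_size_up, but the constants are illustrative:

#include <cassert>
#include <cstddef>

// Round size up to the next multiple of alignment (a power of two).
static size_t align_size_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t BytesPerWord = 8;
  const size_t granularity_words = 64 * 1024 / BytesPerWord; // 8192 words
  const size_t page_words        =  4 * 1024 / BytesPerWord; //  512 words
  // A one-page expansion request rounds up to a full 64K granule.
  assert(align_size_up(page_words, granularity_words) == granularity_words);
  return 0;
}
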
@@ -1342,8 +1290,9 @@
// reserved space, because this is a larger space prereserved for compressed
// class pointers.
if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
- size_t real_allocated = Metaspace::space_list()->reserved_words() +
- MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
+ size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
+ size_t class_allocated = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
+ size_t real_allocated = nonclass_allocated + class_allocated;
if (real_allocated >= MaxMetaspaceSize) {
return false;
}
@@ -1536,15 +1485,15 @@
if (dummy_chunk == NULL) {
break;
}
- vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
+ sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
sm->sum_count_in_chunks_in_use());
dummy_chunk->print_on(gclog_or_tty);
gclog_or_tty->print_cr(" Free chunks total %d count %d",
- vsl->chunk_manager()->free_chunks_total_words(),
- vsl->chunk_manager()->free_chunks_count());
+ sm->chunk_manager()->free_chunks_total_words(),
+ sm->chunk_manager()->free_chunks_count());
}
}
} else {
@@ -1796,6 +1745,8 @@
// work.
chunk->set_is_free(false);
#endif
+ chunk->container()->inc_container_count();
+
slow_locked_verify();
return chunk;
}
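
[Review note] Moving inc_container_count() into the freelist allocator (paired
with its removal from VirtualSpaceList::get_new_chunk earlier in this patch)
makes the freelist the single place where a recycled chunk's container count
is bumped. A hedged sketch of that invariant, using illustrative stand-in
types rather than HotSpot's Metachunk/VirtualSpaceNode:

#include <cstddef>

struct Container { int live_chunks = 0; };            // cf. VirtualSpaceNode
struct Chunk { Container* container; Chunk* next; };  // cf. Metachunk

struct ChunkFreelist {
  Chunk* head = nullptr;

  // Every successful allocation bumps the count here, so no caller can forget.
  Chunk* allocate() {
    if (head == nullptr) return nullptr;  // caller falls back to fresh space
    Chunk* c = head;
    head = c->next;
    c->container->live_chunks++;
    return c;
  }

  // Symmetric decrement on the return path (cf. dec_container_count above).
  void deallocate(Chunk* c) {
    c->container->live_chunks--;
    c->next = head;
    head = c;
  }
};
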
@@ -1830,9 +1781,9 @@
return chunk;
}
-void ChunkManager::print_on(outputStream* out) {
+void ChunkManager::print_on(outputStream* out) const {
if (PrintFLSStatistics != 0) {
- humongous_dictionary()->report_statistics();
+ const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
}
}
@@ -1979,8 +1930,8 @@
}
}
- vs_list()->chunk_manager()->locked_print_free_chunks(st);
- vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
+ chunk_manager()->locked_print_free_chunks(st);
+ chunk_manager()->locked_print_sum_free_chunks(st);
}
size_t SpaceManager::calc_chunk_size(size_t word_size) {
@@ -2084,9 +2035,7 @@
}
SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
- Mutex* lock,
- VirtualSpaceList* vs_list) :
- _vs_list(vs_list),
+ Mutex* lock) :
_mdtype(mdtype),
_allocated_blocks_words(0),
_allocated_chunks_words(0),
@@ -2172,9 +2121,7 @@
MutexLockerEx fcl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
- ChunkManager* chunk_manager = vs_list()->chunk_manager();
-
- chunk_manager->slow_locked_verify();
+ chunk_manager()->slow_locked_verify();
dec_total_from_size_metrics();
@@ -2188,8 +2135,8 @@
// Have to update before the chunks_in_use lists are emptied
// below.
- chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
- sum_count_in_chunks_in_use());
+ chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
+ sum_count_in_chunks_in_use());
// Add all the chunks in use by this space manager
// to the global list of free chunks.
@@ -2204,11 +2151,11 @@
chunk_size_name(i));
}
Metachunk* chunks = chunks_in_use(i);
- chunk_manager->return_chunks(i, chunks);
+ chunk_manager()->return_chunks(i, chunks);
set_chunks_in_use(i, NULL);
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("updated freelist count %d %s",
- chunk_manager->free_chunks(i)->count(),
+ chunk_manager()->free_chunks(i)->count(),
chunk_size_name(i));
}
assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
@@ -2245,16 +2192,16 @@
humongous_chunks->word_size(), HumongousChunkGranularity));
Metachunk* next_humongous_chunks = humongous_chunks->next();
humongous_chunks->container()->dec_container_count();
- chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
+ chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
humongous_chunks = next_humongous_chunks;
}
if (TraceMetadataChunkAllocation && Verbose) {
gclog_or_tty->print_cr("");
gclog_or_tty->print_cr("updated dictionary count %d %s",
- chunk_manager->humongous_dictionary()->total_count(),
+ chunk_manager()->humongous_dictionary()->total_count(),
chunk_size_name(HumongousIndex));
}
- chunk_manager->slow_locked_verify();
+ chunk_manager()->slow_locked_verify();
}
const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
@@ -2343,9 +2290,7 @@
gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
sum_count_in_chunks_in_use());
new_chunk->print_on(gclog_or_tty);
- if (vs_list() != NULL) {
- vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
- }
+ chunk_manager()->locked_print_free_chunks(gclog_or_tty);
}
}
@@ -2361,10 +2306,14 @@
Metachunk* SpaceManager::get_new_chunk(size_t word_size,
size_t grow_chunks_by_words) {
-
- Metachunk* next = vs_list()->get_new_chunk(word_size,
- grow_chunks_by_words,
- medium_chunk_bunch());
+ // Get a chunk from the chunk freelist
+ Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
+
+ if (next == NULL) {
+ next = vs_list()->get_new_chunk(word_size,
+ grow_chunks_by_words,
+ medium_chunk_bunch());
+ }
if (TraceMetadataHumongousAllocation && next != NULL &&
SpaceManager::is_humongous(next->word_size())) {
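
[Review note] Together with the VirtualSpaceList hunk earlier, this inverts
the allocation order: the SpaceManager now consults the global chunk freelist
first and only carves new virtual space on a miss. A hedged sketch of the
resulting control flow, with stubbed, illustrative types (the real methods
take word sizes and a medium-chunk bunch hint):

#include <cstddef>

struct Chunk;

struct ChunkManagerSketch {
  Chunk* chunk_freelist_allocate(size_t /*words*/) { return nullptr; }  // stub
};

struct VirtualSpaceListSketch {
  Chunk* get_new_chunk(size_t /*words*/) { return nullptr; }            // stub
};

Chunk* get_new_chunk(ChunkManagerSketch& cm, VirtualSpaceListSketch& vsl,
                     size_t words) {
  Chunk* next = cm.chunk_freelist_allocate(words);  // 1) recycle a freed chunk
  if (next == nullptr) {
    next = vsl.get_new_chunk(words);                // 2) commit fresh space
  }
  return next;  // may still be null if reservation/commit failed
}
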
@@ -2644,13 +2593,12 @@
size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
- VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
- if (list == NULL) {
+ ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
+ if (chunk_manager == NULL) {
return 0;
}
- ChunkManager* chunk = list->chunk_manager();
- chunk->slow_verify();
- return chunk->free_chunks_total_words();
+ chunk_manager->slow_verify();
+ return chunk_manager->free_chunks_total_words();
}
size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
@@ -2801,9 +2749,9 @@
}
void MetaspaceAux::verify_free_chunks() {
- Metaspace::space_list()->chunk_manager()->verify();
+ Metaspace::chunk_manager_metadata()->verify();
if (Metaspace::using_class_space()) {
- Metaspace::class_space_list()->chunk_manager()->verify();
+ Metaspace::chunk_manager_class()->verify();
}
}
@@ -2874,6 +2822,9 @@
VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;
+ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
+ChunkManager* Metaspace::_chunk_manager_class = NULL;
+
#define VIRTUALSPACEMULTIPLIER 2
#ifdef _LP64
@@ -2981,6 +2932,7 @@
err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
assert(using_class_space(), "Must be using class space");
_class_space_list = new VirtualSpaceList(rs);
+ _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
}
#endif
@@ -3006,6 +2958,7 @@
// remainder is the misc code and data chunks.
cds_total = FileMapInfo::shared_spaces_size();
_space_list = new VirtualSpaceList(cds_total/wordSize);
+ _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
#ifdef _LP64
// Set the compressed klass pointer base so that decoding of these pointers works
@@ -3073,15 +3026,30 @@
size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
// Initialize the list of virtual spaces.
_space_list = new VirtualSpaceList(word_size);
+ _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
}
}
+Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
+ size_t chunk_word_size,
+ size_t chunk_bunch) {
+ // Get a chunk from the chunk freelist
+ Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
+ if (chunk != NULL) {
+ return chunk;
+ }
+
+ return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
+}
+
void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
assert(space_list() != NULL,
"Metadata VirtualSpaceList has not been initialized");
-
- _vsm = new SpaceManager(NonClassType, lock, space_list());
+ assert(chunk_manager_metadata() != NULL,
+ "Metadata ChunkManager has not been initialized");
+
+ _vsm = new SpaceManager(NonClassType, lock);
if (_vsm == NULL) {
return;
}
@@ -3090,11 +3058,13 @@
vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
if (using_class_space()) {
- assert(class_space_list() != NULL,
- "Class VirtualSpaceList has not been initialized");
+ assert(class_space_list() != NULL,
+ "Class VirtualSpaceList has not been initialized");
+ assert(chunk_manager_class() != NULL,
+ "Class ChunkManager has not been initialized");
// Allocate SpaceManager for classes.
- _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
+ _class_vsm = new SpaceManager(ClassType, lock);
if (_class_vsm == NULL) {
return;
}
@@ -3103,9 +3073,9 @@
MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
// Allocate chunk for metadata objects
- Metachunk* new_chunk =
- space_list()->get_initialization_chunk(word_size,
- vsm()->medium_chunk_bunch());
+ Metachunk* new_chunk = get_initialization_chunk(NonClassType,
+ word_size,
+ vsm()->medium_chunk_bunch());
assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
if (new_chunk != NULL) {
// Add to this manager's list of chunks in use and current_chunk().
@@ -3114,9 +3084,9 @@
// Allocate chunk for class metadata objects
if (using_class_space()) {
- Metachunk* class_chunk =
- class_space_list()->get_initialization_chunk(class_word_size,
- class_vsm()->medium_chunk_bunch());
+ Metachunk* class_chunk = get_initialization_chunk(ClassType,
+ class_word_size,
+ class_vsm()->medium_chunk_bunch());
if (class_chunk != NULL) {
class_vsm()->add_chunk(class_chunk, true);
}
@@ -3333,12 +3303,16 @@
}
}
+void Metaspace::purge(MetadataType mdtype) {
+ get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
+}
+
void Metaspace::purge() {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
- space_list()->purge();
+ purge(NonClassType);
if (using_class_space()) {
- class_space_list()->purge();
+ purge(ClassType);
}
}
@@ -3385,7 +3359,7 @@
#ifndef PRODUCT
-class MetaspaceAuxTest : AllStatic {
+class TestMetaspaceAuxTest : AllStatic {
public:
static void test_reserved() {
size_t reserved = MetaspaceAux::reserved_bytes();
@@ -3425,14 +3399,25 @@
}
}
+ static void test_virtual_space_list_large_chunk() {
+ VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
+ MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
+ // Use a size larger than VirtualSpaceSize (256k), and add one page to make
+ // it _not_ be vm_allocation_granularity aligned on Windows.
+ size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
+ large_size += (os::vm_page_size()/BytesPerWord);
+ vs_list->get_new_chunk(large_size, large_size, 0);
+ }
+
static void test() {
test_reserved();
test_committed();
+ test_virtual_space_list_large_chunk();
}
};
-void MetaspaceAux_test() {
- MetaspaceAuxTest::test();
+void TestMetaspaceAux_test() {
+ TestMetaspaceAuxTest::test();
}
#endif
--- a/hotspot/src/share/vm/memory/metaspace.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspace.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -56,12 +56,15 @@
// +-------------------+
//
+class ChunkManager;
class ClassLoaderData;
class Metablock;
+class Metachunk;
class MetaWord;
class Mutex;
class outputStream;
class SpaceManager;
+class VirtualSpaceList;
// Metaspaces each have a SpaceManager and allocations
// are done by the SpaceManager. Allocations are done
@@ -76,8 +79,6 @@
// allocate() method returns a block for use as a
// quantum of metadata.
-class VirtualSpaceList;
-
class Metaspace : public CHeapObj<mtClass> {
friend class VMStructs;
friend class SpaceManager;
@@ -102,6 +103,10 @@
private:
void initialize(Mutex* lock, MetaspaceType type);
+ Metachunk* get_initialization_chunk(MetadataType mdtype,
+ size_t chunk_word_size,
+ size_t chunk_bunch);
+
// Align up the word size to the allocation word size
static size_t align_word_size_up(size_t);
@@ -134,6 +139,10 @@
static VirtualSpaceList* _space_list;
static VirtualSpaceList* _class_space_list;
+ static ChunkManager* _chunk_manager_metadata;
+ static ChunkManager* _chunk_manager_class;
+
+ public:
static VirtualSpaceList* space_list() { return _space_list; }
static VirtualSpaceList* class_space_list() { return _class_space_list; }
static VirtualSpaceList* get_space_list(MetadataType mdtype) {
@@ -141,6 +150,14 @@
return mdtype == ClassType ? class_space_list() : space_list();
}
+ static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
+ static ChunkManager* chunk_manager_class() { return _chunk_manager_class; }
+ static ChunkManager* get_chunk_manager(MetadataType mdtype) {
+ assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
+ return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
+ }
+
+ private:
// This is used by DumpSharedSpaces only, where only _vsm is used. So we will
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
@@ -199,6 +216,7 @@
void dump(outputStream* const out) const;
// Free empty virtualspaces
+ static void purge(MetadataType mdtype);
static void purge();
void print_on(outputStream* st) const;
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -103,9 +103,10 @@
if (k->oop_is_instance()) {
InstanceKlass* ik = InstanceKlass::cast(k);
for (int i = 0; i < ik->methods()->length(); i++) {
- ResourceMark rm;
Method* m = ik->methods()->at(i);
- (new Fingerprinter(m))->fingerprint();
+ Fingerprinter fp(m);
+ // The side effect of this call is to set the method's fingerprint field.
+ fp.fingerprint();
}
}
}
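
[Review note] The old loop heap-allocated a Fingerprinter per method and
leaned on the (now removed) ResourceMark for cleanup; the new code
stack-allocates the helper, which is cheaper and leak-proof. A minimal sketch
of the pattern, with an illustrative Fingerprinter rather than HotSpot's:

struct Method { unsigned long long _fingerprint = 0; };

class Fingerprinter {               // illustrative stand-in
  Method* _m;
 public:
  explicit Fingerprinter(Method* m) : _m(m) {}
  // Called for its side effect: computes and caches the fingerprint.
  void fingerprint() { _m->_fingerprint = 0xCAFEBABEull; }
};

void fingerprint_all(Method* methods, int length) {
  for (int i = 0; i < length; i++) {
    Fingerprinter fp(&methods[i]);  // automatic storage: no new, no leak
    fp.fingerprint();
  }
}
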
--- a/hotspot/src/share/vm/oops/constantPool.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -108,16 +108,16 @@
void ConstantPool::initialize_resolved_references(ClassLoaderData* loader_data,
intStack reference_map,
int constant_pool_map_length,
- TRAPS) {
+ TRAPS) {
// Initialized the resolved object cache.
int map_length = reference_map.length();
if (map_length > 0) {
// Only need mapping back to constant pool entries. The map isn't used for
- // invokedynamic resolved_reference entries. The constant pool cache index
- // has the mapping back to both the constant pool and to the resolved
- // reference index.
+ // invokedynamic resolved_reference entries. For invokedynamic entries,
+ // the constant pool cache index has the mapping back to both the constant
+ // pool and to the resolved reference index.
if (constant_pool_map_length > 0) {
- Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, map_length, CHECK);
+ Array<u2>* om = MetadataFactory::new_array<u2>(loader_data, constant_pool_map_length, CHECK);
for (int i = 0; i < constant_pool_map_length; i++) {
int x = reference_map.at(i);
@@ -182,16 +182,9 @@
int ConstantPool::cp_to_object_index(int cp_index) {
// this is harder; don't do this so much.
- for (int i = 0; i< reference_map()->length(); i++) {
- if (reference_map()->at(i) == cp_index) return i;
- // Zero entry is divider between constant pool indices for strings,
- // method handles and method types. After that the index is a constant
- // pool cache index for invokedynamic. Stop when zero (which can never
- // be a constant pool index)
- if (reference_map()->at(i) == 0) break;
- }
- // We might not find the index.
- return _no_index_sentinel;
+ int i = reference_map()->find(cp_index);
+ // We might not find the index for a jsr292 call.
+ return (i < 0) ? _no_index_sentinel : i;
}
Klass* ConstantPool::klass_at_impl(constantPoolHandle this_oop, int which, TRAPS) {
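
[Review note] The rewrite above collapses a hand-rolled scan (with a
zero-divider early exit) into Array<u2>::find, which returns the element's
index or a negative value when absent — exactly the shape the sentinel
mapping needs. A hedged equivalent using a standard container in place of
HotSpot's Array<u2>, with an illustrative sentinel value:

#include <cstddef>
#include <vector>

static const int _no_index_sentinel = -1;  // illustrative value

int cp_to_object_index(const std::vector<unsigned short>& reference_map,
                       int cp_index) {
  for (std::size_t i = 0; i < reference_map.size(); i++) {
    if (reference_map[i] == cp_index) {
      return static_cast<int>(i);
    }
  }
  return _no_index_sentinel;  // not found, e.g. for a jsr292 call
}
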
@@ -840,8 +833,7 @@
// If the string has already been interned, this entry will be non-null
oop str = this_oop->resolved_references()->obj_at(obj_index);
if (str != NULL) return str;
-
- Symbol* sym = this_oop->unresolved_string_at(which);
+ Symbol* sym = this_oop->unresolved_string_at(which);
str = StringTable::intern(sym, CHECK_(NULL));
this_oop->string_at_put(which, obj_index, str);
assert(java_lang_String::is_instance(str), "must be string");
@@ -1619,9 +1611,11 @@
case JVM_CONSTANT_UnresolvedClassInError:
case JVM_CONSTANT_StringIndex:
case JVM_CONSTANT_MethodType:
+ case JVM_CONSTANT_MethodTypeInError:
return 3;
case JVM_CONSTANT_MethodHandle:
+ case JVM_CONSTANT_MethodHandleInError:
return 4; //tag, ref_kind, ref_index
case JVM_CONSTANT_Integer:
@@ -1802,8 +1796,8 @@
case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodHandleInError: {
*bytes = JVM_CONSTANT_MethodHandle;
- int kind = method_handle_ref_kind_at(idx);
- idx1 = method_handle_index_at(idx);
+ int kind = method_handle_ref_kind_at_error_ok(idx);
+ idx1 = method_handle_index_at_error_ok(idx);
*(bytes+1) = (unsigned char) kind;
Bytes::put_Java_u2((address) (bytes+2), idx1);
DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1));
@@ -1812,7 +1806,7 @@
case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_MethodTypeInError: {
*bytes = JVM_CONSTANT_MethodType;
- idx1 = method_type_index_at(idx);
+ idx1 = method_type_index_at_error_ok(idx);
Bytes::put_Java_u2((address) (bytes+1), idx1);
DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
break;
@@ -2000,12 +1994,12 @@
break;
case JVM_CONSTANT_MethodHandle :
case JVM_CONSTANT_MethodHandleInError :
- st->print("ref_kind=%d", method_handle_ref_kind_at(index));
- st->print(" ref_index=%d", method_handle_index_at(index));
+ st->print("ref_kind=%d", method_handle_ref_kind_at_error_ok(index));
+ st->print(" ref_index=%d", method_handle_index_at_error_ok(index));
break;
case JVM_CONSTANT_MethodType :
case JVM_CONSTANT_MethodTypeInError :
- st->print("signature_index=%d", method_type_index_at(index));
+ st->print("signature_index=%d", method_type_index_at_error_ok(index));
break;
case JVM_CONSTANT_InvokeDynamic :
{
--- a/hotspot/src/share/vm/oops/constantPool.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -231,7 +231,6 @@
static int cache_offset_in_bytes() { return offset_of(ConstantPool, _cache); }
static int pool_holder_offset_in_bytes() { return offset_of(ConstantPool, _pool_holder); }
static int resolved_references_offset_in_bytes() { return offset_of(ConstantPool, _resolved_references); }
- static int reference_map_offset_in_bytes() { return offset_of(ConstantPool, _reference_map); }
// Storing constants
@@ -475,18 +474,42 @@
return *int_at_addr(which);
}
+ private:
+ int method_handle_ref_kind_at(int which, bool error_ok) {
+ assert(tag_at(which).is_method_handle() ||
+ (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+ return extract_low_short_from_int(*int_at_addr(which)); // mask out unwanted ref_index bits
+ }
+ int method_handle_index_at(int which, bool error_ok) {
+ assert(tag_at(which).is_method_handle() ||
+ (error_ok && tag_at(which).is_method_handle_in_error()), "Corrupted constant pool");
+ return extract_high_short_from_int(*int_at_addr(which)); // shift out unwanted ref_kind bits
+ }
+ int method_type_index_at(int which, bool error_ok) {
+ assert(tag_at(which).is_method_type() ||
+ (error_ok && tag_at(which).is_method_type_in_error()), "Corrupted constant pool");
+ return *int_at_addr(which);
+ }
+ public:
int method_handle_ref_kind_at(int which) {
- assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
- return extract_low_short_from_int(*int_at_addr(which)); // mask out unwanted ref_index bits
+ return method_handle_ref_kind_at(which, false);
+ }
+ int method_handle_ref_kind_at_error_ok(int which) {
+ return method_handle_ref_kind_at(which, true);
}
int method_handle_index_at(int which) {
- assert(tag_at(which).is_method_handle(), "Corrupted constant pool");
- return extract_high_short_from_int(*int_at_addr(which)); // shift out unwanted ref_kind bits
+ return method_handle_index_at(which, false);
+ }
+ int method_handle_index_at_error_ok(int which) {
+ return method_handle_index_at(which, true);
}
int method_type_index_at(int which) {
- assert(tag_at(which).is_method_type(), "Corrupted constant pool");
- return *int_at_addr(which);
+ return method_type_index_at(which, false);
}
+ int method_type_index_at_error_ok(int which) {
+ return method_type_index_at(which, true);
+ }
+
// Derived queries:
Symbol* method_handle_name_ref_at(int which) {
int member = method_handle_index_at(which);
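
[Review note] The header change is a classic flag-parameter refactor: one
private worker takes an error_ok bool, and two public wrappers fix its value,
so call sites read as intent (..._at vs ..._at_error_ok) instead of passing
raw booleans. A hedged sketch of the shape, with stubbed, illustrative tag
predicates standing in for the real constant pool:

#include <cassert>

class ConstantPoolSketch {
  bool is_method_handle(int /*which*/) const { return true; }           // stub
  bool is_method_handle_in_error(int /*which*/) const { return true; }  // stub
  int  value_at(int /*which*/) const { return 0; }                      // stub

  // Private worker: the error_ok flag only widens the assert.
  int method_handle_ref_kind_at(int which, bool error_ok) const {
    assert(is_method_handle(which) ||
           (error_ok && is_method_handle_in_error(which)));
    return value_at(which) & 0xFFFF;  // low half = ref_kind
  }

 public:
  int method_handle_ref_kind_at(int which) const {
    return method_handle_ref_kind_at(which, false);  // strict paths
  }
  int method_handle_ref_kind_at_error_ok(int which) const {
    return method_handle_ref_kind_at(which, true);   // printing/error paths
  }
};
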
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -2769,24 +2769,17 @@
st->print(BULLET"field annotations: "); fields_annotations()->print_value_on(st); st->cr();
st->print(BULLET"field type annotations: "); fields_type_annotations()->print_value_on(st); st->cr();
{
- ResourceMark rm;
- // PreviousVersionInfo objects returned via PreviousVersionWalker
- // contain a GrowableArray of handles. We have to clean up the
- // GrowableArray _after_ the PreviousVersionWalker destructor
- // has destroyed the handles.
- {
- bool have_pv = false;
- PreviousVersionWalker pvw((InstanceKlass*)this);
- for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
- pv_info != NULL; pv_info = pvw.next_previous_version()) {
- if (!have_pv)
- st->print(BULLET"previous version: ");
- have_pv = true;
- pv_info->prev_constant_pool_handle()()->print_value_on(st);
- }
- if (have_pv) st->cr();
- } // pvw is cleaned up
- } // rm is cleaned up
+ bool have_pv = false;
+ PreviousVersionWalker pvw(Thread::current(), (InstanceKlass*)this);
+ for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+ pv_node != NULL; pv_node = pvw.next_previous_version()) {
+ if (!have_pv)
+ st->print(BULLET"previous version: ");
+ have_pv = true;
+ pv_node->prev_constant_pool()->print_value_on(st);
+ }
+ if (have_pv) st->cr();
+ } // pvw is cleaned up
if (generic_signature() != NULL) {
st->print(BULLET"generic signature: ");
@@ -3317,34 +3310,34 @@
Array<Method*>* old_methods = ikh->methods();
if (cp_ref->on_stack()) {
- PreviousVersionNode * pv_node = NULL;
- if (emcp_method_count == 0) {
+ PreviousVersionNode * pv_node = NULL;
+ if (emcp_method_count == 0) {
// non-shared ConstantPool gets a reference
- pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), NULL);
- RC_TRACE(0x00000400,
- ("add: all methods are obsolete; flushing any EMCP refs"));
- } else {
- int local_count = 0;
+ pv_node = new PreviousVersionNode(cp_ref, NULL);
+ RC_TRACE(0x00000400,
+ ("add: all methods are obsolete; flushing any EMCP refs"));
+ } else {
+ int local_count = 0;
GrowableArray<Method*>* method_refs = new (ResourceObj::C_HEAP, mtClass)
- GrowableArray<Method*>(emcp_method_count, true);
- for (int i = 0; i < old_methods->length(); i++) {
- if (emcp_methods->at(i)) {
- // this old method is EMCP. Save it only if it's on the stack
- Method* old_method = old_methods->at(i);
- if (old_method->on_stack()) {
- method_refs->append(old_method);
+ GrowableArray<Method*>(emcp_method_count, true);
+ for (int i = 0; i < old_methods->length(); i++) {
+ if (emcp_methods->at(i)) {
+ // this old method is EMCP. Save it only if it's on the stack
+ Method* old_method = old_methods->at(i);
+ if (old_method->on_stack()) {
+ method_refs->append(old_method);
+ }
+ if (++local_count >= emcp_method_count) {
+ // no more EMCP methods so bail out now
+ break;
}
- if (++local_count >= emcp_method_count) {
- // no more EMCP methods so bail out now
- break;
}
}
- }
// non-shared ConstantPool gets a reference
- pv_node = new PreviousVersionNode(cp_ref, !cp_ref->is_shared(), method_refs);
+ pv_node = new PreviousVersionNode(cp_ref, method_refs);
}
// append new previous version.
- _previous_versions->append(pv_node);
+ _previous_versions->append(pv_node);
}
// Since the caller is the VMThread and we are at a safepoint, this
@@ -3445,6 +3438,8 @@
return m;
}
}
+ // None found, return null for the caller to handle.
+ return NULL;
}
return m;
}
@@ -3461,10 +3456,9 @@
// Construct a PreviousVersionNode entry for the array hung off
// the InstanceKlass.
PreviousVersionNode::PreviousVersionNode(ConstantPool* prev_constant_pool,
- bool prev_cp_is_weak, GrowableArray<Method*>* prev_EMCP_methods) {
+ GrowableArray<Method*>* prev_EMCP_methods) {
_prev_constant_pool = prev_constant_pool;
- _prev_cp_is_weak = prev_cp_is_weak;
_prev_EMCP_methods = prev_EMCP_methods;
}
@@ -3480,99 +3474,38 @@
}
}
-
-// Construct a PreviousVersionInfo entry
-PreviousVersionInfo::PreviousVersionInfo(PreviousVersionNode *pv_node) {
- _prev_constant_pool_handle = constantPoolHandle(); // NULL handle
- _prev_EMCP_method_handles = NULL;
-
- ConstantPool* cp = pv_node->prev_constant_pool();
- assert(cp != NULL, "constant pool ref was unexpectedly cleared");
- if (cp == NULL) {
- return; // robustness
- }
-
- // make the ConstantPool* safe to return
- _prev_constant_pool_handle = constantPoolHandle(cp);
-
- GrowableArray<Method*>* method_refs = pv_node->prev_EMCP_methods();
- if (method_refs == NULL) {
- // the InstanceKlass did not have any EMCP methods
- return;
- }
-
- _prev_EMCP_method_handles = new GrowableArray<methodHandle>(10);
-
- int n_methods = method_refs->length();
- for (int i = 0; i < n_methods; i++) {
- Method* method = method_refs->at(i);
- assert (method != NULL, "method has been cleared");
- if (method == NULL) {
- continue; // robustness
- }
- // make the Method* safe to return
- _prev_EMCP_method_handles->append(methodHandle(method));
- }
-}
-
-
-// Destroy a PreviousVersionInfo
-PreviousVersionInfo::~PreviousVersionInfo() {
- // Since _prev_EMCP_method_handles is not C-heap allocated, we
- // don't have to delete it.
-}
-
-
// Construct a helper for walking the previous versions array
-PreviousVersionWalker::PreviousVersionWalker(InstanceKlass *ik) {
+PreviousVersionWalker::PreviousVersionWalker(Thread* thread, InstanceKlass *ik) {
+ _thread = thread;
_previous_versions = ik->previous_versions();
_current_index = 0;
- // _hm needs no initialization
_current_p = NULL;
-}
-
-
-// Destroy a PreviousVersionWalker
-PreviousVersionWalker::~PreviousVersionWalker() {
- // Delete the current info just in case the caller didn't walk to
- // the end of the previous versions list. No harm if _current_p is
- // already NULL.
- delete _current_p;
-
- // When _hm is destroyed, all the Handles returned in
- // PreviousVersionInfo objects will be destroyed.
- // Also, after this destructor is finished it will be
- // safe to delete the GrowableArray allocated in the
- // PreviousVersionInfo objects.
+ _current_constant_pool_handle = constantPoolHandle(thread, ik->constants());
}
// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
-PreviousVersionInfo* PreviousVersionWalker::next_previous_version() {
+PreviousVersionNode* PreviousVersionWalker::next_previous_version() {
if (_previous_versions == NULL) {
// no previous versions so nothing to return
return NULL;
}
- delete _current_p; // cleanup the previous info for the caller
- _current_p = NULL; // reset to NULL so we don't delete same object twice
+ _current_p = NULL; // reset to NULL
+ _current_constant_pool_handle = NULL;
int length = _previous_versions->length();
while (_current_index < length) {
PreviousVersionNode * pv_node = _previous_versions->at(_current_index++);
- PreviousVersionInfo * pv_info = new (ResourceObj::C_HEAP, mtClass)
- PreviousVersionInfo(pv_node);
-
- constantPoolHandle cp_h = pv_info->prev_constant_pool_handle();
- assert (!cp_h.is_null(), "null cp found in previous version");
-
- // The caller will need to delete pv_info when they are done with it.
- _current_p = pv_info;
- return pv_info;
+
+ // Save a handle to the constant pool for this previous version,
+ // which keeps all the methods from being deallocated.
+ _current_constant_pool_handle = constantPoolHandle(_thread, pv_node->prev_constant_pool());
+ _current_p = pv_node;
+ return pv_node;
}
- // all of the underlying nodes' info has been deleted
return NULL;
} // end next_previous_version()
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -1126,21 +1126,11 @@
// A collection point for interesting information about the previous
-// version(s) of an InstanceKlass. This class uses weak references to
-// the information so that the information may be collected as needed
-// by the system. If the information is shared, then a regular
-// reference must be used because a weak reference would be seen as
-// collectible. A GrowableArray of PreviousVersionNodes is attached
-// to the InstanceKlass as needed. See PreviousVersionWalker below.
+// version(s) of an InstanceKlass. A GrowableArray of PreviousVersionNodes
+// is attached to the InstanceKlass as needed. See PreviousVersionWalker below.
class PreviousVersionNode : public CHeapObj<mtClass> {
private:
- // A shared ConstantPool is never collected so we'll always have
- // a reference to it so we can update items in the cache. We'll
- // have a weak reference to a non-shared ConstantPool until all
- // of the methods (EMCP or obsolete) have been collected; the
- // non-shared ConstantPool becomes collectible at that point.
- ConstantPool* _prev_constant_pool; // regular or weak reference
- bool _prev_cp_is_weak; // true if not a shared ConstantPool
+ ConstantPool* _prev_constant_pool;
// If the previous version of the InstanceKlass doesn't have any
// EMCP methods, then _prev_EMCP_methods will be NULL. If all the
@@ -1149,8 +1139,8 @@
GrowableArray<Method*>* _prev_EMCP_methods;
public:
- PreviousVersionNode(ConstantPool* prev_constant_pool, bool prev_cp_is_weak,
- GrowableArray<Method*>* prev_EMCP_methods);
+ PreviousVersionNode(ConstantPool* prev_constant_pool,
+ GrowableArray<Method*>* prev_EMCP_methods);
~PreviousVersionNode();
ConstantPool* prev_constant_pool() const {
return _prev_constant_pool;
@@ -1161,59 +1151,26 @@
};
-// A Handle-ized version of PreviousVersionNode.
-class PreviousVersionInfo : public ResourceObj {
- private:
- constantPoolHandle _prev_constant_pool_handle;
- // If the previous version of the InstanceKlass doesn't have any
- // EMCP methods, then _prev_EMCP_methods will be NULL. Since the
- // methods cannot be collected while we hold a handle,
- // _prev_EMCP_methods should never have a length of zero.
- GrowableArray<methodHandle>* _prev_EMCP_method_handles;
-
-public:
- PreviousVersionInfo(PreviousVersionNode *pv_node);
- ~PreviousVersionInfo();
- constantPoolHandle prev_constant_pool_handle() const {
- return _prev_constant_pool_handle;
- }
- GrowableArray<methodHandle>* prev_EMCP_method_handles() const {
- return _prev_EMCP_method_handles;
- }
-};
-
-
-// Helper object for walking previous versions. This helper cleans up
-// the Handles that it allocates when the helper object is destroyed.
-// The PreviousVersionInfo object returned by next_previous_version()
-// is only valid until a subsequent call to next_previous_version() or
-// the helper object is destroyed.
+// Helper object for walking previous versions.
class PreviousVersionWalker : public StackObj {
private:
+ Thread* _thread;
GrowableArray<PreviousVersionNode *>* _previous_versions;
int _current_index;
- // Fields for cleaning up when we are done walking the previous versions:
- // A HandleMark for the PreviousVersionInfo handles:
- HandleMark _hm;
+
+ // A pointer to the current node object so we can handle the deletes.
+ PreviousVersionNode* _current_p;
- // It would be nice to have a ResourceMark field in this helper also,
- // but the ResourceMark code says to be careful to delete handles held
- // in GrowableArrays _before_ deleting the GrowableArray. Since we
- // can't guarantee the order in which the fields are destroyed, we
- // have to let the creator of the PreviousVersionWalker object do
- // the right thing. Also, adding a ResourceMark here causes an
- // include loop.
-
- // A pointer to the current info object so we can handle the deletes.
- PreviousVersionInfo * _current_p;
+ // The constant pool handle keeps all the methods in this class from being
+ // deallocated from the metaspace during class unloading.
+ constantPoolHandle _current_constant_pool_handle;
public:
- PreviousVersionWalker(InstanceKlass *ik);
- ~PreviousVersionWalker();
+ PreviousVersionWalker(Thread* thread, InstanceKlass *ik);
// Return the interesting information for the next previous version
// of the klass. Returns NULL if there are no more previous versions.
- PreviousVersionInfo* next_previous_version();
+ PreviousVersionNode* next_previous_version();
};
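
[Review note] With PreviousVersionInfo gone, callers get PreviousVersionNode
pointers straight from the walker, and the walker's constant pool handle is
what pins the metadata during the loop. A usage sketch matching the call
sites elsewhere in this patch (assumes HotSpot's headers; not standalone):

// Iterate the previous versions of a redefined class, as the updated
// jvmtiImpl.cpp and instanceKlass.cpp call sites now do.
void walk_previous_versions(Thread* thread, InstanceKlass* ik, outputStream* st) {
  PreviousVersionWalker pvw(thread, ik);  // stack object; no manual cleanup
  for (PreviousVersionNode* pv_node = pvw.next_previous_version();
       pv_node != NULL; pv_node = pvw.next_previous_version()) {
    // The walker's constantPoolHandle keeps these methods alive.
    pv_node->prev_constant_pool()->print_value_on(st);
  }
}
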
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -123,7 +123,7 @@
// Allows targeted inlining
if(callee_method->should_inline()) {
*wci_result = *(WarmCallInfo::always_hot());
- if (PrintInlining && Verbose) {
+ if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method is hot: ");
}
@@ -137,7 +137,7 @@
if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
size < InlineThrowMaxSize ) {
wci_result->set_profit(wci_result->profit() * 100);
- if (PrintInlining && Verbose) {
+ if (C->print_inlining() && Verbose) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
}
@@ -491,7 +491,7 @@
C->log()->inline_fail(inline_msg);
}
}
- if (PrintInlining) {
+ if (C->print_inlining()) {
C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
if (Verbose && callee_method) {
@@ -540,7 +540,7 @@
#ifndef PRODUCT
if (UseOldInlining && InlineWarmCalls
- && (PrintOpto || PrintOptoInlining || PrintInlining)) {
+ && (PrintOpto || C->print_inlining())) {
bool cold = wci.is_cold();
bool hot = !cold && wci.is_hot();
bool old_cold = !success;
@@ -617,7 +617,7 @@
callee_method->is_compiled_lambda_form()) {
max_inline_level_adjust += 1; // don't count method handle calls from java.lang.invoke implem
}
- if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
+ if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
CompileTask::print_inline_indent(inline_level());
tty->print_cr(" \\-> discounting inline depth");
}
--- a/hotspot/src/share/vm/opto/callGenerator.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -159,8 +159,9 @@
virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
- if (PrintInlining)
+ if (C->print_inlining()) {
C->print_inlining(callee, inline_level, bci, msg);
+ }
}
};
--- a/hotspot/src/share/vm/opto/compile.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -654,7 +654,7 @@
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
- _print_inlining(0) {
+ _print_inlining_idx(0) {
C = this;
CompileWrapper cw(this);
@@ -679,6 +679,8 @@
set_print_assembly(print_opto_assembly);
set_parsed_irreducible_loop(false);
#endif
+ set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
+ set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
if (ProfileTraps) {
// Make sure the method being compiled gets its own MDO,
@@ -710,7 +712,7 @@
PhaseGVN gvn(node_arena(), estimated_size);
set_initial_gvn(&gvn);
- if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+ if (print_inlining() || print_intrinsics()) {
_print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
}
{ // Scope for timing the parser
@@ -937,7 +939,7 @@
_inlining_progress(false),
_inlining_incrementally(false),
_print_inlining_list(NULL),
- _print_inlining(0) {
+ _print_inlining_idx(0) {
C = this;
#ifndef PRODUCT
@@ -3611,7 +3613,7 @@
}
void Compile::dump_inlining() {
- if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
+ if (print_inlining() || print_intrinsics()) {
// Print inlining message for candidates that we couldn't inline
// for lack of space or non constant receiver
for (int i = 0; i < _late_inlines.length(); i++) {
@@ -3635,7 +3637,7 @@
}
}
for (int i = 0; i < _print_inlining_list->length(); i++) {
- tty->print(_print_inlining_list->at(i).ss()->as_string());
+ tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
}
}
}
--- a/hotspot/src/share/vm/opto/compile.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/opto/compile.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -312,6 +312,8 @@
bool _do_method_data_update; // True if we generate code to update MethodData*s
int _AliasLevel; // Locally-adjusted version of AliasLevel flag.
bool _print_assembly; // True if we should dump assembly code for this compilation
+ bool _print_inlining; // True if we should print inlining for this compilation
+ bool _print_intrinsics; // True if we should print intrinsics for this compilation
#ifndef PRODUCT
bool _trace_opto_output;
bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
@@ -414,7 +416,7 @@
};
GrowableArray<PrintInliningBuffer>* _print_inlining_list;
- int _print_inlining;
+ int _print_inlining_idx;
// Only keep nodes in the expensive node list that need to be optimized
void cleanup_expensive_nodes(PhaseIterGVN &igvn);
@@ -426,24 +428,24 @@
public:
outputStream* print_inlining_stream() const {
- return _print_inlining_list->at(_print_inlining).ss();
+ return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
}
void print_inlining_skip(CallGenerator* cg) {
- if (PrintInlining) {
- _print_inlining_list->at(_print_inlining).set_cg(cg);
- _print_inlining++;
- _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+ if (_print_inlining) {
+ _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
+ _print_inlining_idx++;
+ _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
}
}
void print_inlining_insert(CallGenerator* cg) {
- if (PrintInlining) {
+ if (_print_inlining) {
for (int i = 0; i < _print_inlining_list->length(); i++) {
- if (_print_inlining_list->at(i).cg() == cg) {
+ if (_print_inlining_list->adr_at(i)->cg() == cg) {
_print_inlining_list->insert_before(i+1, PrintInliningBuffer());
- _print_inlining = i+1;
- _print_inlining_list->at(i).set_cg(NULL);
+ _print_inlining_idx = i+1;
+ _print_inlining_list->adr_at(i)->set_cg(NULL);
return;
}
}
@@ -572,6 +574,10 @@
int AliasLevel() const { return _AliasLevel; }
bool print_assembly() const { return _print_assembly; }
void set_print_assembly(bool z) { _print_assembly = z; }
+ bool print_inlining() const { return _print_inlining; }
+ void set_print_inlining(bool z) { _print_inlining = z; }
+ bool print_intrinsics() const { return _print_intrinsics; }
+ void set_print_intrinsics(bool z) { _print_intrinsics = z; }
// check the CompilerOracle for special behaviours for this compile
bool method_has_option(const char * option) {
return method() != NULL && method()->has_option(option);
--- a/hotspot/src/share/vm/opto/doCall.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/opto/doCall.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -41,9 +41,9 @@
#include "runtime/sharedRuntime.hpp"
void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
- if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+ if (TraceTypeProfile || C->print_inlining()) {
outputStream* out = tty;
- if (!PrintInlining) {
+ if (!C->print_inlining()) {
if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
method->print_short_name();
tty->cr();
--- a/hotspot/src/share/vm/opto/library_call.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/opto/library_call.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -543,7 +543,7 @@
Compile* C = kit.C;
int nodes = C->unique();
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Intrinsic %s", str);
@@ -554,7 +554,7 @@
// Try to inline the intrinsic.
if (kit.try_to_inline()) {
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -570,7 +570,7 @@
}
// The intrinsic bailed out
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
@@ -592,7 +592,7 @@
int nodes = C->unique();
#ifndef PRODUCT
assert(is_predicted(), "sanity");
- if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
char buf[1000];
const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
tty->print_cr("Predicate for intrinsic %s", str);
@@ -603,7 +603,7 @@
Node* slow_ctl = kit.try_to_predicate();
if (!kit.failing()) {
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
}
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
@@ -617,7 +617,7 @@
}
// The intrinsic bailed out
- if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
+ if (C->print_intrinsics() || C->print_inlining()) {
if (jvms->has_method()) {
// Not a root compile.
const char* msg = "failed to generate predicate for intrinsic";
@@ -2299,7 +2299,7 @@
const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
#ifndef PRODUCT
- if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+ if (C->print_intrinsics() || C->print_inlining()) {
tty->print(" from base type: "); adr_type->dump();
tty->print(" sharpened value: "); tjp->dump();
}
@@ -3260,7 +3260,7 @@
if (mirror_con == NULL) return false; // cannot happen?
#ifndef PRODUCT
- if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
+ if (C->print_intrinsics() || C->print_inlining()) {
ciType* k = mirror_con->java_mirror_type();
if (k) {
tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
@@ -3952,14 +3952,14 @@
// caller sensitive methods.
bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
}
#endif
if (!jvms()->has_method()) {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because intrinsic was inlined at top level");
}
#endif
@@ -3983,7 +3983,7 @@
// Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
if (!m->caller_sensitive()) {
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out: CallerSensitive annotation expected at frame %d", n);
}
#endif
@@ -3999,7 +3999,7 @@
set_result(makecon(TypeInstPtr::make(caller_mirror)));
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
@@ -4015,7 +4015,7 @@
}
#ifndef PRODUCT
- if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
+ if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
tty->print_cr(" Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
tty->print_cr(" JVM state at this point:");
for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
--- a/hotspot/src/share/vm/prims/jni.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/prims/jni.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -5046,7 +5046,10 @@
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
-void MetaspaceAux_test();
+void TestMetaspaceAux_test();
+#if INCLUDE_ALL_GCS
+void TestG1BiasedArray_test();
+#endif
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
@@ -5054,7 +5057,7 @@
run_unit_test(TestReservedSpace_test());
run_unit_test(TestReserveMemorySpecial_test());
run_unit_test(TestVirtualSpace_test());
- run_unit_test(MetaspaceAux_test());
+ run_unit_test(TestMetaspaceAux_test());
run_unit_test(GlobalDefinitions::test_globals());
run_unit_test(GCTimerAllTest::all());
run_unit_test(arrayOopDesc::test_max_array_length());
@@ -5066,6 +5069,7 @@
run_unit_test(VMStructs::test());
#endif
#if INCLUDE_ALL_GCS
+ run_unit_test(TestG1BiasedArray_test());
run_unit_test(HeapRegionRemSet::test_prt());
#endif
tty->print_cr("All internal VM tests passed");
--- a/hotspot/src/share/vm/prims/jvm.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvm.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -1835,16 +1835,27 @@
}
JVM_END
-JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
-{
- JVMWrapper("JVM_GetClassDeclaredMethods");
+static bool select_method(methodHandle method, bool want_constructor) {
+ if (want_constructor) {
+ return (method->is_initializer() && !method->is_static());
+ } else {
+ return (!method->is_initializer() && !method->is_overpass());
+ }
+}
+
+static jobjectArray get_class_declared_methods_helper(
+ JNIEnv *env,
+ jclass ofClass, jboolean publicOnly,
+ bool want_constructor,
+ Klass* klass, TRAPS) {
+
JvmtiVMObjectAllocEventCollector oam;
// Exclude primitive types and array types
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
|| java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
+ oop res = oopFactory::new_objArray(klass, 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1855,87 +1866,67 @@
Array<Method*>* methods = k->methods();
int methods_length = methods->length();
+
+ // Save original method_idnum in case of redefinition, which can change
+ // the idnum of obsolete methods. The new method will have the same idnum,
+ // but if we refresh the methods array, the counts will be wrong.
+ ResourceMark rm(THREAD);
+ GrowableArray<int>* idnums = new GrowableArray<int>(methods_length);
int num_methods = 0;
- int i;
- for (i = 0; i < methods_length; i++) {
+ for (int i = 0; i < methods_length; i++) {
methodHandle method(THREAD, methods->at(i));
- if (!method->is_initializer() && !method->is_overpass()) {
+ if (select_method(method, want_constructor)) {
if (!publicOnly || method->is_public()) {
+ idnums->push(method->method_idnum());
++num_methods;
}
}
}
// Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(klass, num_methods, CHECK_NULL);
objArrayHandle result (THREAD, r);
- int out_idx = 0;
- for (i = 0; i < methods_length; i++) {
- methodHandle method(THREAD, methods->at(i));
- if (!method->is_initializer() && !method->is_overpass()) {
- if (!publicOnly || method->is_public()) {
- oop m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
- result->obj_at_put(out_idx, m);
- ++out_idx;
+ // Now just put the methods that we selected above, but go by their idnum
+ // in case of redefinition. The methods can be redefined at any safepoint,
+ // both above when allocating the oop array and below when creating the
+ // reflect objects.
+ for (int i = 0; i < num_methods; i++) {
+ methodHandle method(THREAD, k->method_with_idnum(idnums->at(i)));
+ if (method.is_null()) {
+ // The method may have been deleted, and it seems this API can handle null.
+ // Otherwise we should probably put in a method that throws NSME.
+ result->obj_at_put(i, NULL);
+ } else {
+ oop m;
+ if (want_constructor) {
+ m = Reflection::new_constructor(method, CHECK_NULL);
+ } else {
+ m = Reflection::new_method(method, UseNewReflection, false, CHECK_NULL);
}
+ result->obj_at_put(i, m);
}
}
- assert(out_idx == num_methods, "just checking");
+
return (jobjectArray) JNIHandles::make_local(env, result());
}
+
+JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredMethods(JNIEnv *env, jclass ofClass, jboolean publicOnly))
+{
+ JVMWrapper("JVM_GetClassDeclaredMethods");
+ return get_class_declared_methods_helper(env, ofClass, publicOnly,
+ /*want_constructor*/ false,
+ SystemDictionary::reflect_Method_klass(), THREAD);
+}
JVM_END
JVM_ENTRY(jobjectArray, JVM_GetClassDeclaredConstructors(JNIEnv *env, jclass ofClass, jboolean publicOnly))
{
JVMWrapper("JVM_GetClassDeclaredConstructors");
- JvmtiVMObjectAllocEventCollector oam;
-
- // Exclude primitive types and array types
- if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
- || java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass))->oop_is_array()) {
- // Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
- return (jobjectArray) JNIHandles::make_local(env, res);
- }
-
- instanceKlassHandle k(THREAD, java_lang_Class::as_Klass(JNIHandles::resolve_non_null(ofClass)));
-
- // Ensure class is linked
- k->link_class(CHECK_NULL);
-
- Array<Method*>* methods = k->methods();
- int methods_length = methods->length();
- int num_constructors = 0;
-
- int i;
- for (i = 0; i < methods_length; i++) {
- methodHandle method(THREAD, methods->at(i));
- if (method->is_initializer() && !method->is_static()) {
- if (!publicOnly || method->is_public()) {
- ++num_constructors;
- }
- }
- }
-
- // Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
- objArrayHandle result(THREAD, r);
-
- int out_idx = 0;
- for (i = 0; i < methods_length; i++) {
- methodHandle method(THREAD, methods->at(i));
- if (method->is_initializer() && !method->is_static()) {
- if (!publicOnly || method->is_public()) {
- oop m = Reflection::new_constructor(method, CHECK_NULL);
- result->obj_at_put(out_idx, m);
- ++out_idx;
- }
- }
- }
- assert(out_idx == num_constructors, "just checking");
- return (jobjectArray) JNIHandles::make_local(env, result());
+ return get_class_declared_methods_helper(env, ofClass, publicOnly,
+ /*want_constructor*/ true,
+ SystemDictionary::reflect_Constructor_klass(), THREAD);
}
JVM_END
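
[Review note] The two JVM entries above now share one helper, differing only
in the select_method predicate and the result klass. A hedged sketch of the
predicate's truth table, with an illustrative Method stand-in:

struct MethodSketch {
  bool is_initializer;  // <init> or <clinit>
  bool is_static;       // <clinit> is the static initializer
  bool is_overpass;     // synthetic bridge generated for default methods
};

// Constructors: non-static initializers (<init>).
// Declared methods: anything that is neither an initializer nor an overpass.
static bool select_method(const MethodSketch& m, bool want_constructor) {
  if (want_constructor) {
    return m.is_initializer && !m.is_static;
  }
  return !m.is_initializer && !m.is_overpass;
}
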
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -406,7 +406,11 @@
VMOp_Type type() const { return VMOp_GetCurrentContendedMonitor; }
jvmtiError result() { return _result; }
void doit() {
- _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+ _result = JVMTI_ERROR_THREAD_NOT_ALIVE;
+ if (Threads::includes(_java_thread) && !_java_thread->is_exiting() &&
+ _java_thread->threadObj() != NULL) {
+ _result = ((JvmtiEnvBase *)_env)->get_current_contended_monitor(_calling_thread,_java_thread,_owned_monitor_ptr);
+ }
}
};
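
[Review note] The doit() change defaults the result to
JVMTI_ERROR_THREAD_NOT_ALIVE and only dereferences the JavaThread once it is
known to be registered, not exiting, and backed by a live thread oop, closing
a race with thread exit. A hedged sketch of the guard, with illustrative
stand-ins for the real checks:

enum JvmtiErrorSketch { SKETCH_OK, SKETCH_THREAD_NOT_ALIVE };

struct JavaThreadSketch {
  bool in_threads_list;  // cf. Threads::includes(_java_thread)
  bool exiting;          // cf. _java_thread->is_exiting()
  bool has_thread_oop;   // cf. _java_thread->threadObj() != NULL
};

JvmtiErrorSketch get_contended_monitor(const JavaThreadSketch& t) {
  JvmtiErrorSketch result = SKETCH_THREAD_NOT_ALIVE;  // safe default
  if (t.in_threads_list && !t.exiting && t.has_thread_oop) {
    result = SKETCH_OK;  // only now is it safe to inspect the thread
  }
  return result;
}
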
--- a/hotspot/src/share/vm/prims/jvmtiImpl.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiImpl.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -273,59 +273,49 @@
// add/remove breakpoint to/from versions of the method that
// are EMCP. Directly or transitively obsolete methods are
- // not saved in the PreviousVersionInfo.
+ // not saved in the PreviousVersionNodes.
Thread *thread = Thread::current();
instanceKlassHandle ikh = instanceKlassHandle(thread, _method->method_holder());
Symbol* m_name = _method->name();
Symbol* m_signature = _method->signature();
- {
- ResourceMark rm(thread);
- // PreviousVersionInfo objects returned via PreviousVersionWalker
- // contain a GrowableArray of handles. We have to clean up the
- // GrowableArray _after_ the PreviousVersionWalker destructor
- // has destroyed the handles.
- {
- // search previous versions if they exist
- PreviousVersionWalker pvw((InstanceKlass *)ikh());
- for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
- pv_info != NULL; pv_info = pvw.next_previous_version()) {
- GrowableArray<methodHandle>* methods =
- pv_info->prev_EMCP_method_handles();
+ // search previous versions if they exist
+ PreviousVersionWalker pvw(thread, (InstanceKlass *)ikh());
+ for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+ pv_node != NULL; pv_node = pvw.next_previous_version()) {
+ GrowableArray<Method*>* methods = pv_node->prev_EMCP_methods();
- if (methods == NULL) {
- // We have run into a PreviousVersion generation where
- // all methods were made obsolete during that generation's
- // RedefineClasses() operation. At the time of that
- // operation, all EMCP methods were flushed so we don't
- // have to go back any further.
- //
- // A NULL methods array is different than an empty methods
- // array. We cannot infer any optimizations about older
- // generations from an empty methods array for the current
- // generation.
- break;
- }
+ if (methods == NULL) {
+ // We have run into a PreviousVersion generation where
+ // all methods were made obsolete during that generation's
+ // RedefineClasses() operation. At the time of that
+ // operation, all EMCP methods were flushed so we don't
+ // have to go back any further.
+ //
+ // A NULL methods array is different than an empty methods
+ // array. We cannot infer any optimizations about older
+ // generations from an empty methods array for the current
+ // generation.
+ break;
+ }
- for (int i = methods->length() - 1; i >= 0; i--) {
- methodHandle method = methods->at(i);
- // obsolete methods that are running are not deleted from
- // previous version array, but they are skipped here.
- if (!method->is_obsolete() &&
- method->name() == m_name &&
- method->signature() == m_signature) {
- RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
- meth_act == &Method::set_breakpoint ? "sett" : "clear",
- method->name()->as_C_string(),
- method->signature()->as_C_string()));
+ for (int i = methods->length() - 1; i >= 0; i--) {
+ Method* method = methods->at(i);
+ // obsolete methods that are running are not deleted from
+ // previous version array, but they are skipped here.
+ if (!method->is_obsolete() &&
+ method->name() == m_name &&
+ method->signature() == m_signature) {
+ RC_TRACE(0x00000800, ("%sing breakpoint in %s(%s)",
+ meth_act == &Method::set_breakpoint ? "sett" : "clear",
+ method->name()->as_C_string(),
+ method->signature()->as_C_string()));
- ((Method*)method()->*meth_act)(_bci);
- break;
- }
- }
+ (method->*meth_act)(_bci);
+ break;
}
- } // pvw is cleaned up
- } // rm is cleaned up
+ }
+ }
}
void JvmtiBreakpoint::set() {
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -2807,28 +2807,20 @@
&trace_name_printed);
}
}
- {
- ResourceMark rm(_thread);
- // PreviousVersionInfo objects returned via PreviousVersionWalker
- // contain a GrowableArray of handles. We have to clean up the
- // GrowableArray _after_ the PreviousVersionWalker destructor
- // has destroyed the handles.
- {
- // the previous versions' constant pool caches may need adjustment
- PreviousVersionWalker pvw(ik);
- for (PreviousVersionInfo * pv_info = pvw.next_previous_version();
- pv_info != NULL; pv_info = pvw.next_previous_version()) {
- other_cp = pv_info->prev_constant_pool_handle();
- cp_cache = other_cp->cache();
- if (cp_cache != NULL) {
- cp_cache->adjust_method_entries(_matching_old_methods,
- _matching_new_methods,
- _matching_methods_length,
- &trace_name_printed);
- }
- }
- } // pvw is cleaned up
- } // rm is cleaned up
+
+ // the previous versions' constant pool caches may need adjustment
+ PreviousVersionWalker pvw(_thread, ik);
+ for (PreviousVersionNode * pv_node = pvw.next_previous_version();
+ pv_node != NULL; pv_node = pvw.next_previous_version()) {
+ other_cp = pv_node->prev_constant_pool();
+ cp_cache = other_cp->cache();
+ if (cp_cache != NULL) {
+ cp_cache->adjust_method_entries(_matching_old_methods,
+ _matching_new_methods,
+ _matching_methods_length,
+ &trace_name_printed);
+ }
+ }
}
}
@@ -2942,10 +2934,9 @@
// obsolete methods need a unique idnum
u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
if (num != ConstMethod::UNSET_IDNUM) {
-// u2 old_num = old_method->method_idnum();
old_method->set_method_idnum(num);
-// TO DO: attach obsolete annotations to obsolete method's new idnum
}
+
// With tracing we try not to "yack" too much. The position of
// this trace assumes there are fewer obsolete methods than
// EMCP methods.
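
Note: adjust_method_entries is driven by index-aligned arrays, where
_matching_old_methods[i] pairs with _matching_new_methods[i] for
i < _matching_methods_length. A hypothetical sketch of that parallel-array
remapping idiom (remap_entries and the flat entries array are illustrative
stand-ins; the real code walks ConstantPoolCache entries):

    #include <stddef.h>

    struct Method;  // opaque stand-in

    // Rewrite any entry that still points at an old method so it points at
    // the redefined counterpart; the two arrays are index-aligned.
    static void remap_entries(Method** entries, size_t entry_count,
                              Method* const* old_methods,
                              Method* const* new_methods, int match_count) {
      for (size_t e = 0; e < entry_count; e++) {
        for (int i = 0; i < match_count; i++) {
          if (entries[e] == old_methods[i]) {
            entries[e] = new_methods[i];
            break;
          }
        }
      }
    }
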
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -1100,6 +1100,7 @@
}
}
+#if defined(COMPILER2) || defined(_LP64) || !INCLUDE_CDS
// Conflict: required to use shared spaces (-Xshare:on), but
// incompatible command line options were chosen.
@@ -1112,6 +1113,7 @@
FLAG_SET_DEFAULT(UseSharedSpaces, false);
}
}
+#endif
void Arguments::set_tiered_flags() {
// With tiered, set default policy to AdvancedThresholdPolicy, which is 3.
@@ -1520,16 +1522,18 @@
FLAG_SET_ERGO(bool, UseParallelGC, true);
}
}
- // Shared spaces work fine with other GCs but causes bytecode rewriting
- // to be disabled, which hurts interpreter performance and decreases
- // server performance. On server class machines, keep the default
- // off unless it is asked for. Future work: either add bytecode rewriting
- // at link time, or rewrite bytecodes in non-shared methods.
- if (!DumpSharedSpaces && !RequireSharedSpaces &&
- (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
- no_shared_spaces();
- }
}
+#ifdef COMPILER2
+  // Shared spaces work fine with other GCs but cause bytecode rewriting
+ // to be disabled, which hurts interpreter performance and decreases
+ // server performance. When -server is specified, keep the default off
+ // unless it is asked for. Future work: either add bytecode rewriting
+ // at link time, or rewrite bytecodes in non-shared methods.
+ if (!DumpSharedSpaces && !RequireSharedSpaces &&
+ (FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
+ no_shared_spaces();
+ }
+#endif
set_conservative_max_heap_alignment();
@@ -2439,21 +2443,6 @@
return result;
}
- if (AggressiveOpts) {
- // Insert alt-rt.jar between user-specified bootclasspath
- // prefix and the default bootclasspath. os::set_boot_path()
- // uses meta_index_dir as the default bootclasspath directory.
- const char* altclasses_jar = "alt-rt.jar";
- size_t altclasses_path_len = strlen(get_meta_index_dir()) + 1 +
- strlen(altclasses_jar);
- char* altclasses_path = NEW_C_HEAP_ARRAY(char, altclasses_path_len, mtInternal);
- strcpy(altclasses_path, get_meta_index_dir());
- strcat(altclasses_path, altclasses_jar);
- scp.add_suffix_to_prefix(altclasses_path);
- scp_assembly_required = true;
- FREE_C_HEAP_ARRAY(char, altclasses_path, mtInternal);
- }
-
// Parse _JAVA_OPTIONS environment variable (if present) (mimics classic VM)
result = parse_java_options_environment_variable(&scp, &scp_assembly_required);
if (result != JNI_OK) {
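
Note: for reference, the deleted AggressiveOpts block concatenated
get_meta_index_dir() and "alt-rt.jar" by hand; the extra byte in the length
covers the NUL terminator, with the directory string expected to supply its
own trailing separator. An equivalent stand-alone sketch using snprintf, with
hypothetical inputs:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main() {
      const char* meta_index_dir = "/opt/jdk/jre/lib/";  // assumed trailing '/'
      const char* altclasses_jar = "alt-rt.jar";
      size_t len = strlen(meta_index_dir) + 1 + strlen(altclasses_jar);
      char* path = (char*)malloc(len);
      snprintf(path, len, "%s%s", meta_index_dir, altclasses_jar);  // exact fit
      printf("%s\n", path);
      free(path);
      return 0;
    }
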
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -2526,6 +2526,9 @@
product(bool, PrintStringTableStatistics, false, \
"print statistics about the StringTable and SymbolTable") \
\
+ diagnostic(bool, VerifyStringTableAtExit, false, \
+ "verify StringTable contents at exit") \
+ \
notproduct(bool, PrintSymbolTableSizeHistogram, false, \
"print histogram of the symbol table") \
\
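
Note: since VerifyStringTableAtExit is declared with diagnostic(...), it stays
locked in product builds and must be enabled together with the unlock flag,
e.g.:

    java -XX:+UnlockDiagnosticVMOptions -XX:+VerifyStringTableAtExit -version
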
--- a/hotspot/src/share/vm/runtime/handles.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/runtime/handles.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -136,7 +136,7 @@
// Specific Handles for different oop types
#define DEF_METADATA_HANDLE(name, type) \
class name##Handle; \
- class name##Handle { \
+ class name##Handle : public StackObj { \
type* _value; \
Thread* _thread; \
protected: \
@@ -175,7 +175,7 @@
// Writing this class explicitly, since DEF_METADATA_HANDLE(klass) doesn't
// provide the necessary Klass* <-> Klass* conversions. This Klass
// could be removed when we don't have the Klass* typedef anymore.
-class KlassHandle {
+class KlassHandle : public StackObj {
Klass* _value;
protected:
Klass* obj() const { return _value; }
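
Note: deriving the handle types from StackObj is what gives the new
is_in_stack assert (next hunk) its teeth, since instances are meant to live
only on the stack. A stand-alone approximation of the StackObj idea; HotSpot's
real base class differs in detail:

    #include <stddef.h>

    class StackOnly {
     private:
      void* operator new(size_t size);  // declared, never defined: heap allocation
      void  operator delete(void* p);   // is a compile- or link-time error
    };

    class MyHandle : public StackOnly {
      int _value;
     public:
      explicit MyHandle(int v) : _value(v) {}
    };

    int main() {
      MyHandle ok(1);                      // fine: automatic (stack) storage
      // MyHandle* bad = new MyHandle(2);  // rejected: operator new is private
      return 0;
    }
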
--- a/hotspot/src/share/vm/runtime/handles.inline.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/runtime/handles.inline.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -79,6 +79,7 @@
} else { \
_thread = Thread::current(); \
} \
+ assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
@@ -95,6 +96,7 @@
} else { \
_thread = Thread::current(); \
} \
+ assert (_thread->is_in_stack((address)this), "not on stack?"); \
_thread->metadata_handles()->push((Metadata*)_value); \
} else { \
_thread = NULL; \
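
Note: the assert checks that the handle object itself sits inside the current
thread's stack, catching handles embedded in heap-allocated objects. Roughly
what such a bounds check computes, with stand-in parameters for the thread's
stack base and size:

    #include <stddef.h>
    #include <stdint.h>

    // Stacks grow downward, so stack_base is the highest address of the range.
    static bool is_in_stack_sketch(const void* addr,
                                   const void* stack_base, size_t stack_size) {
      uintptr_t a    = (uintptr_t)addr;
      uintptr_t base = (uintptr_t)stack_base;
      return a <= base && a >= base - stack_size;
    }
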
--- a/hotspot/src/share/vm/runtime/java.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/runtime/java.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -544,6 +544,19 @@
// it will run into trouble when system destroys static variables.
MemTracker::shutdown(MemTracker::NMT_normal);
+ if (VerifyStringTableAtExit) {
+ int fail_cnt = 0;
+ {
+ MutexLocker ml(StringTable_lock);
+ fail_cnt = StringTable::verify_and_compare_entries();
+ }
+
+ if (fail_cnt != 0) {
+ tty->print_cr("ERROR: fail_cnt=%d", fail_cnt);
+ guarantee(fail_cnt == 0, "unexpected StringTable verification failures");
+ }
+ }
+
#undef BEFORE_EXIT_NOT_RUN
#undef BEFORE_EXIT_RUNNING
#undef BEFORE_EXIT_DONE
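
Note the scoping in the hunk above: fail_cnt is computed while
StringTable_lock is held, but the guarantee (which can abort the VM) only runs
after the MutexLocker's scope closes. The same idiom in miniature, using
std::mutex as a stand-in for the VM's mutex type:

    #include <assert.h>
    #include <mutex>

    static std::mutex table_lock;
    static int verify_entries() { return 0; }  // stand-in for the real scan

    static void verify_at_exit() {
      int fail_cnt = 0;
      {
        std::lock_guard<std::mutex> ml(table_lock);  // held only for the scan
        fail_cnt = verify_entries();
      }
      assert(fail_cnt == 0 && "unexpected StringTable verification failures");
    }
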
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -1506,8 +1506,11 @@
info, CHECK_(methodHandle()));
inline_cache->set_to_monomorphic(info);
} else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
- // Change to megamorphic
- inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+ // Potential change to megamorphic
+ bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
+ if (!successful) {
+ inline_cache->set_to_clean();
+ }
} else {
// Either clean or megamorphic
}
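
Note: set_to_megamorphic now reports whether the transition succeeded; on
failure the site is reset to clean, so the next invocation resolves through
the slow path again instead of tripping over a half-built cache. The recovery
shape in miniature, with a hypothetical cache type:

    struct InlineCacheSketch {
      bool set_to_megamorphic() { return false; }  // stand-in: may legitimately fail
      void set_to_clean() {}                       // safe state; re-resolve later
    };

    static void transition(InlineCacheSketch* ic) {
      if (!ic->set_to_megamorphic()) {
        ic->set_to_clean();  // failure is recoverable, not fatal
      }
    }
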
--- a/hotspot/src/share/vm/runtime/vm_version.hpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/runtime/vm_version.hpp Thu Sep 26 13:33:01 2013 -0700
@@ -78,7 +78,13 @@
static const char* jre_release_version();
// does HW support an 8-byte compare-exchange operation?
- static bool supports_cx8() {return _supports_cx8;}
+ static bool supports_cx8() {
+#ifdef SUPPORTS_NATIVE_CX8
+ return true;
+#else
+ return _supports_cx8;
+#endif
+ }
// does HW support atomic get-and-set or atomic get-and-add? Used
// to guide intrinsification decisions for Unsafe atomic ops
static bool supports_atomic_getset4() {return _supports_atomic_getset4;}
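
Note: with SUPPORTS_NATIVE_CX8 defined at build time, supports_cx8() becomes a
compile-time constant and the optimizer can drop callers' fallback branches
entirely. An illustrative stand-alone version (the function names and the
atomic builtin are stand-ins, not the VM's):

    static bool supports_cx8_sketch(bool runtime_probe) {
    #ifdef SUPPORTS_NATIVE_CX8
      (void)runtime_probe;   // the target guarantees an 8-byte CAS
      return true;
    #else
      return runtime_probe;  // decided by the startup CPU probe
    #endif
    }

    static int add_atomically(volatile int* p, int v, bool probe) {
      if (supports_cx8_sketch(probe)) {
        return __atomic_add_fetch(p, v, __ATOMIC_SEQ_CST);  // hardware path
      }
      return (*p += v);  // illustrative fallback; a real one would take a lock
    }
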
--- a/hotspot/src/share/vm/services/attachListener.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/services/attachListener.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -470,7 +470,17 @@
vmSymbols::threadgroup_string_void_signature(),
thread_group,
string,
- CHECK);
+ THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ tty->print_cr("Exception in VM (AttachListener::init) : ");
+ java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+ tty->cr();
+
+ CLEAR_PENDING_EXCEPTION;
+
+ return;
+ }
KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
JavaCalls::call_special(&result,
@@ -479,7 +489,17 @@
vmSymbols::add_method_name(),
vmSymbols::thread_void_signature(),
thread_oop, // ARG 1
- CHECK);
+ THREAD);
+
+ if (HAS_PENDING_EXCEPTION) {
+ tty->print_cr("Exception in VM (AttachListener::init) : ");
+ java_lang_Throwable::print(PENDING_EXCEPTION, tty);
+ tty->cr();
+
+ CLEAR_PENDING_EXCEPTION;
+
+ return;
+ }
{ MutexLocker mu(Threads_lock);
JavaThread* listener_thread = new JavaThread(&attach_listener_thread_entry);
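
Note: the switch from CHECK to THREAD is the standard HotSpot idiom for
handling an exception locally: CHECK expands to a return-on-pending-exception,
while THREAD leaves the pending exception for the caller to inspect. A
distilled stand-alone version with sketch macros (not the real
utilities/exceptions.hpp definitions):

    #include <stddef.h>
    #include <stdio.h>

    struct Thread { const char* pending; };

    // Sketch stand-ins for HotSpot's exception-checking macros.
    #define HAS_PENDING_EXCEPTION   (THREAD->pending != NULL)
    #define CLEAR_PENDING_EXCEPTION (THREAD->pending = NULL)

    static void call_special(Thread* THREAD) {
      THREAD->pending = "java.lang.OutOfMemoryError";  // simulate a Java exception
    }

    static void listener_init(Thread* THREAD) {
      call_special(THREAD);         // THREAD form: control always returns here
      if (HAS_PENDING_EXCEPTION) {  // manual check replaces CHECK's auto-return
        printf("Exception in VM (AttachListener::init) : %s\n", THREAD->pending);
        CLEAR_PENDING_EXCEPTION;
        return;                     // abandon attach setup instead of crashing
      }
    }

    int main() {
      Thread t = { NULL };
      listener_init(&t);
      return 0;
    }
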
--- a/hotspot/src/share/vm/services/diagnosticArgument.cpp Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/src/share/vm/services/diagnosticArgument.cpp Thu Sep 26 13:33:01 2013 -0700
@@ -61,7 +61,7 @@
}
void GenDCmdArgument::to_string(char* c, char* buf, size_t len) {
- jio_snprintf(buf, len, "%s", c);
+ jio_snprintf(buf, len, "%s", (c != NULL) ? c : "");
}
void GenDCmdArgument::to_string(StringArrayArgument* f, char* buf, size_t len) {
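
Note: passing a NULL pointer to a %s conversion is undefined behavior (some C
libraries print "(null)", others crash), which is exactly what the guard above
avoids. In miniature:

    #include <stddef.h>
    #include <stdio.h>

    static void to_string_sketch(const char* c, char* buf, size_t len) {
      snprintf(buf, len, "%s", (c != NULL) ? c : "");  // never hand %s a NULL
    }

    int main() {
      char buf[16];
      to_string_sketch(NULL, buf, sizeof(buf));
      printf("[%s]\n", buf);  // prints []
      return 0;
    }
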
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/print/PrintInlining.java Thu Sep 26 13:33:01 2013 -0700
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8022585
+ * @summary VM crashes when run with -XX:+PrintInlining
+ * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
+ *
+ */
+
+public class PrintInlining {
+ public static void main(String[] args) {
+ System.out.println("Passed");
+ }
+}
--- a/hotspot/test/runtime/CDSCompressedKPtrs/XShareAuto.java Thu Sep 26 10:43:15 2013 -0700
+++ b/hotspot/test/runtime/CDSCompressedKPtrs/XShareAuto.java Thu Sep 26 13:33:01 2013 -0700
@@ -33,16 +33,9 @@
public class XShareAuto {
public static void main(String[] args) throws Exception {
- if (!Platform.is64bit()) {
- System.out.println("ObjectAlignmentInBytes for CDS is only " +
- "supported on 64bit platforms; this plaform is " +
- System.getProperty("sun.arch.data.model"));
- System.out.println("Skipping the test");
- return;
- }
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
- "-XX:+UnlockDiagnosticVMOptions", "-XX:SharedArchiveFile=./sample.jsa",
- "-Xshare:dump");
+ "-server", "-XX:+UnlockDiagnosticVMOptions",
+ "-XX:SharedArchiveFile=./sample.jsa", "-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("Loading classes to share");
output.shouldHaveExitValue(0);