--- a/hotspot/make/excludeSrc.make Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/make/excludeSrc.make Thu Aug 14 13:13:15 2014 +0000
@@ -70,7 +70,8 @@
CXXFLAGS += -DINCLUDE_CDS=0
CFLAGS += -DINCLUDE_CDS=0
- Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp
+ Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \
+ systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp
endif
ifeq ($(INCLUDE_ALL_GCS), false)
--- a/hotspot/src/cpu/ppc/vm/cppInterpreterGenerator_ppc.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/ppc/vm/cppInterpreterGenerator_ppc.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,8 +26,9 @@
#ifndef CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
#define CPU_PPC_VM_CPPINTERPRETERGENERATOR_PPC_HPP
- address generate_normal_entry(void);
- address generate_native_entry(void);
+ address generate_normal_entry(bool synchronized);
+ address generate_native_entry(bool synchronized);
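+  // No special math entries for the C++ interpreter on PPC; returning NULL
+  // falls back to the normal entry.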
+ address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
void lock_method(void);
void unlock_method(void);
--- a/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/ppc/vm/cppInterpreter_ppc.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -938,8 +938,9 @@
// Interpreter stub for calling a native method. (C++ interpreter)
// This sets up a somewhat different looking stack for calling the native method
// than the typical interpreter frame setup.
+// The synchronized parameter is ignored.
//
-address CppInterpreterGenerator::generate_native_entry(void) {
+address CppInterpreterGenerator::generate_native_entry(bool synchronized) {
if (native_entry != NULL) return native_entry;
address entry = __ pc();
@@ -1729,7 +1730,8 @@
__ std(R0, BasicObjectLock::obj_offset_in_bytes(), stack_base); // Mark lock as unused
}
-address CppInterpreterGenerator::generate_normal_entry(void) {
+// The synchronized parameter is ignored.
+address CppInterpreterGenerator::generate_normal_entry(bool synchronized) {
if (interpreter_frame_manager != NULL) return interpreter_frame_manager;
address entry = __ pc();
@@ -2789,38 +2791,6 @@
return interpreter_frame_manager;
}
-// Generate code for various sorts of method entries
-//
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
- address entry_point = NULL;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized : break;
- case Interpreter::native : // Fall thru
- case Interpreter::native_synchronized : entry_point = ((CppInterpreterGenerator*)this)->generate_native_entry(); break;
- case Interpreter::empty : break;
- case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
- // These are special interpreter intrinsics which we don't support so far.
- case Interpreter::java_lang_math_sin : break;
- case Interpreter::java_lang_math_cos : break;
- case Interpreter::java_lang_math_tan : break;
- case Interpreter::java_lang_math_abs : break;
- case Interpreter::java_lang_math_log : break;
- case Interpreter::java_lang_math_log10 : break;
- case Interpreter::java_lang_math_sqrt : break;
- case Interpreter::java_lang_math_pow : break;
- case Interpreter::java_lang_math_exp : break;
- case Interpreter::java_lang_ref_reference_get: entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
- default : ShouldNotReachHere(); break;
- }
-
- if (entry_point) {
- return entry_point;
- }
- return ((InterpreterGenerator*)this)->generate_normal_entry();
-}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {
--- a/hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/ppc/vm/interpreterGenerator_ppc.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2012, 2013 SAP AG. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -31,7 +31,12 @@
private:
address generate_abstract_entry(void);
- address generate_accessor_entry(void);
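+  // Accessor and empty methods are no longer specialized; both branch to the
+  // normal (zerolocals) entry.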
+ address generate_jump_to_normal_entry(void);
+ address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+ address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry(void);
+ // Not supported
+ address generate_CRC32_update_entry() { return NULL; }
+ address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_PPC_VM_INTERPRETERGENERATOR_PPC_HPP
--- a/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/ppc/vm/interpreter_ppc.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -428,6 +428,19 @@
return entry;
}
+
+// Branch to the normal (zerolocals) entry; used by entry points that are no
+// longer specialized (accessor and empty methods, Reference.get fallback).
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+ address entry = __ pc();
+ address normal_entry = Interpreter::entry_for_kind(Interpreter::zerolocals);
+ assert(normal_entry != NULL, "should already be generated.");
+ __ branch_to_entry(normal_entry, R11_scratch1);
+ __ flush();
+
+ return entry;
+}
+
// Abstract method entry.
//
address InterpreterGenerator::generate_abstract_entry(void) {
@@ -485,203 +498,6 @@
return entry;
}
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry.
-address InterpreterGenerator::generate_accessor_entry(void) {
- if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
- return NULL;
- }
-
- Label Lslow_path, Lacquire;
-
- const Register
- Rclass_or_obj = R3_ARG1,
- Rconst_method = R4_ARG2,
- Rcodes = Rconst_method,
- Rcpool_cache = R5_ARG3,
- Rscratch = R11_scratch1,
- Rjvmti_mode = Rscratch,
- Roffset = R12_scratch2,
- Rflags = R6_ARG4,
- Rbtable = R7_ARG5;
-
- static address branch_table[number_of_states];
-
- address entry = __ pc();
-
- // Check for safepoint:
- // Ditch this, real man don't need safepoint checks.
-
- // Also check for JVMTI mode
- // Check for null obj, take slow path if so.
- __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
- __ lwz(Rjvmti_mode, thread_(interp_only_mode));
- __ cmpdi(CCR1, Rclass_or_obj, 0);
- __ cmpwi(CCR0, Rjvmti_mode, 0);
- __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
- __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0
-
- // Do 2 things in parallel:
- // 1. Load the index out of the first instruction word, which looks like this:
- // <0x2a><0xb4><index (2 byte, native endianess)>.
- // 2. Load constant pool cache base.
- __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
- __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);
-
- __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
- __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);
-
- // Get the const pool entry by means of <index>.
- const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
- __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
- __ add(Rcpool_cache, Rscratch, Rcpool_cache);
-
- // Check if cpool cache entry is resolved.
- // We are resolved if the indices offset contains the current bytecode.
- ByteSize cp_base_offset = ConstantPoolCache::base_offset();
- // Big Endian:
- __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
- __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
- __ bne(CCR0, Lslow_path);
- __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.
-
- // Finally, start loading the value: Get cp cache entry into regs.
- __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
- __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);
-
- // Following code is from templateTable::getfield_or_static
- // Load pointer to branch table
- __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
-
- // Get volatile flag
- __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
- // note: sync is needed before volatile load on PPC64
-
- // Check field type
- __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
-
-#ifdef ASSERT
- Label LFlagInvalid;
- __ cmpldi(CCR0, Rflags, number_of_states);
- __ bge(CCR0, LFlagInvalid);
-
- __ ld(R9_ARG7, 0, R1_SP);
- __ ld(R10_ARG8, 0, R21_sender_SP);
- __ cmpd(CCR0, R9_ARG7, R10_ARG8);
- __ asm_assert_eq("backlink", 0x543);
-#endif // ASSERT
- __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
- // Load from branch table and dispatch (volatile case: one instruction ahead)
- __ sldi(Rflags, Rflags, LogBytesPerWord);
- __ cmpwi(CCR6, Rscratch, 1); // volatile?
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
- }
- __ ldx(Rbtable, Rbtable, Rflags);
-
- if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
- __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
- }
- __ mtctr(Rbtable);
- __ bctr();
-
-#ifdef ASSERT
- __ bind(LFlagInvalid);
- __ stop("got invalid flag", 0x6541);
-
- bool all_uninitialized = true,
- all_initialized = true;
- for (int i = 0; i<number_of_states; ++i) {
- all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
- all_initialized = all_initialized && (branch_table[i] != NULL);
- }
- assert(all_uninitialized != all_initialized, "consistency"); // either or
-
- __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
- if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
- if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
- if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
- __ stop("unexpected type", 0x6551);
-#endif
-
- if (branch_table[itos] == 0) { // generate only once
- __ align(32, 28, 28); // align load
- __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
- branch_table[itos] = __ pc(); // non-volatile_entry point
- __ lwax(R3_RET, Rclass_or_obj, Roffset);
- __ beq(CCR6, Lacquire);
- __ blr();
- }
-
- if (branch_table[ltos] == 0) { // generate only once
- __ align(32, 28, 28); // align load
- __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
- branch_table[ltos] = __ pc(); // non-volatile_entry point
- __ ldx(R3_RET, Rclass_or_obj, Roffset);
- __ beq(CCR6, Lacquire);
- __ blr();
- }
-
- if (branch_table[btos] == 0) { // generate only once
- __ align(32, 28, 28); // align load
- __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
- branch_table[btos] = __ pc(); // non-volatile_entry point
- __ lbzx(R3_RET, Rclass_or_obj, Roffset);
- __ extsb(R3_RET, R3_RET);
- __ beq(CCR6, Lacquire);
- __ blr();
- }
-
- if (branch_table[ctos] == 0) { // generate only once
- __ align(32, 28, 28); // align load
- __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
- branch_table[ctos] = __ pc(); // non-volatile_entry point
- __ lhzx(R3_RET, Rclass_or_obj, Roffset);
- __ beq(CCR6, Lacquire);
- __ blr();
- }
-
- if (branch_table[stos] == 0) { // generate only once
- __ align(32, 28, 28); // align load
- __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
- branch_table[stos] = __ pc(); // non-volatile_entry point
- __ lhax(R3_RET, Rclass_or_obj, Roffset);
- __ beq(CCR6, Lacquire);
- __ blr();
- }
-
- if (branch_table[atos] == 0) { // generate only once
- __ align(32, 28, 28); // align load
- __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
- branch_table[atos] = __ pc(); // non-volatile_entry point
- __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
- __ verify_oop(R3_RET);
- //__ dcbt(R3_RET); // prefetch
- __ beq(CCR6, Lacquire);
- __ blr();
- }
-
- __ align(32, 12);
- __ bind(Lacquire);
- __ twi_0(R3_RET);
- __ isync(); // acquire
- __ blr();
-
-#ifdef ASSERT
- for (int i = 0; i<number_of_states; ++i) {
- assert(branch_table[i], "accessor_entry initialization");
- //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
- }
-#endif
-
- __ bind(Lslow_path);
- __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
- __ flush();
-
- return entry;
-}
-
// Interpreter intrinsic for WeakReference.get().
// 1. Don't push a full blown frame and go on dispatching, but fetch the value
// into R8 and return quickly
@@ -713,7 +529,6 @@
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
- // This code is based on generate_accessor_enty.
address entry = __ pc();
@@ -768,7 +583,7 @@
return entry;
} else {
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}
}
--- a/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreterGenerator_ppc.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -30,7 +30,6 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
- address generate_empty_entry(void);
void lock_method(Register Rflags, Register Rscratch1, Register Rscratch2, bool flags_preloaded=false);
void unlock_method(bool check_exceptions = true);
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -602,48 +602,6 @@
// End of helpers
-// ============================================================================
-// Various method entries
-//
-
-// Empty method, generate a very fast return. We must skip this entry if
-// someone's debugging, indicated by the flag
-// "interp_mode" in the Thread obj.
-// Note: empty methods are generated mostly methods that do assertions, which are
-// disabled in the "java opt build".
-address TemplateInterpreterGenerator::generate_empty_entry(void) {
- if (!UseFastEmptyMethods) {
- NOT_PRODUCT(__ should_not_reach_here();)
- return Interpreter::entry_for_kind(Interpreter::zerolocals);
- }
-
- Label Lslow_path;
- const Register Rjvmti_mode = R11_scratch1;
- address entry = __ pc();
-
- __ lwz(Rjvmti_mode, thread_(interp_only_mode));
- __ cmpwi(CCR0, Rjvmti_mode, 0);
- __ bne(CCR0, Lslow_path); // jvmti_mode!=0
-
- // Noone's debuggin: Simply return.
- // Pop c2i arguments (if any) off when we return.
-#ifdef ASSERT
- __ ld(R9_ARG7, 0, R1_SP);
- __ ld(R10_ARG8, 0, R21_sender_SP);
- __ cmpd(CCR0, R9_ARG7, R10_ARG8);
- __ asm_assert_eq("backlink", 0x545);
-#endif // ASSERT
- __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.
-
- // And we're done.
- __ blr();
-
- __ bind(Lslow_path);
- __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), R11_scratch1);
- __ flush();
-
- return entry;
-}
// Support abs and sqrt like in compiler.
// For others we can use a normal (native) entry.
@@ -1289,45 +1247,6 @@
return entry;
}
-// =============================================================================
-// Entry points
-
-address AbstractInterpreterGenerator::generate_method_entry(
- AbstractInterpreter::MethodKind kind) {
- // Determine code generation flags.
- bool synchronized = false;
- address entry_point = NULL;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*) this)->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ((InterpreterGenerator*) this)->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind); break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
- default : ShouldNotReachHere(); break;
- }
-
- if (entry_point) {
- return entry_point;
- }
-
- return ((InterpreterGenerator*) this)->generate_normal_entry(synchronized);
-}
-
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
@@ -1355,7 +1274,7 @@
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
- // in AbstractInterpreterGenerator::generate_method_entry.
+ // in InterpreterGenerator::generate_fixed_frame.
assert(Interpreter::stackElementWords == 1, "sanity");
const int max_alignment_space = StackAlignmentInBytes / Interpreter::stackElementSize;
const int abi_scratch = is_top_frame ? (frame::abi_reg_args_size / Interpreter::stackElementSize) :
--- a/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/sparc/vm/cppInterpreter_sparc.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
#include "oops/arrayOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
@@ -68,9 +69,7 @@
#define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
#define __ _masm->
-Label frame_manager_entry;
-Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
- // c++ interpreter entry point this holds that entry point label.
+Label frame_manager_entry; // Holds the C++ interpreter (frame manager) entry point.
static address unctrap_frame_manager_entry = NULL;
@@ -452,110 +451,6 @@
return NULL;
}
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry
-
-// Generates code to elide accessor methods
-// Uses G3_scratch and G1_scratch as scratch
-address InterpreterGenerator::generate_accessor_entry(void) {
-
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
- // parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- address entry = __ pc();
- Label slow_path;
-
- if ( UseFastAccessorMethods) {
- // Check if we need to reach a safepoint and generate full interpreter
- // frame if so.
- AddressLiteral sync_state(SafepointSynchronize::address_of_state());
- __ load_contents(sync_state, G3_scratch);
- __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
- __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
- __ delayed()->nop();
-
- // Check if local 0 != NULL
- __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
- __ tst(Otos_i); // check if local 0 == NULL and go the slow path
- __ brx(Assembler::zero, false, Assembler::pn, slow_path);
- __ delayed()->nop();
-
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- // get first 4 bytes of the bytecodes (big endian!)
- __ ld_ptr(Address(G5_method, in_bytes(Method::const_offset())), G1_scratch);
- __ ld(Address(G1_scratch, in_bytes(ConstMethod::codes_offset())), G1_scratch);
-
- // move index @ 2 far left then to the right most two bytes.
- __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
- __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
- ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
- // get constant pool cache
- __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G3_scratch);
- __ ld_ptr(G3_scratch, in_bytes(ConstMethod::constants_offset()), G3_scratch);
- __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
- // get specific constant pool cache entry
- __ add(G3_scratch, G1_scratch, G3_scratch);
-
- // Check the constant Pool cache entry to see if it has been resolved.
- // If not, need the slow path.
- ByteSize cp_base_offset = ConstantPoolCache::base_offset();
- __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::indices_offset()), G1_scratch);
- __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
- __ and3(G1_scratch, 0xFF, G1_scratch);
- __ cmp(G1_scratch, Bytecodes::_getfield);
- __ br(Assembler::notEqual, false, Assembler::pn, slow_path);
- __ delayed()->nop();
-
- // Get the type and return field offset from the constant pool cache
- __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset()), G1_scratch);
- __ ld_ptr(G3_scratch, in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset()), G3_scratch);
-
- Label xreturn_path;
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Get the type from the constant pool cache
- __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
- // Make sure we don't need to mask G1_scratch after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
- __ cmp(G1_scratch, atos );
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, itos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, stos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, ctos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
-#ifdef ASSERT
- __ cmp(G1_scratch, btos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
- __ should_not_reach_here();
-#endif
- __ ldsb(Otos_i, G3_scratch, Otos_i);
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ retl(); // return from leaf routine
- __ delayed()->mov(O5_savedSP, SP);
-
- // Generate regular method entry
- __ bind(slow_path);
- __ ba(fast_accessor_slow_entry_path);
- __ delayed()->nop();
- return entry;
- }
- return NULL;
-}
-
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@@ -573,7 +468,7 @@
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}
//
@@ -1870,23 +1765,6 @@
__ ba(call_interpreter_2);
__ delayed()->st_ptr(O1, STATE(_stack));
-
- // Fast accessor methods share this entry point.
- // This works because frame manager is in the same codelet
- // This can either be an entry via call_stub/c1/c2 or a recursive interpreter call
- // we need to do a little register fixup here once we distinguish the two of them
- if (UseFastAccessorMethods && !synchronized) {
- // Call stub_return address still in O7
- __ bind(fast_accessor_slow_entry_path);
- __ set((intptr_t)return_from_native_method - 8, Gtmp1);
- __ cmp(Gtmp1, O7); // returning to interpreter?
- __ brx(Assembler::equal, true, Assembler::pt, re_dispatch); // yep
- __ delayed()->nop();
- __ ba(re_dispatch);
- __ delayed()->mov(G0, prevState); // initial entry
-
- }
-
// interpreter returning to native code (call_stub/c1/c2)
// convert result and unwind initial activation
// L2_scratch - scaled result type index
--- a/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/sparc/vm/interpreterGenerator_sparc.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,11 @@
address generate_normal_entry(bool synchronized);
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
- address generate_math_entry(AbstractInterpreter::MethodKind kind);
- address generate_empty_entry(void);
- address generate_accessor_entry(void);
+ // there are no math intrinsics on sparc
+ address generate_math_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
+ address generate_jump_to_normal_entry(void);
+ address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+ address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry(void);
void lock_method(void);
void save_native_result(void);
@@ -43,4 +45,7 @@
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
void generate_counter_overflow(Label& Lcontinue);
+ // Not supported
+ address generate_CRC32_update_entry() { return NULL; }
+ address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_SPARC_VM_INTERPRETERGENERATOR_SPARC_HPP
--- a/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/sparc/vm/interpreter_sparc.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -241,6 +241,15 @@
// Various method entries
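+// Entry point for methods that are no longer specialized (accessor, empty,
+// Reference.get fallback): branch to the normal (zerolocals) entry, which
+// must already have been generated.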
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+ address entry = __ pc();
+ assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+ AddressLiteral al(Interpreter::entry_for_kind(Interpreter::zerolocals));
+ __ jump_to(al, G3_scratch);
+ __ delayed()->nop();
+ return entry;
+}
+
// Abstract method entry
// Attempt to execute abstract method. Throw exception
//
@@ -255,159 +264,6 @@
}
-
-//----------------------------------------------------------------------------------------------------
-// Entry points & stack frame layout
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-// G5_method: holds the Method* of the method to call
-// Lesp: points to the TOS of the callers expression stack
-// after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-// pop parameters from the callers stack by adjusting Lesp
-// set O0 to Lesp
-// compute X = (max_locals - num_parameters)
-// bump SP up by X to accomadate the extra locals
-// compute X = max_expression_stack
-// + vm_local_words
-// + 16 words of register save area
-// save frame doing a save sp, -X, sp growing towards lower addresses
-// set Lbcp, Lmethod, LcpoolCache
-// set Llocals to i0
-// set Lmonitors to FP - rounded_vm_local_words
-// set Lesp to Lmonitors - 4
-//
-// The frame has now been setup to do the rest of the entry code
-
-// Try this optimization: Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations. It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// | |
-// : reg save area :
-// | |
-// +---------------+ <--- sp + 0x40
-// | |
-// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
-// | |
-// +---------------+ <--- sp + 0x5c
-// | |
-// : free :
-// | |
-// +---------------+ <--- Gargs
-// | |
-// : arguments :
-// | |
-// +---------------+
-// | |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// | |
-// : reg save area :
-// | |
-// +---------------+ <--- sp + 0x40
-// | |
-// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
-// | |
-// +---------------+ <--- sp + 0x5c
-// | |
-// : :
-// | | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// | VM locals |
-// +---------------+ <--- fp
-// | |
-// : reg save area :
-// | |
-// +---------------+ <--- fp + 0x40
-// | |
-// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
-// | |
-// +---------------+ <--- fp + 0x5c
-// | |
-// : free :
-// | |
-// +---------------+
-// | |
-// : nonarg locals :
-// | |
-// +---------------+
-// | |
-// : arguments :
-// | | <--- Llocals
-// +---------------+ <--- Gargs
-// | |
-
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
- // determine code generation flags
- bool synchronized = false;
- address entry_point = NULL;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : break;
- case Interpreter::java_lang_math_cos : break;
- case Interpreter::java_lang_math_tan : break;
- case Interpreter::java_lang_math_sqrt : break;
- case Interpreter::java_lang_math_abs : break;
- case Interpreter::java_lang_math_log : break;
- case Interpreter::java_lang_math_log10 : break;
- case Interpreter::java_lang_math_pow : break;
- case Interpreter::java_lang_math_exp : break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
- default:
- fatal(err_msg("unexpected method kind: %d", kind));
- break;
- }
-
- if (entry_point) return entry_point;
-
- return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
-}
-
-
bool AbstractInterpreter::can_be_compiled(methodHandle m) {
// No special entry points that preclude compilation
return true;
--- a/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -456,6 +456,115 @@
// Generate a fixed interpreter frame. This is identical setup for interpreted
// methods and for native methods hence the shared code.
+
+//----------------------------------------------------------------------------------------------------
+// Stack frame layout
+//
+// When control flow reaches any of the entry types for the interpreter
+// the following holds ->
+//
+// C2 Calling Conventions:
+//
+// The entry code below assumes that the following registers are set
+// when coming in:
+// G5_method: holds the Method* of the method to call
+// Lesp: points to the TOS of the callers expression stack
+// after having pushed all the parameters
+//
+// The entry code does the following to setup an interpreter frame
+// pop parameters from the callers stack by adjusting Lesp
+// set O0 to Lesp
+// compute X = (max_locals - num_parameters)
+// bump SP up by X to accommodate the extra locals
+// compute X = max_expression_stack
+// + vm_local_words
+// + 16 words of register save area
+// save frame doing a save sp, -X, sp growing towards lower addresses
+// set Lbcp, Lmethod, LcpoolCache
+// set Llocals to i0
+// set Lmonitors to FP - rounded_vm_local_words
+// set Lesp to Lmonitors - 4
+//
+// The frame has now been setup to do the rest of the entry code
+
+// Try this optimization: Most method entries could live in a
+// "one size fits all" stack frame without all the dynamic size
+// calculations. It might be profitable to do all this calculation
+// statically and approximately for "small enough" methods.
+
+//-----------------------------------------------------------------------------------------------
+
+// C1 Calling conventions
+//
+// Upon method entry, the following registers are setup:
+//
+// g2 G2_thread: current thread
+// g5 G5_method: method to activate
+// g4 Gargs : pointer to last argument
+//
+//
+// Stack:
+//
+// +---------------+ <--- sp
+// | |
+// : reg save area :
+// | |
+// +---------------+ <--- sp + 0x40
+// | |
+// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
+// | |
+// +---------------+ <--- sp + 0x5c
+// | |
+// : free :
+// | |
+// +---------------+ <--- Gargs
+// | |
+// : arguments :
+// | |
+// +---------------+
+// | |
+//
+//
+//
+// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
+//
+// +---------------+ <--- sp
+// | |
+// : reg save area :
+// | |
+// +---------------+ <--- sp + 0x40
+// | |
+// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
+// | |
+// +---------------+ <--- sp + 0x5c
+// | |
+// : :
+// | | <--- Lesp
+// +---------------+ <--- Lmonitors (fp - 0x18)
+// | VM locals |
+// +---------------+ <--- fp
+// | |
+// : reg save area :
+// | |
+// +---------------+ <--- fp + 0x40
+// | |
+// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
+// | |
+// +---------------+ <--- fp + 0x5c
+// | |
+// : free :
+// | |
+// +---------------+
+// | |
+// : nonarg locals :
+// | |
+// +---------------+
+// | |
+// : arguments :
+// | | <--- Llocals
+// +---------------+ <--- Gargs
+// | |
+
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
//
//
@@ -599,136 +708,6 @@
}
-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
-
- // A method that does nother but return...
-
- address entry = __ pc();
- Label slow_path;
-
- // do nothing for empty methods (do not even increment invocation counter)
- if ( UseFastEmptyMethods) {
- // If we need a safepoint check, generate full interpreter entry.
- AddressLiteral sync_state(SafepointSynchronize::address_of_state());
- __ set(sync_state, G3_scratch);
- __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
-
- // Code: _return
- __ retl();
- __ delayed()->mov(O5_savedSP, SP);
-
- __ bind(slow_path);
- (void) generate_normal_entry(false);
-
- return entry;
- }
- return NULL;
-}
-
-// Call an accessor method (assuming it is resolved, otherwise drop into
-// vanilla (slow path) entry
-
-// Generates code to elide accessor methods
-// Uses G3_scratch and G1_scratch as scratch
-address InterpreterGenerator::generate_accessor_entry(void) {
-
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof;
- // parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- address entry = __ pc();
- Label slow_path;
-
-
- // XXX: for compressed oops pointer loading and decoding doesn't fit in
- // delay slot and damages G1
- if ( UseFastAccessorMethods && !UseCompressedOops ) {
- // Check if we need to reach a safepoint and generate full interpreter
- // frame if so.
- AddressLiteral sync_state(SafepointSynchronize::address_of_state());
- __ load_contents(sync_state, G3_scratch);
- __ cmp(G3_scratch, SafepointSynchronize::_not_synchronized);
- __ cmp_and_br_short(G3_scratch, SafepointSynchronize::_not_synchronized, Assembler::notEqual, Assembler::pn, slow_path);
-
- // Check if local 0 != NULL
- __ ld_ptr(Gargs, G0, Otos_i ); // get local 0
- // check if local 0 == NULL and go the slow path
- __ br_null_short(Otos_i, Assembler::pn, slow_path);
-
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- // get first 4 bytes of the bytecodes (big endian!)
- __ ld_ptr(G5_method, Method::const_offset(), G1_scratch);
- __ ld(G1_scratch, ConstMethod::codes_offset(), G1_scratch);
-
- // move index @ 2 far left then to the right most two bytes.
- __ sll(G1_scratch, 2*BitsPerByte, G1_scratch);
- __ srl(G1_scratch, 2*BitsPerByte - exact_log2(in_words(
- ConstantPoolCacheEntry::size()) * BytesPerWord), G1_scratch);
-
- // get constant pool cache
- __ ld_ptr(G5_method, Method::const_offset(), G3_scratch);
- __ ld_ptr(G3_scratch, ConstMethod::constants_offset(), G3_scratch);
- __ ld_ptr(G3_scratch, ConstantPool::cache_offset_in_bytes(), G3_scratch);
-
- // get specific constant pool cache entry
- __ add(G3_scratch, G1_scratch, G3_scratch);
-
- // Check the constant Pool cache entry to see if it has been resolved.
- // If not, need the slow path.
- ByteSize cp_base_offset = ConstantPoolCache::base_offset();
- __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::indices_offset(), G1_scratch);
- __ srl(G1_scratch, 2*BitsPerByte, G1_scratch);
- __ and3(G1_scratch, 0xFF, G1_scratch);
- __ cmp_and_br_short(G1_scratch, Bytecodes::_getfield, Assembler::notEqual, Assembler::pn, slow_path);
-
- // Get the type and return field offset from the constant pool cache
- __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), G1_scratch);
- __ ld_ptr(G3_scratch, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), G3_scratch);
-
- Label xreturn_path;
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Get the type from the constant pool cache
- __ srl(G1_scratch, ConstantPoolCacheEntry::tos_state_shift, G1_scratch);
- // Make sure we don't need to mask G1_scratch after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
- __ cmp(G1_scratch, atos );
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ld_ptr(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, itos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ld(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, stos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ldsh(Otos_i, G3_scratch, Otos_i);
- __ cmp(G1_scratch, ctos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->lduh(Otos_i, G3_scratch, Otos_i);
-#ifdef ASSERT
- __ cmp(G1_scratch, btos);
- __ br(Assembler::equal, true, Assembler::pt, xreturn_path);
- __ delayed()->ldsb(Otos_i, G3_scratch, Otos_i);
- __ should_not_reach_here();
-#endif
- __ ldsb(Otos_i, G3_scratch, Otos_i);
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ retl(); // return from leaf routine
- __ delayed()->mov(O5_savedSP, SP);
-
- // Generate regular method entry
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- return entry;
- }
- return NULL;
-}
-
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@@ -806,7 +785,7 @@
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}
//
@@ -1242,8 +1221,6 @@
// Generic method entry to (asm) interpreter
-//------------------------------------------------------------------------------------------------------------------------
-//
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
address entry = __ pc();
@@ -1410,123 +1387,6 @@
return entry;
}
-
-//----------------------------------------------------------------------------------------------------
-// Entry points & stack frame layout
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// C2 Calling Conventions:
-//
-// The entry code below assumes that the following registers are set
-// when coming in:
-// G5_method: holds the Method* of the method to call
-// Lesp: points to the TOS of the callers expression stack
-// after having pushed all the parameters
-//
-// The entry code does the following to setup an interpreter frame
-// pop parameters from the callers stack by adjusting Lesp
-// set O0 to Lesp
-// compute X = (max_locals - num_parameters)
-// bump SP up by X to accomadate the extra locals
-// compute X = max_expression_stack
-// + vm_local_words
-// + 16 words of register save area
-// save frame doing a save sp, -X, sp growing towards lower addresses
-// set Lbcp, Lmethod, LcpoolCache
-// set Llocals to i0
-// set Lmonitors to FP - rounded_vm_local_words
-// set Lesp to Lmonitors - 4
-//
-// The frame has now been setup to do the rest of the entry code
-
-// Try this optimization: Most method entries could live in a
-// "one size fits all" stack frame without all the dynamic size
-// calculations. It might be profitable to do all this calculation
-// statically and approximately for "small enough" methods.
-
-//-----------------------------------------------------------------------------------------------
-
-// C1 Calling conventions
-//
-// Upon method entry, the following registers are setup:
-//
-// g2 G2_thread: current thread
-// g5 G5_method: method to activate
-// g4 Gargs : pointer to last argument
-//
-//
-// Stack:
-//
-// +---------------+ <--- sp
-// | |
-// : reg save area :
-// | |
-// +---------------+ <--- sp + 0x40
-// | |
-// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
-// | |
-// +---------------+ <--- sp + 0x5c
-// | |
-// : free :
-// | |
-// +---------------+ <--- Gargs
-// | |
-// : arguments :
-// | |
-// +---------------+
-// | |
-//
-//
-//
-// AFTER FRAME HAS BEEN SETUP for method interpretation the stack looks like:
-//
-// +---------------+ <--- sp
-// | |
-// : reg save area :
-// | |
-// +---------------+ <--- sp + 0x40
-// | |
-// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
-// | |
-// +---------------+ <--- sp + 0x5c
-// | |
-// : :
-// | | <--- Lesp
-// +---------------+ <--- Lmonitors (fp - 0x18)
-// | VM locals |
-// +---------------+ <--- fp
-// | |
-// : reg save area :
-// | |
-// +---------------+ <--- fp + 0x40
-// | |
-// : extra 7 slots : note: these slots are not really needed for the interpreter (fix later)
-// | |
-// +---------------+ <--- fp + 0x5c
-// | |
-// : free :
-// | |
-// +---------------+
-// | |
-// : nonarg locals :
-// | |
-// +---------------+
-// | |
-// : arguments :
-// | | <--- Llocals
-// +---------------+ <--- Gargs
-// | |
-
static int size_activation_helper(int callee_extra_locals, int max_stack, int monitor_size) {
// Figure out the size of an interpreter frame (in words) given that we have a fully allocated
--- a/hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/cppInterpreterGenerator_x86.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,21 +27,6 @@
protected:
-#if 0
- address generate_asm_interpreter_entry(bool synchronized);
- address generate_native_entry(bool synchronized);
- address generate_abstract_entry(void);
- address generate_math_entry(AbstractInterpreter::MethodKind kind);
- address generate_empty_entry(void);
- address generate_accessor_entry(void);
- address generate_Reference_get_entry(void);
- void lock_method(void);
- void generate_stack_overflow_check(void);
-
- void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
- void generate_counter_overflow(Label* do_continue);
-#endif
-
void generate_more_monitors();
void generate_deopt_handling();
address generate_interpreter_frame_manager(bool synchronized); // C++ interpreter only
--- a/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/cppInterpreter_x86.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,9 +66,6 @@
#define __ _masm->
#define STATE(field_name) (Address(state, byte_offset_of(BytecodeInterpreter, field_name)))
-Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
- // c++ interpreter entry point this holds that entry point label.
-
// default registers for state and sender_sp
// state and sender_sp are the same on 32bit because we have no choice.
// state could be rsi on 64bit but it is an arg reg and not callee save
@@ -660,7 +657,6 @@
// generate_method_entry) so the guard should work for them too.
//
- // monitor entry size: see picture of stack set (generate_method_entry) and frame_i486.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -794,156 +790,6 @@
__ lock_object(monitor);
}
-// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
-
-address InterpreterGenerator::generate_accessor_entry(void) {
-
- // rbx: Method*
-
- // rsi/r13: senderSP must preserved for slow path, set SP to it on fast path
-
- Label xreturn_path;
-
- // do fastpath for resolved accessor methods
- if (UseFastAccessorMethods) {
-
- address entry_point = __ pc();
-
- Label slow_path;
- // If we need a safepoint check, generate full interpreter entry.
- ExternalAddress state(SafepointSynchronize::address_of_state());
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
-
- __ jcc(Assembler::notEqual, slow_path);
- // ASM/C++ Interpreter
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- // rbx,: method
- // rcx: receiver
- __ movptr(rax, Address(rsp, wordSize));
-
- // check if local 0 != NULL and read field
- __ testptr(rax, rax);
- __ jcc(Assembler::zero, slow_path);
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, Method::const_offset()));
- __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
- __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
- // Shift codes right to get the index on the right.
- // The bytecode fetched looks like <index><0xb4><0x2a>
- __ shrl(rdx, 2*BitsPerByte);
- __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
- // rax,: local 0
- // rbx,: method
- // rcx: receiver - do not destroy since it is needed for slow path!
- // rcx: scratch
- // rdx: constant pool cache index
- // rdi: constant pool cache
- // rsi/r13: sender sp
-
- // check if getfield has been resolved and read constant pool cache entry
- // check the validity of the cache entry by testing whether _indices field
- // contains Bytecode::_getfield in b1 byte.
- assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
- __ movl(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
- __ shrl(rcx, 2*BitsPerByte);
- __ andl(rcx, 0xFF);
- __ cmpl(rcx, Bytecodes::_getfield);
- __ jcc(Assembler::notEqual, slow_path);
-
- // Note: constant pool entry is not valid before bytecode is resolved
- __ movptr(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
- __ movl(rdx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-
- Label notByte, notShort, notChar;
- const Address field_address (rax, rcx, Address::times_1);
-
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Use the type from the constant pool cache
- __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
- // Make sure we don't need to mask rdx after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
-#ifdef _LP64
- Label notObj;
- __ cmpl(rdx, atos);
- __ jcc(Assembler::notEqual, notObj);
- // atos
- __ movptr(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notObj);
-#endif // _LP64
- __ cmpl(rdx, btos);
- __ jcc(Assembler::notEqual, notByte);
- __ load_signed_byte(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notByte);
- __ cmpl(rdx, stos);
- __ jcc(Assembler::notEqual, notShort);
- __ load_signed_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notShort);
- __ cmpl(rdx, ctos);
- __ jcc(Assembler::notEqual, notChar);
- __ load_unsigned_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notChar);
-#ifdef ASSERT
- Label okay;
-#ifndef _LP64
- __ cmpl(rdx, atos);
- __ jcc(Assembler::equal, okay);
-#endif // _LP64
- __ cmpl(rdx, itos);
- __ jcc(Assembler::equal, okay);
- __ stop("what type is this?");
- __ bind(okay);
-#endif // ASSERT
- // All the rest are a 32 bit wordsize
- __ movl(rax, field_address);
-
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ pop(rdi); // get return address
- __ mov(rsp, sender_sp_on_entry); // set sp to sender sp
- __ jmp(rdi);
-
- // generate a vanilla interpreter entry as the slow path
- __ bind(slow_path);
- // We will enter c++ interpreter looking like it was
- // called by the call_stub this will cause it to return
- // a tosca result to the invoker which might have been
- // the c++ interpreter itself.
-
- __ jmp(fast_accessor_slow_entry_path);
- return entry_point;
-
- } else {
- return NULL;
- }
-
-}
-
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
@@ -961,7 +807,7 @@
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}
//
@@ -1670,10 +1516,6 @@
address entry_point = __ pc();
- // Fast accessor methods share this entry point.
- // This works because frame manager is in the same codelet
- if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
-
Label dispatch_entry_2;
__ movptr(rcx, sender_sp_on_entry);
__ movptr(state, (int32_t)NULL_WORD); // no current activation
@@ -2212,40 +2054,6 @@
return entry_point;
}
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
- // determine code generation flags
- bool synchronized = false;
- address entry_point = NULL;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : // fall thru
- entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break;
- default : ShouldNotReachHere(); break;
- }
-
- if (entry_point) return entry_point;
-
- return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized);
-
-}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ _masm->
+
+// Accessor and empty method entries simply jump to the normal (zerolocals) entry.
+// The old "fast" entries did not update the invocation counters and could
+// therefore prevent inlining of methods that should be inlined.
+address InterpreterGenerator::generate_jump_to_normal_entry(void) {
+ address entry_point = __ pc();
+
+ assert(Interpreter::entry_for_kind(Interpreter::zerolocals) != NULL, "should already be generated");
+ __ jump(RuntimeAddress(Interpreter::entry_for_kind(Interpreter::zerolocals)));
+ return entry_point;
+}
+
+// Abstract method entry
+// Attempt to execute abstract method. Throw exception
+address InterpreterGenerator::generate_abstract_entry(void) {
+
+ address entry_point = __ pc();
+
+ // abstract method entry
+
+#ifndef CC_INTERP
+ // pop return address, reset last_sp to NULL
+ __ empty_expression_stack();
+ __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
+ __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
+#endif
+
+ // throw exception
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
+ // the call_VM checks for exception, so we should never return here.
+ __ should_not_reach_here();
+
+ return entry_point;
+}
--- a/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/interpreterGenerator_x86.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,8 +36,9 @@
address generate_native_entry(bool synchronized);
address generate_abstract_entry(void);
address generate_math_entry(AbstractInterpreter::MethodKind kind);
- address generate_empty_entry(void);
- address generate_accessor_entry(void);
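+ // Accessor and empty method entries are no longer generated separately;
+ // both now jump to the normal (zerolocals) entry.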
+ address generate_jump_to_normal_entry(void);
+ address generate_accessor_entry(void) { return generate_jump_to_normal_entry(); }
+ address generate_empty_entry(void) { return generate_jump_to_normal_entry(); }
address generate_Reference_get_entry();
address generate_CRC32_update_entry();
address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind);
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_32.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -67,45 +67,6 @@
}
-//
-// Various method entries (that c++ and asm interpreter agree upon)
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
-
- // rbx,: Method*
- // rcx: receiver (unused)
- // rsi: previous interpreter state (C++ interpreter) must preserve
- // rsi: sender sp must set sp to this value on return
-
- if (!UseFastEmptyMethods) return NULL;
-
- address entry_point = __ pc();
-
- // If we need a safepoint check, generate full interpreter entry.
- Label slow_path;
- ExternalAddress state(SafepointSynchronize::address_of_state());
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
- __ jcc(Assembler::notEqual, slow_path);
-
- // do nothing for empty methods (do not even increment invocation counter)
- // Code: _return
- // _return
- // return w/o popping parameters
- __ pop(rax);
- __ mov(rsp, rsi);
- __ jmp(rax);
-
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- return entry_point;
-}
-
address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// rbx,: Method*
@@ -216,36 +177,6 @@
}
-// Abstract method entry
-// Attempt to execute abstract method. Throw exception
-address InterpreterGenerator::generate_abstract_entry(void) {
-
- // rbx,: Method*
- // rcx: receiver (unused)
- // rsi: previous interpreter state (C++ interpreter) must preserve
-
- // rsi: sender SP
-
- address entry_point = __ pc();
-
- // abstract method entry
-
-#ifndef CC_INTERP
- // pop return address, reset last_sp to NULL
- __ empty_expression_stack();
- __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
- __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
-#endif
-
- // throw exception
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
- // the call_VM checks for exception, so we should never return here.
- __ should_not_reach_here();
-
- return entry_point;
-}
-
-
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/interpreter_x86_64.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -301,66 +301,6 @@
return entry_point;
}
-
-// Abstract method entry
-// Attempt to execute abstract method. Throw exception
-address InterpreterGenerator::generate_abstract_entry(void) {
- // rbx: Method*
- // r13: sender SP
-
- address entry_point = __ pc();
-
- // abstract method entry
-
-#ifndef CC_INTERP
- // pop return address, reset last_sp to NULL
- __ empty_expression_stack();
- __ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
- __ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
-#endif
-
- // throw exception
- __ call_VM(noreg, CAST_FROM_FN_PTR(address,
- InterpreterRuntime::throw_AbstractMethodError));
- // the call_VM checks for exception, so we should never return here.
- __ should_not_reach_here();
-
- return entry_point;
-}
-
-
-// Empty method, generate a very fast return.
-
-address InterpreterGenerator::generate_empty_entry(void) {
- // rbx: Method*
- // r13: sender sp must set sp to this value on return
-
- if (!UseFastEmptyMethods) {
- return NULL;
- }
-
- address entry_point = __ pc();
-
- // If we need a safepoint check, generate full interpreter entry.
- Label slow_path;
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
- __ jcc(Assembler::notEqual, slow_path);
-
- // do nothing for empty methods (do not even increment invocation counter)
- // Code: _return
- // _return
- // return w/o popping parameters
- __ pop(rax);
- __ mov(rsp, r13);
- __ jmp(rax);
-
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- return entry_point;
-
-}
-
void Deoptimization::unwind_callee_save_values(frame* f, vframeArray* vframe_array) {
// This code is sort of the equivalent of C2IAdapter::setup_stack_frame back in
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -38,7 +38,7 @@
int callee_locals,
bool is_top_frame) {
// Note: This calculation must exactly parallel the frame setup
- // in AbstractInterpreterGenerator::generate_method_entry.
+ // in InterpreterGenerator::generate_fixed_frame.
// fixed size of an interpreter frame:
int overhead = frame::sender_sp_offset -
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -468,10 +468,10 @@
// rax,
// NOTE: since the additional locals are also always pushed (wasn't obvious in
- // generate_method_entry) so the guard should work for them too.
+ // generate_fixed_frame) so the guard should work for them too.
//
- // monitor entry size: see picture of stack set (generate_method_entry) and frame_x86.hpp
+ // monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp, thru expr stack bottom).
@@ -633,145 +633,6 @@
__ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
-// End of helpers
-
-//
-// Various method entries
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
-
-address InterpreterGenerator::generate_accessor_entry(void) {
-
- // rbx,: Method*
- // rcx: receiver (preserve for slow entry into asm interpreter)
-
- // rsi: senderSP must preserved for slow path, set SP to it on fast path
-
- address entry_point = __ pc();
- Label xreturn_path;
-
- // do fastpath for resolved accessor methods
- if (UseFastAccessorMethods) {
- Label slow_path;
- // If we need a safepoint check, generate full interpreter entry.
- ExternalAddress state(SafepointSynchronize::address_of_state());
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
-
- __ jcc(Assembler::notEqual, slow_path);
- // ASM/C++ Interpreter
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites thereof; parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- // rbx,: method
- // rcx: receiver
- __ movptr(rax, Address(rsp, wordSize));
-
- // check if local 0 != NULL and read field
- __ testptr(rax, rax);
- __ jcc(Assembler::zero, slow_path);
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, Method::const_offset()));
- __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
- __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
- // Shift codes right to get the index on the right.
- // The bytecode fetched looks like <index><0xb4><0x2a>
- __ shrl(rdx, 2*BitsPerByte);
- __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
- // rax,: local 0
- // rbx,: method
- // rcx: receiver - do not destroy since it is needed for slow path!
- // rcx: scratch
- // rdx: constant pool cache index
- // rdi: constant pool cache
- // rsi: sender sp
-
- // check if getfield has been resolved and read constant pool cache entry
- // check the validity of the cache entry by testing whether _indices field
- // contains Bytecode::_getfield in b1 byte.
- assert(in_words(ConstantPoolCacheEntry::size()) == 4, "adjust shift below");
- __ movl(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()));
- __ shrl(rcx, 2*BitsPerByte);
- __ andl(rcx, 0xFF);
- __ cmpl(rcx, Bytecodes::_getfield);
- __ jcc(Assembler::notEqual, slow_path);
-
- // Note: constant pool entry is not valid before bytecode is resolved
- __ movptr(rcx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset()));
- __ movl(rdx,
- Address(rdi,
- rdx,
- Address::times_ptr, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset()));
-
- Label notByte, notShort, notChar;
- const Address field_address (rax, rcx, Address::times_1);
-
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Use the type from the constant pool cache
- __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
- // Make sure we don't need to mask rdx after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
- __ cmpl(rdx, btos);
- __ jcc(Assembler::notEqual, notByte);
- __ load_signed_byte(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notByte);
- __ cmpl(rdx, stos);
- __ jcc(Assembler::notEqual, notShort);
- __ load_signed_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notShort);
- __ cmpl(rdx, ctos);
- __ jcc(Assembler::notEqual, notChar);
- __ load_unsigned_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notChar);
-#ifdef ASSERT
- Label okay;
- __ cmpl(rdx, atos);
- __ jcc(Assembler::equal, okay);
- __ cmpl(rdx, itos);
- __ jcc(Assembler::equal, okay);
- __ stop("what type is this?");
- __ bind(okay);
-#endif // ASSERT
- // All the rest are a 32 bit wordsize
- // This is ok for now. Since fast accessors should be going away
- __ movptr(rax, field_address);
-
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ pop(rdi); // get return address
- __ mov(rsp, rsi); // set sp to sender sp
- __ jmp(rdi);
-
- // generate a vanilla interpreter entry as the slow path
- __ bind(slow_path);
-
- (void) generate_normal_entry(false);
- return entry_point;
- }
- return NULL;
-
-}
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
@@ -862,7 +723,7 @@
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}
/**
@@ -1557,100 +1418,6 @@
return entry_point;
}
-//------------------------------------------------------------------------------------------------------------------------
-// Entry points
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native call method.
-// These both come in synchronized and non-synchronized versions but the
-// frame layout they create is very similar. The other method entry
-// types are really just special purpose entries that are really entry
-// and interpretation all in one. These are for trivial methods like
-// accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// Arguments:
-//
-// rbx,: Method*
-// rcx: receiver
-//
-//
-// Stack layout immediately at entry
-//
-// [ return address ] <--- rsp
-// [ parameter n ]
-// ...
-// [ parameter 1 ]
-// [ expression stack ] (caller's java expression stack)
-
-// Assuming that we don't go to one of the trivial specialized
-// entries the stack will look like below when we are ready to execute
-// the first bytecode (or call the native routine). The register usage
-// will be as the template based interpreter expects (see interpreter_x86.hpp).
-//
-// local variables follow incoming parameters immediately; i.e.
-// the return address is moved to the end of the locals).
-//
-// [ monitor entry ] <--- rsp
-// ...
-// [ monitor entry ]
-// [ expr. stack bottom ]
-// [ saved rsi ]
-// [ current rdi ]
-// [ Method* ]
-// [ saved rbp, ] <--- rbp,
-// [ return address ]
-// [ local variable m ]
-// ...
-// [ local variable 1 ]
-// [ parameter n ]
-// ...
-// [ parameter 1 ] <--- rdi
-
-address AbstractInterpreterGenerator::generate_method_entry(AbstractInterpreter::MethodKind kind) {
- // determine code generation flags
- bool synchronized = false;
- address entry_point = NULL;
- InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ig_this->generate_Reference_get_entry(); break;
- case Interpreter::java_util_zip_CRC32_update
- : entry_point = ig_this->generate_CRC32_update_entry(); break;
- case Interpreter::java_util_zip_CRC32_updateBytes
- : // fall thru
- case Interpreter::java_util_zip_CRC32_updateByteBuffer
- : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
- default:
- fatal(err_msg("unexpected method kind: %d", kind));
- break;
- }
-
- if (entry_point) return entry_point;
-
- return ig_this->generate_normal_entry(synchronized);
-
-}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -400,7 +400,7 @@
// page mechanism will work for that.
//
// NOTE: Since the additional locals are also always pushed (wasn't
-// obvious in generate_method_entry) so the guard should work for them
+// obvious in generate_fixed_frame) so the guard should work for them
// too.
//
// Args:
@@ -411,8 +411,7 @@
// rax
void InterpreterGenerator::generate_stack_overflow_check(void) {
- // monitor entry size: see picture of stack set
- // (generate_method_entry) and frame_amd64.hpp
+ // monitor entry size: see picture of stack in frame_x86.hpp
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
// total overhead size: entry_size + (saved rbp through expr stack
@@ -600,153 +599,6 @@
// End of helpers
-// Various method entries
-//------------------------------------------------------------------------------------------------------------------------
-//
-//
-
-// Call an accessor method (assuming it is resolved, otherwise drop
-// into vanilla (slow path) entry
-address InterpreterGenerator::generate_accessor_entry(void) {
- // rbx: Method*
-
- // r13: senderSP must preserver for slow path, set SP to it on fast path
-
- address entry_point = __ pc();
- Label xreturn_path;
-
- // do fastpath for resolved accessor methods
- if (UseFastAccessorMethods) {
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
- // thereof; parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- Label slow_path;
- // If we need a safepoint check, generate full interpreter entry.
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
-
- __ jcc(Assembler::notEqual, slow_path);
- // rbx: method
- __ movptr(rax, Address(rsp, wordSize));
-
- // check if local 0 != NULL and read field
- __ testptr(rax, rax);
- __ jcc(Assembler::zero, slow_path);
-
- // read first instruction word and extract bytecode @ 1 and index @ 2
- __ movptr(rdx, Address(rbx, Method::const_offset()));
- __ movptr(rdi, Address(rdx, ConstMethod::constants_offset()));
- __ movl(rdx, Address(rdx, ConstMethod::codes_offset()));
- // Shift codes right to get the index on the right.
- // The bytecode fetched looks like <index><0xb4><0x2a>
- __ shrl(rdx, 2 * BitsPerByte);
- __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movptr(rdi, Address(rdi, ConstantPool::cache_offset_in_bytes()));
-
- // rax: local 0
- // rbx: method
- // rdx: constant pool cache index
- // rdi: constant pool cache
-
- // check if getfield has been resolved and read constant pool cache entry
- // check the validity of the cache entry by testing whether _indices field
- // contains Bytecode::_getfield in b1 byte.
- assert(in_words(ConstantPoolCacheEntry::size()) == 4,
- "adjust shift below");
- __ movl(rcx,
- Address(rdi,
- rdx,
- Address::times_8,
- ConstantPoolCache::base_offset() +
- ConstantPoolCacheEntry::indices_offset()));
- __ shrl(rcx, 2 * BitsPerByte);
- __ andl(rcx, 0xFF);
- __ cmpl(rcx, Bytecodes::_getfield);
- __ jcc(Assembler::notEqual, slow_path);
-
- // Note: constant pool entry is not valid before bytecode is resolved
- __ movptr(rcx,
- Address(rdi,
- rdx,
- Address::times_8,
- ConstantPoolCache::base_offset() +
- ConstantPoolCacheEntry::f2_offset()));
- // edx: flags
- __ movl(rdx,
- Address(rdi,
- rdx,
- Address::times_8,
- ConstantPoolCache::base_offset() +
- ConstantPoolCacheEntry::flags_offset()));
-
- Label notObj, notInt, notByte, notShort;
- const Address field_address(rax, rcx, Address::times_1);
-
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Use the type from the constant pool cache
- __ shrl(rdx, ConstantPoolCacheEntry::tos_state_shift);
- // Make sure we don't need to mask edx after the above shift
- ConstantPoolCacheEntry::verify_tos_state_shift();
-
- __ cmpl(rdx, atos);
- __ jcc(Assembler::notEqual, notObj);
- // atos
- __ load_heap_oop(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notObj);
- __ cmpl(rdx, itos);
- __ jcc(Assembler::notEqual, notInt);
- // itos
- __ movl(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notInt);
- __ cmpl(rdx, btos);
- __ jcc(Assembler::notEqual, notByte);
- // btos
- __ load_signed_byte(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notByte);
- __ cmpl(rdx, stos);
- __ jcc(Assembler::notEqual, notShort);
- // stos
- __ load_signed_short(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notShort);
-#ifdef ASSERT
- Label okay;
- __ cmpl(rdx, ctos);
- __ jcc(Assembler::equal, okay);
- __ stop("what type is this?");
- __ bind(okay);
-#endif
- // ctos
- __ load_unsigned_short(rax, field_address);
-
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ pop(rdi);
- __ mov(rsp, r13);
- __ jmp(rdi);
- __ ret(0);
-
- // generate a vanilla interpreter entry as the slow path
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- } else {
- (void) generate_normal_entry(false);
- }
-
- return entry_point;
-}
-
// Method entry for java.lang.ref.Reference.get.
address InterpreterGenerator::generate_Reference_get_entry(void) {
#if INCLUDE_ALL_GCS
@@ -773,8 +625,6 @@
// and so we don't need to call the G1 pre-barrier. Thus we can use the
// regular method entry code to generate the NPE.
//
- // This code is based on generate_accessor_enty.
- //
// rbx: Method*
// r13: senderSP must preserve for slow path, set SP to it on fast path
@@ -832,7 +682,7 @@
// If G1 is not enabled then attempt to go through the accessor entry point
// Reference.get is an accessor
- return generate_accessor_entry();
+ return generate_jump_to_normal_entry();
}
/**
@@ -1566,100 +1416,6 @@
return entry_point;
}
-// Entry points
-//
-// Here we generate the various kind of entries into the interpreter.
-// The two main entry type are generic bytecode methods and native
-// call method. These both come in synchronized and non-synchronized
-// versions but the frame layout they create is very similar. The
-// other method entry types are really just special purpose entries
-// that are really entry and interpretation all in one. These are for
-// trivial methods like accessor, empty, or special math methods.
-//
-// When control flow reaches any of the entry types for the interpreter
-// the following holds ->
-//
-// Arguments:
-//
-// rbx: Method*
-//
-// Stack layout immediately at entry
-//
-// [ return address ] <--- rsp
-// [ parameter n ]
-// ...
-// [ parameter 1 ]
-// [ expression stack ] (caller's java expression stack)
-
-// Assuming that we don't go to one of the trivial specialized entries
-// the stack will look like below when we are ready to execute the
-// first bytecode (or call the native routine). The register usage
-// will be as the template based interpreter expects (see
-// interpreter_amd64.hpp).
-//
-// local variables follow incoming parameters immediately; i.e.
-// the return address is moved to the end of the locals).
-//
-// [ monitor entry ] <--- rsp
-// ...
-// [ monitor entry ]
-// [ expr. stack bottom ]
-// [ saved r13 ]
-// [ current r14 ]
-// [ Method* ]
-// [ saved ebp ] <--- rbp
-// [ return address ]
-// [ local variable m ]
-// ...
-// [ local variable 1 ]
-// [ parameter n ]
-// ...
-// [ parameter 1 ] <--- r14
-
-address AbstractInterpreterGenerator::generate_method_entry(
- AbstractInterpreter::MethodKind kind) {
- // determine code generation flags
- bool synchronized = false;
- address entry_point = NULL;
- InterpreterGenerator* ig_this = (InterpreterGenerator*)this;
-
- switch (kind) {
- case Interpreter::zerolocals : break;
- case Interpreter::zerolocals_synchronized: synchronized = true; break;
- case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break;
- case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break;
- case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break;
- case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break;
- case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break;
-
- case Interpreter::java_lang_math_sin : // fall thru
- case Interpreter::java_lang_math_cos : // fall thru
- case Interpreter::java_lang_math_tan : // fall thru
- case Interpreter::java_lang_math_abs : // fall thru
- case Interpreter::java_lang_math_log : // fall thru
- case Interpreter::java_lang_math_log10 : // fall thru
- case Interpreter::java_lang_math_sqrt : // fall thru
- case Interpreter::java_lang_math_pow : // fall thru
- case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break;
- case Interpreter::java_lang_ref_reference_get
- : entry_point = ig_this->generate_Reference_get_entry(); break;
- case Interpreter::java_util_zip_CRC32_update
- : entry_point = ig_this->generate_CRC32_update_entry(); break;
- case Interpreter::java_util_zip_CRC32_updateBytes
- : // fall thru
- case Interpreter::java_util_zip_CRC32_updateByteBuffer
- : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break;
- default:
- fatal(err_msg("unexpected method kind: %d", kind));
- break;
- }
-
- if (entry_point) {
- return entry_point;
- }
-
- return ig_this->generate_normal_entry(synchronized);
-}
// These should never be compiled since the interpreter will prefer
// the compiled version to the intrinsic version.
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -831,60 +831,6 @@
return generate_entry((address) CppInterpreter::normal_entry);
}
-address AbstractInterpreterGenerator::generate_method_entry(
- AbstractInterpreter::MethodKind kind) {
- address entry_point = NULL;
-
- switch (kind) {
- case Interpreter::zerolocals:
- case Interpreter::zerolocals_synchronized:
- break;
-
- case Interpreter::native:
- entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
- break;
-
- case Interpreter::native_synchronized:
- entry_point = ((InterpreterGenerator*) this)->generate_native_entry(false);
- break;
-
- case Interpreter::empty:
- entry_point = ((InterpreterGenerator*) this)->generate_empty_entry();
- break;
-
- case Interpreter::accessor:
- entry_point = ((InterpreterGenerator*) this)->generate_accessor_entry();
- break;
-
- case Interpreter::abstract:
- entry_point = ((InterpreterGenerator*) this)->generate_abstract_entry();
- break;
-
- case Interpreter::java_lang_math_sin:
- case Interpreter::java_lang_math_cos:
- case Interpreter::java_lang_math_tan:
- case Interpreter::java_lang_math_abs:
- case Interpreter::java_lang_math_log:
- case Interpreter::java_lang_math_log10:
- case Interpreter::java_lang_math_sqrt:
- case Interpreter::java_lang_math_pow:
- case Interpreter::java_lang_math_exp:
- entry_point = ((InterpreterGenerator*) this)->generate_math_entry(kind);
- break;
-
- case Interpreter::java_lang_ref_reference_get:
- entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry();
- break;
-
- default:
- ShouldNotReachHere();
- }
-
- if (entry_point == NULL)
- entry_point = ((InterpreterGenerator*) this)->generate_normal_entry(false);
-
- return entry_point;
-}
InterpreterGenerator::InterpreterGenerator(StubQueue* code)
: CppInterpreterGenerator(code) {
--- a/hotspot/src/cpu/zero/vm/globals_zero.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/zero/vm/globals_zero.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -61,6 +61,12 @@
define_pd_global(uintx, TypeProfileLevel, 0);
-#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct)
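+// The Zero/C++ interpreter still relies on the fast empty and accessor method
+// entries, so these flags are retained here as Zero-only architecture flags.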
+#define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
+ product(bool, UseFastEmptyMethods, true, \
+ "Use fast method entry code for empty methods") \
+ \
+ product(bool, UseFastAccessorMethods, true, \
+ "Use fast method entry code for accessor methods") \
+ \
#endif // CPU_ZERO_VM_GLOBALS_ZERO_HPP
--- a/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/cpu/zero/vm/interpreterGenerator_zero.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -39,4 +39,7 @@
address generate_accessor_entry();
address generate_Reference_get_entry();
+ // Not supported
+ address generate_CRC32_update_entry() { return NULL; }
+ address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { return NULL; }
#endif // CPU_ZERO_VM_INTERPRETERGENERATOR_ZERO_HPP
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -2246,7 +2246,7 @@
const siginfo_t* si = (const siginfo_t*)siginfo;
os::Posix::print_siginfo_brief(st, si);
-
+#if INCLUDE_CDS
if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
UseSharedSpaces) {
FileMapInfo* mapinfo = FileMapInfo::current_info();
@@ -2256,6 +2256,7 @@
" possible disk/network problem.");
}
}
+#endif
st->cr();
}
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -31,6 +31,9 @@
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/systemDictionaryShared.hpp"
+#endif
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
@@ -60,6 +63,7 @@
#include "services/threadService.hpp"
#include "utilities/array.hpp"
#include "utilities/globalDefinitions.hpp"
+#include "utilities/ostream.hpp"
// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
@@ -3786,7 +3790,15 @@
instanceKlassHandle nullHandle;
// Figure out whether we can skip format checking (matching classic VM behavior)
- _need_verify = Verifier::should_verify_for(class_loader(), verify);
+ if (DumpSharedSpaces) {
+ // verify == true means it's a 'remote' class (i.e., non-boot class)
+ // Verification decision is based on BytecodeVerificationRemote flag
+ // for those classes.
+ _need_verify = (verify) ? BytecodeVerificationRemote :
+ BytecodeVerificationLocal;
+ } else {
+ _need_verify = Verifier::should_verify_for(class_loader(), verify);
+ }
// Set the verify flag in stream
cfs->set_verify(_need_verify);
@@ -3805,6 +3817,18 @@
u2 minor_version = cfs->get_u2_fast();
u2 major_version = cfs->get_u2_fast();
+ if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
+ ResourceMark rm;
+ warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
+ major_version, minor_version, name->as_C_string());
+ Exceptions::fthrow(
+ THREAD_AND_LOCATION,
+ vmSymbols::java_lang_UnsupportedClassVersionError(),
+ "Unsupported major.minor version for dump time %u.%u",
+ major_version,
+ minor_version);
+ }
+
// Check version numbers - we check this even with verifier off
if (!is_supported_version(major_version, minor_version)) {
if (name == NULL) {
@@ -3912,6 +3936,18 @@
if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
tty->print_cr("]");
}
+#if INCLUDE_CDS
+ if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
+ // Only dump the classes that can be stored into CDS archive
+ if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
+ if (name != NULL) {
+ ResourceMark rm(THREAD);
+ classlist_file->print_cr("%s", name->as_C_string());
+ classlist_file->flush();
+ }
+ }
+ }
+#endif
u2 super_class_index = cfs->get_u2_fast();
instanceKlassHandle super_klass = parse_super_class(super_class_index,
--- a/hotspot/src/share/vm/classfile/classLoader.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -26,8 +26,13 @@
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderExt.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "classfile/sharedClassUtil.hpp"
+#endif
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
@@ -35,6 +40,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/allocation.inline.hpp"
+#include "memory/filemap.hpp"
#include "memory/generation.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.inline.hpp"
@@ -114,8 +120,12 @@
ClassPathEntry* ClassLoader::_first_entry = NULL;
ClassPathEntry* ClassLoader::_last_entry = NULL;
+int ClassLoader::_num_entries = 0;
PackageHashtable* ClassLoader::_package_hash_table = NULL;
+#if INCLUDE_CDS
+SharedPathsMiscInfo* ClassLoader::_shared_paths_misc_info = NULL;
+#endif
// helper routines
bool string_starts_with(const char* str, const char* str_to_find) {
size_t str_len = strlen(str);
@@ -194,6 +204,14 @@
// check if file exists
struct stat st;
if (os::stat(path, &st) == 0) {
+#if INCLUDE_CDS
+ if (DumpSharedSpaces) {
+ // We have already checked in ClassLoader::check_shared_classpath() that the directory is
+ // empty, so we should never find a file underneath it -- unless the user has added a new
+ // file while we are running the dump, in which case let's quit!
+ ShouldNotReachHere();
+ }
+#endif
// found file, open it
int file_handle = os::open(path, 0, 0);
if (file_handle != -1) {
@@ -228,13 +246,13 @@
FREE_C_HEAP_ARRAY(char, _zip_name, mtClass);
}
-ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
- // enable call to C land
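+// Open a named entry in the zip archive and return its contents in a
+// resource-allocated buffer. *filesize is set to the entry length; if
+// nul_terminate is true the returned data is NUL-terminated so it can be
+// used as a C string.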
+u1* ClassPathZipEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+ // enable call to C land
JavaThread* thread = JavaThread::current();
ThreadToNativeFromVM ttn(thread);
// check whether zip archive contains name
- jint filesize, name_len;
- jzentry* entry = (*FindEntry)(_zip, name, &filesize, &name_len);
+ jint name_len;
+ jzentry* entry = (*FindEntry)(_zip, name, filesize, &name_len);
if (entry == NULL) return NULL;
u1* buffer;
char name_buf[128];
@@ -245,19 +263,33 @@
filename = NEW_RESOURCE_ARRAY(char, name_len + 1);
}
- // file found, get pointer to class in mmaped jar file.
+ // file found, get pointer to the entry in the mmapped jar file.
if (ReadMappedEntry == NULL ||
!(*ReadMappedEntry)(_zip, entry, &buffer, filename)) {
- // mmaped access not available, perhaps due to compression,
+ // mmapped access not available, perhaps due to compression,
// read contents into resource array
- buffer = NEW_RESOURCE_ARRAY(u1, filesize);
+ int size = (*filesize) + ((nul_terminate) ? 1 : 0);
+ buffer = NEW_RESOURCE_ARRAY(u1, size);
if (!(*ReadEntry)(_zip, entry, buffer, filename)) return NULL;
}
+
+ // return result
+ if (nul_terminate) {
+ buffer[*filesize] = 0;
+ }
+ return buffer;
+}
+
+ClassFileStream* ClassPathZipEntry::open_stream(const char* name, TRAPS) {
+ jint filesize;
+ u1* buffer = open_entry(name, &filesize, false, CHECK_NULL);
+ if (buffer == NULL) {
+ return NULL;
+ }
if (UsePerfData) {
ClassLoader::perf_sys_classfile_bytes_read()->inc(filesize);
}
- // return result
- return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
+ return new ClassFileStream(buffer, filesize, _zip_name); // Resource allocated
}
// invoke function for each entry in the zip file
@@ -272,12 +304,13 @@
}
}
-LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st) : ClassPathEntry() {
+LazyClassPathEntry::LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception) : ClassPathEntry() {
_path = os::strdup_check_oom(path);
_st = *st;
_meta_index = NULL;
_resolved_entry = NULL;
_has_error = false;
+ _throw_exception = throw_exception;
}
LazyClassPathEntry::~LazyClassPathEntry() {
@@ -293,7 +326,11 @@
return (ClassPathEntry*) _resolved_entry;
}
ClassPathEntry* new_entry = NULL;
- new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, CHECK_NULL);
+ new_entry = ClassLoader::create_class_path_entry(_path, &_st, false, _throw_exception, CHECK_NULL);
+ if (!_throw_exception && new_entry == NULL) {
+ assert(!HAS_PENDING_EXCEPTION, "must be");
+ return NULL;
+ }
{
ThreadCritical tc;
if (_resolved_entry == NULL) {
@@ -327,6 +364,23 @@
return true;
}
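+// Resolve the underlying entry on first use and delegate to
+// ClassPathZipEntry::open_entry; lazy entries are only expected to wrap jar files here.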
+u1* LazyClassPathEntry::open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS) {
+ if (_has_error) {
+ return NULL;
+ }
+ ClassPathEntry* cpe = resolve_entry(THREAD);
+ if (cpe == NULL) {
+ _has_error = true;
+ return NULL;
+ } else if (cpe->is_jar_file()) {
+ return ((ClassPathZipEntry*)cpe)->open_entry(name, filesize, nul_terminate,THREAD);
+ } else {
+ ShouldNotReachHere();
+ *filesize = 0;
+ return NULL;
+ }
+}
+
static void print_meta_index(LazyClassPathEntry* entry,
GrowableArray<char*>& meta_packages) {
tty->print("[Meta index for %s=", entry->name());
@@ -337,15 +391,62 @@
tty->print_cr("]");
}
+#if INCLUDE_CDS
+void ClassLoader::exit_with_path_failure(const char* error, const char* message) {
+ assert(DumpSharedSpaces, "only called at dump time");
+ tty->print_cr("Hint: enable -XX:+TraceClassPaths to diagnose the failure");
+ vm_exit_during_initialization(error, message);
+}
+#endif
-void ClassLoader::setup_meta_index() {
+void ClassLoader::trace_class_path(const char* msg, const char* name) {
+ if (!TraceClassPaths) {
+ return;
+ }
+
+ if (msg) {
+ tty->print("%s", msg);
+ }
+ if (name) {
+ if (strlen(name) < 256) {
+ tty->print("%s", name);
+ } else {
+ // For very long paths, we need to print each character separately,
+ // as print_cr() has a length limit
+ while (name[0] != '\0') {
+ tty->print("%c", name[0]);
+ name++;
+ }
+ }
+ }
+ if (msg && msg[0] == '[') {
+ tty->print_cr("]");
+ } else {
+ tty->cr();
+ }
+}
+
+void ClassLoader::setup_bootstrap_meta_index() {
// Set up meta index which allows us to open boot jars lazily if
// class data sharing is enabled
+ const char* meta_index_path = Arguments::get_meta_index_path();
+ const char* meta_index_dir = Arguments::get_meta_index_dir();
+ setup_meta_index(meta_index_path, meta_index_dir, 0);
+}
+
+void ClassLoader::setup_meta_index(const char* meta_index_path, const char* meta_index_dir, int start_index) {
const char* known_version = "% VERSION 2";
- char* meta_index_path = Arguments::get_meta_index_path();
- char* meta_index_dir = Arguments::get_meta_index_dir();
FILE* file = fopen(meta_index_path, "r");
int line_no = 0;
+#if INCLUDE_CDS
+ if (DumpSharedSpaces) {
+ if (file != NULL) {
+ _shared_paths_misc_info->add_required_file(meta_index_path);
+ } else {
+ _shared_paths_misc_info->add_nonexist_path(meta_index_path);
+ }
+ }
+#endif
if (file != NULL) {
ResourceMark rm;
LazyClassPathEntry* cur_entry = NULL;
@@ -380,7 +481,7 @@
// Hand off current packages to current lazy entry (if any)
if ((cur_entry != NULL) &&
(boot_class_path_packages.length() > 0)) {
- if (TraceClassLoading && Verbose) {
+ if ((TraceClassLoading || TraceClassPaths) && Verbose) {
print_meta_index(cur_entry, boot_class_path_packages);
}
MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -391,8 +492,10 @@
boot_class_path_packages.clear();
// Find lazy entry corresponding to this jar file
- for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next()) {
- if (entry->is_lazy() &&
+ int count = 0;
+ for (ClassPathEntry* entry = _first_entry; entry != NULL; entry = entry->next(), count++) {
+ if (count >= start_index &&
+ entry->is_lazy() &&
string_starts_with(entry->name(), meta_index_dir) &&
string_ends_with(entry->name(), &package_name[2])) {
cur_entry = (LazyClassPathEntry*) entry;
@@ -429,7 +532,7 @@
// Hand off current packages to current lazy entry (if any)
if ((cur_entry != NULL) &&
(boot_class_path_packages.length() > 0)) {
- if (TraceClassLoading && Verbose) {
+ if ((TraceClassLoading || TraceClassPaths) && Verbose) {
print_meta_index(cur_entry, boot_class_path_packages);
}
MetaIndex* index = new MetaIndex(boot_class_path_packages.adr_at(0),
@@ -440,37 +543,88 @@
}
}
+#if INCLUDE_CDS
+void ClassLoader::check_shared_classpath(const char *path) {
+ if (strcmp(path, "") == 0) {
+ exit_with_path_failure("Cannot have empty path in archived classpaths", NULL);
+ }
+
+ struct stat st;
+ if (os::stat(path, &st) == 0) {
+ if ((st.st_mode & S_IFREG) != S_IFREG) { // is directory
+ if (!os::dir_is_empty(path)) {
+ tty->print_cr("Error: non-empty directory '%s'", path);
+ exit_with_path_failure("CDS allows only empty directories in archived classpaths", NULL);
+ }
+ }
+ }
+}
+#endif
+
void ClassLoader::setup_bootstrap_search_path() {
assert(_first_entry == NULL, "should not setup bootstrap class search path twice");
char* sys_class_path = os::strdup_check_oom(Arguments::get_sysclasspath());
- if (TraceClassLoading && Verbose) {
- tty->print_cr("[Bootstrap loader class path=%s]", sys_class_path);
+ if (!PrintSharedArchiveAndExit) {
+ trace_class_path("[Bootstrap loader class path=", sys_class_path);
+ }
+#if INCLUDE_CDS
+ if (DumpSharedSpaces) {
+ _shared_paths_misc_info->add_boot_classpath(Arguments::get_sysclasspath());
}
+#endif
+ setup_search_path(sys_class_path);
+ os::free(sys_class_path);
+}
- int len = (int)strlen(sys_class_path);
+#if INCLUDE_CDS
+int ClassLoader::get_shared_paths_misc_info_size() {
+ return _shared_paths_misc_info->get_used_bytes();
+}
+
+void* ClassLoader::get_shared_paths_misc_info() {
+ return _shared_paths_misc_info->buffer();
+}
+
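+// Validate the shared paths misc info recorded at dump time (passed in buf/size)
+// against the current environment.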
+bool ClassLoader::check_shared_paths_misc_info(void *buf, int size) {
+ SharedPathsMiscInfo* checker = SharedClassUtil::allocate_shared_paths_misc_info((char*)buf, size);
+ bool result = checker->check();
+ delete checker;
+ return result;
+}
+#endif
+
+void ClassLoader::setup_search_path(char *class_path) {
+ int offset = 0;
+ int len = (int)strlen(class_path);
int end = 0;
// Iterate over class path entries
for (int start = 0; start < len; start = end) {
- while (sys_class_path[end] && sys_class_path[end] != os::path_separator()[0]) {
+ while (class_path[end] && class_path[end] != os::path_separator()[0]) {
end++;
}
- char* path = NEW_C_HEAP_ARRAY(char, end-start+1, mtClass);
- strncpy(path, &sys_class_path[start], end-start);
- path[end-start] = '\0';
+ EXCEPTION_MARK;
+ ResourceMark rm(THREAD);
+ char* path = NEW_RESOURCE_ARRAY(char, end - start + 1);
+ strncpy(path, &class_path[start], end - start);
+ path[end - start] = '\0';
update_class_path_entry_list(path, false);
- FREE_C_HEAP_ARRAY(char, path, mtClass);
- while (sys_class_path[end] == os::path_separator()[0]) {
+#if INCLUDE_CDS
+ if (DumpSharedSpaces) {
+ check_shared_classpath(path);
+ }
+#endif
+ while (class_path[end] == os::path_separator()[0]) {
end++;
}
}
- os::free(sys_class_path);
}
-ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st, bool lazy, TRAPS) {
+ClassPathEntry* ClassLoader::create_class_path_entry(char *path, const struct stat* st,
+ bool lazy, bool throw_exception, TRAPS) {
JavaThread* thread = JavaThread::current();
if (lazy) {
- return new LazyClassPathEntry(path, st);
+ return new LazyClassPathEntry(path, st, throw_exception);
}
ClassPathEntry* new_entry = NULL;
if ((st->st_mode & S_IFREG) == S_IFREG) {
@@ -479,7 +633,11 @@
char canonical_path[JVM_MAXPATHLEN];
if (!get_canonical_path(path, canonical_path, JVM_MAXPATHLEN)) {
// This matches the classic VM
- THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+ if (throw_exception) {
+ THROW_MSG_(vmSymbols::java_io_IOException(), "Bad pathname", NULL);
+ } else {
+ return NULL;
+ }
}
char* error_msg = NULL;
jzfile* zip;
@@ -491,7 +649,7 @@
}
if (zip != NULL && error_msg == NULL) {
new_entry = new ClassPathZipEntry(zip, path);
- if (TraceClassLoading) {
+ if (TraceClassLoading || TraceClassPaths) {
tty->print_cr("[Opened %s]", path);
}
} else {
@@ -505,12 +663,16 @@
msg = NEW_RESOURCE_ARRAY(char, len); ;
jio_snprintf(msg, len - 1, "error in opening JAR file <%s> %s", error_msg, path);
}
- THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+ if (throw_exception) {
+ THROW_MSG_(vmSymbols::java_lang_ClassNotFoundException(), msg, NULL);
+ } else {
+ return NULL;
+ }
}
} else {
// Directory
new_entry = new ClassPathDirEntry(path);
- if (TraceClassLoading) {
+ if (TraceClassLoading || TraceClassPaths) {
tty->print_cr("[Path %s]", path);
}
}
@@ -571,23 +733,37 @@
_last_entry = new_entry;
}
}
+ _num_entries ++;
}
-void ClassLoader::update_class_path_entry_list(char *path,
- bool check_for_duplicates) {
+// Returns true IFF the file/dir exists and the entry was successfully created.
+bool ClassLoader::update_class_path_entry_list(char *path,
+ bool check_for_duplicates,
+ bool throw_exception) {
struct stat st;
if (os::stat(path, &st) == 0) {
// File or directory found
ClassPathEntry* new_entry = NULL;
Thread* THREAD = Thread::current();
- new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, CHECK);
+ new_entry = create_class_path_entry(path, &st, LazyBootClassLoader, throw_exception, CHECK_(false));
+ if (new_entry == NULL) {
+ return false;
+ }
// The kernel VM adds dynamically to the end of the classloader path and
// doesn't reorder the bootclasspath which would break java.lang.Package
// (see PackageInfo).
// Add new entry to linked list
if (!check_for_duplicates || !contains_entry(new_entry)) {
- add_to_list(new_entry);
+ ClassLoaderExt::add_class_path_entry(path, check_for_duplicates, new_entry);
}
+ return true;
+ } else {
+#if INCLUDE_CDS
+ if (DumpSharedSpaces) {
+ _shared_paths_misc_info->add_nonexist_path(path);
+ }
+#endif
+ return false;
}
}
@@ -739,10 +915,10 @@
assert(n == number_of_entries(), "just checking");
}
- void copy_table(char** top, char* end, PackageHashtable* table);
+ CDS_ONLY(void copy_table(char** top, char* end, PackageHashtable* table);)
};
-
+#if INCLUDE_CDS
void PackageHashtable::copy_table(char** top, char* end,
PackageHashtable* table) {
// Copy (relocate) the table to the shared space.
@@ -750,33 +926,30 @@
// Calculate the space needed for the package name strings.
int i;
- int n = 0;
- for (i = 0; i < table_size(); ++i) {
- for (PackageInfo* pp = table->bucket(i);
- pp != NULL;
- pp = pp->next()) {
- n += (int)(strlen(pp->pkgname()) + 1);
- }
- }
- if (*top + n + sizeof(intptr_t) >= end) {
- report_out_of_shared_space(SharedMiscData);
- }
-
- // Copy the table data (the strings) to the shared space.
- n = align_size_up(n, sizeof(HeapWord));
- *(intptr_t*)(*top) = n;
- *top += sizeof(intptr_t);
+ intptr_t* tableSize = (intptr_t*)(*top);
+ *top += sizeof(intptr_t); // For table size
+ char* tableStart = *top;
for (i = 0; i < table_size(); ++i) {
for (PackageInfo* pp = table->bucket(i);
pp != NULL;
pp = pp->next()) {
int n1 = (int)(strlen(pp->pkgname()) + 1);
+ if (*top + n1 >= end) {
+ report_out_of_shared_space(SharedMiscData);
+ }
pp->set_pkgname((char*)memcpy(*top, pp->pkgname(), n1));
*top += n1;
}
}
*top = (char*)align_size_up((intptr_t)*top, sizeof(HeapWord));
+ if (*top >= end) {
+ report_out_of_shared_space(SharedMiscData);
+ }
+
+ // Write table size
+ intptr_t len = *top - (char*)tableStart;
+ *tableSize = len;
}
@@ -787,7 +960,7 @@
void ClassLoader::copy_package_info_table(char** top, char* end) {
_package_hash_table->copy_table(top, end, _package_hash_table);
}
-
+#endif
PackageInfo* ClassLoader::lookup_package(const char *pkgname) {
const char *cp = strrchr(pkgname, '/');
@@ -880,7 +1053,8 @@
instanceKlassHandle ClassLoader::load_classfile(Symbol* h_name, TRAPS) {
ResourceMark rm(THREAD);
- EventMark m("loading class %s", h_name->as_C_string());
+ const char* class_name = h_name->as_C_string();
+ EventMark m("loading class %s", class_name);
ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
stringStream st;
@@ -888,18 +1062,24 @@
// st.print("%s.class", h_name->as_utf8());
st.print_raw(h_name->as_utf8());
st.print_raw(".class");
- char* name = st.as_string();
+ const char* file_name = st.as_string();
+ ClassLoaderExt::Context context(class_name, file_name, THREAD);
// Lookup stream for parsing .class file
ClassFileStream* stream = NULL;
int classpath_index = 0;
+ ClassPathEntry* e = NULL;
+ instanceKlassHandle h;
{
PerfClassTraceTime vmtimer(perf_sys_class_lookup_time(),
((JavaThread*) THREAD)->get_thread_stat()->perf_timers_addr(),
PerfClassTraceTime::CLASS_LOAD);
- ClassPathEntry* e = _first_entry;
+ e = _first_entry;
while (e != NULL) {
- stream = e->open_stream(name, CHECK_NULL);
+ stream = e->open_stream(file_name, CHECK_NULL);
+ if (!context.check(stream, classpath_index)) {
+ return h; // NULL
+ }
if (stream != NULL) {
break;
}
@@ -908,9 +1088,7 @@
}
}
- instanceKlassHandle h;
if (stream != NULL) {
-
// class file found, parse it
ClassFileParser parser(stream);
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
@@ -920,12 +1098,19 @@
loader_data,
protection_domain,
parsed_name,
- false,
- CHECK_(h));
-
- // add to package table
- if (add_package(name, classpath_index, THREAD)) {
- h = result;
+ context.should_verify(classpath_index),
+ THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ ResourceMark rm;
+ if (DumpSharedSpaces) {
+ tty->print_cr("Preload Error: Failed to load %s", class_name);
+ }
+ return h;
+ }
+ h = context.record_result(classpath_index, e, result, THREAD);
+ } else {
+ if (DumpSharedSpaces) {
+ tty->print_cr("Preload Error: Cannot find %s", class_name);
}
}
@@ -1020,14 +1205,27 @@
// lookup zip library entry points
load_zip_library();
+#if INCLUDE_CDS
// initialize search path
+ if (DumpSharedSpaces) {
+ _shared_paths_misc_info = SharedClassUtil::allocate_shared_paths_misc_info();
+ }
+#endif
setup_bootstrap_search_path();
if (LazyBootClassLoader) {
// set up meta index which makes boot classpath initialization lazier
- setup_meta_index();
+ setup_bootstrap_meta_index();
}
}
+#if INCLUDE_CDS
+void ClassLoader::initialize_shared_path() {
+ if (DumpSharedSpaces) {
+ ClassLoaderExt::setup_search_paths();
+ _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
+ }
+}
+#endif
jlong ClassLoader::classloader_time_ms() {
return UsePerfData ?
--- a/hotspot/src/share/vm/classfile/classLoader.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/classLoader.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -107,6 +107,7 @@
const char* name() { return _zip_name; }
ClassPathZipEntry(jzfile* zip, const char* zip_name);
~ClassPathZipEntry();
+ u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void contents_do(void f(const char* name, void* context), void* context);
// Debugging
@@ -122,13 +123,15 @@
struct stat _st;
MetaIndex* _meta_index;
bool _has_error;
+ bool _throw_exception;
volatile ClassPathEntry* _resolved_entry;
ClassPathEntry* resolve_entry(TRAPS);
public:
bool is_jar_file();
const char* name() { return _path; }
- LazyClassPathEntry(char* path, const struct stat* st);
+ LazyClassPathEntry(char* path, const struct stat* st, bool throw_exception);
virtual ~LazyClassPathEntry();
+ u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
@@ -140,6 +143,7 @@
class PackageHashtable;
class PackageInfo;
+class SharedPathsMiscInfo;
template <MEMFLAGS F> class HashtableBucket;
class ClassLoader: AllStatic {
@@ -147,7 +151,7 @@
enum SomeConstants {
package_hash_table_size = 31 // Number of buckets
};
- private:
+ protected:
friend class LazyClassPathEntry;
// Performance counters
@@ -189,10 +193,15 @@
static ClassPathEntry* _first_entry;
// Last entry in linked list of ClassPathEntry instances
static ClassPathEntry* _last_entry;
+ static int _num_entries;
+
// Hash table used to keep track of loaded packages
static PackageHashtable* _package_hash_table;
static const char* _shared_archive;
+ // Info used by CDS
+ CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
+
// Hash function
static unsigned int hash(const char *s, int n);
// Returns the package file name corresponding to the specified package
@@ -203,19 +212,23 @@
static bool add_package(const char *pkgname, int classpath_index, TRAPS);
// Initialization
- static void setup_meta_index();
+ static void setup_bootstrap_meta_index();
+ static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
+ int start_index);
static void setup_bootstrap_search_path();
+ static void setup_search_path(char *class_path);
+
static void load_zip_library();
static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
- bool lazy, TRAPS);
+ bool lazy, bool throw_exception, TRAPS);
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
static bool get_canonical_path(char* orig, char* out, int len);
public:
- // Used by the kernel jvm.
- static void update_class_path_entry_list(char *path,
- bool check_for_duplicates);
+ static bool update_class_path_entry_list(char *path,
+ bool check_for_duplicates,
+ bool throw_exception=true);
static void print_bootclasspath();
// Timing
@@ -298,6 +311,7 @@
// Initialization
static void initialize();
+ CDS_ONLY(static void initialize_shared_path();)
static void create_package_info_table();
static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
int number_of_entries);
@@ -312,10 +326,21 @@
return e;
}
+#if INCLUDE_CDS
// Sharing dump and restore
static void copy_package_info_buckets(char** top, char* end);
static void copy_package_info_table(char** top, char* end);
+ static void check_shared_classpath(const char *path);
+ static void finalize_shared_paths_misc_info();
+ static int get_shared_paths_misc_info_size();
+ static void* get_shared_paths_misc_info();
+ static bool check_shared_paths_misc_info(void* info, int size);
+ static void exit_with_path_failure(const char* error, const char* message);
+#endif
+
+ static void trace_class_path(const char* msg, const char* name = NULL);
+
// VM monitoring and management support
static jlong classloader_time_ms();
static jlong class_method_total_size();
@@ -339,7 +364,7 @@
// Force compilation of all methods in all classes in bootstrap class path (stress test)
#ifndef PRODUCT
- private:
+ protected:
static int _compile_the_world_class_counter;
static int _compile_the_world_method_counter;
public:
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/classLoaderExt.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
+
+#include "classfile/classLoader.hpp"
+
+class ClassLoaderExt: public ClassLoader { // AllStatic
+public:
+
+ class Context {
+ const char* _file_name;
+ public:
+ Context(const char* class_name, const char* file_name, TRAPS) {
+ _file_name = file_name;
+ }
+
+ bool check(ClassFileStream* stream, const int classpath_index) {
+ return true;
+ }
+
+ bool should_verify(int classpath_index) {
+ return false;
+ }
+
+ instanceKlassHandle record_result(const int classpath_index,
+ ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
+ if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
+ if (DumpSharedSpaces) {
+ result->set_shared_classpath_index(classpath_index);
+ }
+ return result;
+ } else {
+ return instanceKlassHandle(); // NULL
+ }
+ }
+ };
+
+
+ static void add_class_path_entry(char* path, bool check_for_duplicates,
+ ClassPathEntry* new_entry) {
+ ClassLoader::add_to_list(new_entry);
+ }
+ static void setup_search_paths() {}
+};
+
+#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
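
The Context hooks above are deliberate no-ops in this base version and look like extension points that a caller threads through class-file loading. As a rough standalone illustration only (ordinary C++, not HotSpot code; LoadContext and FakeKlass are hypothetical stand-ins for Context and instanceKlassHandle), the intended shape is a per-load object whose record_result() tags the class with its classpath index when dumping:

    #include <cstdio>
    #include <string>

    struct FakeKlass {                        // stand-in for instanceKlassHandle
      std::string name;
      int shared_classpath_index = -1;        // mirrors set_shared_classpath_index()
    };

    class LoadContext {                       // models ClassLoaderExt::Context
      const char* _file_name;
    public:
      explicit LoadContext(const char* file_name) : _file_name(file_name) {}
      bool check(int /*classpath_index*/)         { return true;  }   // veto hook
      bool should_verify(int /*classpath_index*/) { return false; }   // extra-verify hook
      FakeKlass* record_result(int classpath_index, FakeKlass* k, bool dumping) {
        if (dumping) {
          k->shared_classpath_index = classpath_index;  // remember where it came from
        }
        std::printf("loaded %s from %s\n", k->name.c_str(), _file_name);
        return k;
      }
    };

    int main() {
      FakeKlass k{"java/lang/Object"};
      LoadContext ctx("java/lang/Object.class");
      if (ctx.check(0)) {
        FakeKlass* res = ctx.record_result(0, &k, /*dumping=*/true);
        std::printf("%s -> classpath index %d\n",
                    res->name.c_str(), res->shared_classpath_index);
      }
      return 0;
    }
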
--- a/hotspot/src/share/vm/classfile/dictionary.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/dictionary.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -220,6 +220,29 @@
_pd_cache_table->roots_oops_do(strong, weak);
}
+void Dictionary::remove_classes_in_error_state() {
+ assert(DumpSharedSpaces, "supported only when dumping");
+ DictionaryEntry* probe = NULL;
+ for (int index = 0; index < table_size(); index++) {
+ for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
+ probe = *p;
+ InstanceKlass* ik = InstanceKlass::cast(probe->klass());
+ if (ik->is_in_error_state()) { // purge this entry
+ *p = probe->next();
+ if (probe == _current_class_entry) {
+ _current_class_entry = NULL;
+ }
+ free_entry(probe);
+ ResourceMark rm;
+ tty->print_cr("Removed error class: %s", ik->external_name());
+ continue;
+ }
+
+ p = probe->next_addr();
+ }
+ }
+}
+
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary; only
// protection domain oops contain references into the heap. In a first
@@ -693,16 +716,17 @@
// ----------------------------------------------------------------------------
-#ifndef PRODUCT
-void Dictionary::print() {
+void Dictionary::print(bool details) {
ResourceMark rm;
HandleMark hm;
- tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
- table_size(), number_of_entries());
- tty->print_cr("^ indicates that initiating loader is different from "
- "defining loader");
+ if (details) {
+ tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
+ table_size(), number_of_entries());
+ tty->print_cr("^ indicates that initiating loader is different from "
+ "defining loader");
+ }
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
@@ -713,21 +737,28 @@
ClassLoaderData* loader_data = probe->loader_data();
bool is_defining_class =
(loader_data == InstanceKlass::cast(e)->class_loader_data());
- tty->print("%s%s", is_defining_class ? " " : "^",
+ tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
e->external_name());
+ if (details) {
tty->print(", loader ");
- loader_data->print_value();
+ if (loader_data != NULL) {
+ loader_data->print_value();
+ } else {
+ tty->print("NULL");
+ }
+ }
tty->cr();
}
}
- tty->cr();
- _pd_cache_table->print();
+
+ if (details) {
+ tty->cr();
+ _pd_cache_table->print();
+ }
tty->cr();
}
-#endif
-
void Dictionary::verify() {
guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
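
remove_classes_in_error_state() above relies on the classic pointer-to-pointer splice: walking the bucket chain through a DictionaryEntry** lets an entry be unlinked mid-walk without tracking a separate "previous" node. A standalone sketch of that pattern (ordinary C++, not HotSpot code; Node and remove_in_error are made-up names):

    #include <cstdio>

    struct Node {
      int value;
      bool in_error;
      Node* next;
      Node** next_addr() { return &next; }   // same role as DictionaryEntry::next_addr()
    };

    // Walk the chain through a Node** so a matching entry can be spliced out in
    // place; advance only when the current entry is kept.
    static void remove_in_error(Node** head) {
      for (Node** p = head; *p != nullptr; ) {
        Node* probe = *p;
        if (probe->in_error) {     // purge this entry
          *p = probe->next;        // unlink without touching a "previous" pointer
          delete probe;
          continue;                // *p now refers to the next candidate
        }
        p = probe->next_addr();
      }
    }

    int main() {
      Node* head = new Node{3, false, new Node{2, true, new Node{1, false, nullptr}}};
      remove_in_error(&head);
      for (Node* n = head; n != nullptr; n = n->next) {
        std::printf("%d ", n->value);        // prints: 3 1
      }
      std::printf("\n");
      while (head != nullptr) {              // free the survivors
        Node* next = head->next;
        delete head;
        head = next;
      }
      return 0;
    }
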
--- a/hotspot/src/share/vm/classfile/dictionary.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/dictionary.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -100,6 +100,7 @@
void methods_do(void f(Method*));
void unlink(BoolObjectClosure* is_alive);
+ void remove_classes_in_error_state();
// Classes loaded by the bootstrap loader are always strongly reachable.
// If we're not doing class unloading, all classes are strongly reachable.
@@ -127,9 +128,7 @@
ProtectionDomainCacheEntry* cache_get(oop protection_domain);
-#ifndef PRODUCT
- void print();
-#endif
+ void print(bool details = true);
void verify();
};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedClassUtil.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
+
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/filemap.hpp"
+
+class SharedClassUtil : AllStatic {
+public:
+
+ static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
+ return new SharedPathsMiscInfo();
+ }
+
+ static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
+ return new SharedPathsMiscInfo(buf, size);
+ }
+
+ static FileMapInfo::FileMapHeader* allocate_file_map_header() {
+ return new FileMapInfo::FileMapHeader();
+ }
+
+ static size_t file_map_header_size() {
+ return sizeof(FileMapInfo::FileMapHeader);
+ }
+
+ static size_t shared_class_path_entry_size() {
+ return sizeof(SharedClassPathEntry);
+ }
+
+ static void update_shared_classpath(ClassPathEntry *cpe,
+ SharedClassPathEntry* ent,
+ time_t timestamp,
+ long filesize, TRAPS) {
+ ent->_timestamp = timestamp;
+ ent->_filesize = filesize;
+ }
+
+ static void initialize(TRAPS) {}
+
+ inline static bool is_shared_boot_class(Klass* klass) {
+ return (klass->_shared_class_path_index >= 0);
+ }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,154 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
+#include "classfile/classLoaderData.inline.hpp"
+#include "classfile/sharedPathsMiscInfo.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/metaspaceShared.hpp"
+#include "runtime/arguments.hpp"
+
+void SharedPathsMiscInfo::add_path(const char* path, int type) {
+ if (TraceClassPaths) {
+ tty->print("[type=%s] ", type_name(type));
+ trace_class_path("[Add misc shared path ", path);
+ }
+ write(path, strlen(path) + 1);
+ write_jint(jint(type));
+}
+
+void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) {
+ assert(_allocated, "cannot modify buffer during validation.");
+ int used = get_used_bytes();
+ int target = used + int(needed_bytes);
+ if (target > _buf_size) {
+ _buf_size = _buf_size * 2 + (int)needed_bytes;
+ _buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass);
+ _cur_ptr = _buf_start + used;
+ _end_ptr = _buf_start + _buf_size;
+ }
+}
+
+void SharedPathsMiscInfo::write(const void* ptr, size_t size) {
+ ensure_size(size);
+ memcpy(_cur_ptr, ptr, size);
+ _cur_ptr += size;
+}
+
+bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
+ if (_cur_ptr + size <= _end_ptr) {
+ memcpy(ptr, _cur_ptr, size);
+ _cur_ptr += size;
+ return true;
+ }
+ return false;
+}
+
+bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
+ ClassLoader::trace_class_path(msg, name);
+ MetaspaceShared::set_archive_loading_failed();
+ return false;
+}
+
+bool SharedPathsMiscInfo::check() {
+ // The whole buffer must be 0 terminated so that we can use strlen and strcmp
+ // without fear.
+ _end_ptr -= sizeof(jint);
+ if (_cur_ptr >= _end_ptr) {
+ return fail("Truncated archive file header");
+ }
+ if (*_end_ptr != 0) {
+ return fail("Corrupted archive file header");
+ }
+
+ while (_cur_ptr < _end_ptr) {
+ jint type;
+ const char* path = _cur_ptr;
+ _cur_ptr += strlen(path) + 1;
+ if (!read_jint(&type)) {
+ return fail("Corrupted archive file header");
+ }
+ if (TraceClassPaths) {
+ tty->print("[type=%s ", type_name(type));
+ print_path(tty, type, path);
+ tty->print_cr("]");
+ }
+ if (!check(type, path)) {
+ if (!PrintSharedArchiveAndExit) {
+ return false;
+ }
+ } else {
+ trace_class_path("[ok");
+ }
+ }
+
+ return true;
+}
+
+bool SharedPathsMiscInfo::check(jint type, const char* path) {
+ switch (type) {
+ case BOOT:
+ if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
+ return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
+ }
+ break;
+ case NON_EXIST: // fall-through
+ case REQUIRED:
+ {
+ struct stat st;
+ if (os::stat(path, &st) != 0) {
+ // The file does not actually exist
+ if (type == REQUIRED) {
+ // but we require it to exist -> fail
+ return fail("Required file doesn't exist");
+ }
+ } else {
+ // The file actually exists
+ if (type == NON_EXIST) {
+ // But we want it to not exist -> fail
+ return fail("File must not exist");
+ }
+ time_t timestamp;
+ long filesize;
+
+ if (!read_time(&timestamp) || !read_long(&filesize)) {
+ return fail("Corrupted archive file header");
+ }
+ if (timestamp != st.st_mtime) {
+ return fail("Timestamp mismatch");
+ }
+ if (filesize != st.st_size) {
+ return fail("File size mismatch");
+ }
+ }
+ }
+ break;
+
+ default:
+ return fail("Corrupted archive file header");
+ }
+
+ return true;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/sharedPathsMiscInfo.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
+
+#include "runtime/os.hpp"
+
+// During dumping time, when processing class paths, we build up the dump-time
+// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
+// However, we need to store other "misc" information for run-time checking, such as
+//
+// + The values of Arguments::get_sysclasspath() used during dumping.
+//
+// + The meta-index file(s) used during dumping (including modification time and size)
+//
+// + The class path elements that were specified during dumping but did not exist --
+// these elements must also be specified at run time, and they also must not
+// exist at run time.
+//
+// These misc items are stored in a linear buffer in SharedPathsMiscInfo.
+// The storage format is stream oriented to minimize its size.
+//
+// When writing the information to the archive file, SharedPathsMiscInfo is stored in
+// the archive file header. At run-time, this information is used only during initialization
+// (accessed using read() instead of mmap()), and is deallocated afterwards to save space.
+//
+// The SharedPathsMiscInfo class is used both for creating the information (during
+// dump time) and for validating it (at run time). Different constructors are used in the
+// two situations. See below.
+
+class SharedPathsMiscInfo : public CHeapObj<mtClass> {
+protected:
+ char* _buf_start;
+ char* _cur_ptr;
+ char* _end_ptr;
+ int _buf_size;
+ bool _allocated; // was _buf_start allocated by me?
+ void ensure_size(size_t needed_bytes);
+ void add_path(const char* path, int type);
+
+ void write(const void* ptr, size_t size);
+ bool read(void* ptr, size_t size);
+
+ static void trace_class_path(const char* msg, const char* name = NULL) {
+ ClassLoader::trace_class_path(msg, name);
+ }
+protected:
+ static bool fail(const char* msg, const char* name = NULL);
+ virtual bool check(jint type, const char* path);
+
+public:
+ enum {
+ INITIAL_BUF_SIZE = 128
+ };
+ // This constructor is used when creating the misc information (during dump)
+ SharedPathsMiscInfo() {
+ _buf_size = INITIAL_BUF_SIZE;
+ _cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
+ _allocated = true;
+ }
+ // This constructor is used when validating the misc info (during run time)
+ SharedPathsMiscInfo(char *buff, int size) {
+ _cur_ptr = _buf_start = buff;
+ _end_ptr = _buf_start + size;
+ _buf_size = size;
+ _allocated = false;
+ }
+ ~SharedPathsMiscInfo() {
+ if (_allocated) {
+ FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
+ }
+ }
+ int get_used_bytes() {
+ return _cur_ptr - _buf_start;
+ }
+ void* buffer() {
+ return _buf_start;
+ }
+
+ // writing --
+
+ // The path must not exist at run-time
+ void add_nonexist_path(const char* path) {
+ add_path(path, NON_EXIST);
+ }
+
+ // The path must exist and have required size and modification time
+ void add_required_file(const char* path) {
+ add_path(path, REQUIRED);
+
+ struct stat st;
+ if (os::stat(path, &st) != 0) {
+ assert(0, "sanity");
+ ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen
+ }
+ write_time(st.st_mtime);
+ write_long(st.st_size);
+ }
+
+ // The path is the boot class path used at dump time; it must match the one used at run time
+ void add_boot_classpath(const char* path) {
+ add_path(path, BOOT);
+ }
+ int write_jint(jint num) {
+ write(&num, sizeof(num));
+ return 0;
+ }
+ void write_time(time_t t) {
+ write(&t, sizeof(t));
+ }
+ void write_long(long l) {
+ write(&l, sizeof(l));
+ }
+
+ bool dump_to_file(int fd) {
+ int n = get_used_bytes();
+ return (os::write(fd, _buf_start, n) == (size_t)n);
+ }
+
+ // reading --
+
+ enum {
+ BOOT = 1,
+ NON_EXIST = 2,
+ REQUIRED = 3
+ };
+
+ virtual const char* type_name(int type) {
+ switch (type) {
+ case BOOT: return "BOOT";
+ case NON_EXIST: return "NON_EXIST";
+ case REQUIRED: return "REQUIRED";
+ default: ShouldNotReachHere(); return "?";
+ }
+ }
+
+ virtual void print_path(outputStream* out, int type, const char* path) {
+ switch (type) {
+ case BOOT:
+ out->print("Expecting -Dsun.boot.class.path=%s", path);
+ break;
+ case NON_EXIST:
+ out->print("Expecting that %s does not exist", path);
+ break;
+ case REQUIRED:
+ out->print("Expecting that file %s must exist and not altered", path);
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+
+ bool check();
+ bool read_jint(jint *ptr) {
+ return read(ptr, sizeof(jint));
+ }
+ bool read_long(long *ptr) {
+ return read(ptr, sizeof(long));
+ }
+ bool read_time(time_t *ptr) {
+ return read(ptr, sizeof(time_t));
+ }
+};
+
+#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
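
The on-disk layout implied by the writer/reader pair above is a simple sequence of records: a NUL-terminated path, a jint type tag, and, for REQUIRED entries, the timestamp and file size that the run-time check compares against os::stat(). A standalone model of that stream (ordinary C++, not VM code; MiscInfoModel is a made-up name and std::vector stands in for the reallocated C-heap buffer):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <ctime>
    #include <vector>

    enum PathType : int32_t { BOOT = 1, NON_EXIST = 2, REQUIRED = 3 };

    class MiscInfoModel {
      std::vector<char> _buf;                  // grows on demand, like ensure_size()
      void write(const void* p, size_t n) {
        const char* c = static_cast<const char*>(p);
        _buf.insert(_buf.end(), c, c + n);
      }
    public:
      // Record layout: "<path>\0" followed by a 32-bit type tag.
      void add_path(const char* path, PathType type) {
        write(path, std::strlen(path) + 1);
        int32_t t = type;
        write(&t, sizeof(t));
      }
      // REQUIRED entries additionally carry the timestamp and size that are
      // re-checked against the actual file at run time.
      void add_required_file(const char* path, time_t mtime, long size) {
        add_path(path, REQUIRED);
        write(&mtime, sizeof(mtime));
        write(&size, sizeof(size));
      }
      size_t used_bytes() const { return _buf.size(); }
    };

    int main() {
      MiscInfoModel info;
      info.add_path("/nonexistent/endorsed", NON_EXIST);
      info.add_required_file("/jdk/jre/lib/rt.jar", /*mtime=*/0, /*size=*/12345);
      std::printf("misc info uses %zu bytes\n", info.used_bytes());
      return 0;
    }
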
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -31,10 +31,15 @@
#include "classfile/resolutionErrors.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
+#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
@@ -110,6 +115,8 @@
CHECK);
_java_system_loader = (oop)result.get_jobject();
+
+ CDS_ONLY(SystemDictionaryShared::initialize(CHECK);)
}
@@ -974,6 +981,7 @@
// Create a new CLD for anonymous class, that uses the same class loader
// as the host_klass
guarantee(host_klass->class_loader() == class_loader(), "should be the same");
+ guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping");
loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
loader_data->record_dependency(host_klass(), CHECK_NULL);
} else {
@@ -1134,7 +1142,7 @@
return k();
}
-
+#if INCLUDE_CDS
void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries) {
assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>),
@@ -1167,15 +1175,21 @@
instanceKlassHandle SystemDictionary::load_shared_class(
Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle ik (THREAD, find_shared_class(class_name));
- return load_shared_class(ik, class_loader, THREAD);
+ // Make sure we only return the boot class for the NULL classloader.
+ if (ik.not_null() &&
+ SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) {
+ Handle protection_domain;
+ return load_shared_class(ik, class_loader, protection_domain, THREAD);
+ }
+ return instanceKlassHandle();
}
-instanceKlassHandle SystemDictionary::load_shared_class(
- instanceKlassHandle ik, Handle class_loader, TRAPS) {
- assert(class_loader.is_null(), "non-null classloader for shared class?");
+instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
+ Handle class_loader,
+ Handle protection_domain, TRAPS) {
if (ik.not_null()) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
- Symbol* class_name = ik->name();
+ Symbol* class_name = ik->name();
// Found the class, now load the superclass and interfaces. If they
// are shared, add them to the main system dictionary and reset
@@ -1184,7 +1198,7 @@
if (ik->super() != NULL) {
Symbol* cn = ik->super()->name();
resolve_super_or_fail(class_name, cn,
- class_loader, Handle(), true, CHECK_(nh));
+ class_loader, protection_domain, true, CHECK_(nh));
}
Array<Klass*>* interfaces = ik->local_interfaces();
@@ -1197,7 +1211,7 @@
// reinitialized yet (they will be once the interface classes
// are loaded)
Symbol* name = k->name();
- resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh));
+ resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_(nh));
}
// Adjust methods to recover missing data. They need addresses for
@@ -1206,30 +1220,47 @@
// Updating methods must be done under a lock so multiple
// threads don't update these in parallel
- // Shared classes are all currently loaded by the bootstrap
- // classloader, so this will never cause a deadlock on
- // a custom class loader lock.
+ //
+ // Shared classes are all currently loaded by either the bootstrap or
+ // internal parallel class loaders, so this will never cause a deadlock
+ // on a custom class loader lock.
+ ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
{
Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
check_loader_lock_contention(lockObject, THREAD);
ObjectLocker ol(lockObject, THREAD, true);
- ik->restore_unshareable_info(CHECK_(nh));
+ ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh));
}
if (TraceClassLoading) {
ResourceMark rm;
tty->print("[Loaded %s", ik->external_name());
tty->print(" from shared objects file");
+ if (class_loader.not_null()) {
+ tty->print(" by %s", loader_data->loader_name());
+ }
tty->print_cr("]");
}
+
+#if INCLUDE_CDS
+ if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
+ // Only dump the classes that can be stored into CDS archive
+ if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
+ ResourceMark rm(THREAD);
+ classlist_file->print_cr("%s", ik->name()->as_C_string());
+ classlist_file->flush();
+ }
+ }
+#endif
+
// notify a class loaded from shared object
ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
true /* shared class */);
}
return ik;
}
-
+#endif
instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
@@ -1239,8 +1270,10 @@
// shared spaces.
instanceKlassHandle k;
{
+#if INCLUDE_CDS
PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time());
k = load_shared_class(class_name, class_loader, THREAD);
+#endif
}
if (k.is_null()) {
@@ -1599,7 +1632,6 @@
Universe::flush_dependents_on(k);
}
-
// ----------------------------------------------------------------------------
// GC support
@@ -1682,6 +1714,7 @@
void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
strong->do_oop(&_java_system_loader);
strong->do_oop(&_system_loader_lock_obj);
+ CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);)
// Adjust dictionary
dictionary()->roots_oops_do(strong, weak);
@@ -1693,6 +1726,7 @@
void SystemDictionary::oops_do(OopClosure* f) {
f->do_oop(&_java_system_loader);
f->do_oop(&_system_loader_lock_obj);
+ CDS_ONLY(SystemDictionaryShared::oops_do(f);)
// Adjust dictionary
dictionary()->oops_do(f);
@@ -1754,6 +1788,10 @@
invoke_method_table()->methods_do(f);
}
+void SystemDictionary::remove_classes_in_error_state() {
+ dictionary()->remove_classes_in_error_state();
+}
+
// ----------------------------------------------------------------------------
// Lazily load klasses
@@ -2563,10 +2601,12 @@
// ----------------------------------------------------------------------------
-#ifndef PRODUCT
+void SystemDictionary::print_shared(bool details) {
+ shared_dictionary()->print(details);
+}
-void SystemDictionary::print() {
- dictionary()->print();
+void SystemDictionary::print(bool details) {
+ dictionary()->print(details);
// Placeholders
GCMutexLocker mu(SystemDictionary_lock);
@@ -2576,7 +2616,6 @@
constraints()->print();
}
-#endif
void SystemDictionary::verify() {
guarantee(dictionary() != NULL, "Verify of system dictionary failed");
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -111,6 +111,7 @@
do_klass(SecurityManager_klass, java_lang_SecurityManager, Pre ) \
do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \
do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \
+ do_klass(SecureClassLoader_klass, java_security_SecureClassLoader, Pre ) \
do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \
do_klass(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre ) \
do_klass(LinkageError_klass, java_lang_LinkageError, Pre ) \
@@ -166,6 +167,15 @@
do_klass(StringBuilder_klass, java_lang_StringBuilder, Pre ) \
do_klass(misc_Unsafe_klass, sun_misc_Unsafe, Pre ) \
\
+ /* support for CDS */ \
+ do_klass(ByteArrayInputStream_klass, java_io_ByteArrayInputStream, Pre ) \
+ do_klass(File_klass, java_io_File, Pre ) \
+ do_klass(URLClassLoader_klass, java_net_URLClassLoader, Pre ) \
+ do_klass(URL_klass, java_net_URL, Pre ) \
+ do_klass(Jar_Manifest_klass, java_util_jar_Manifest, Pre ) \
+ do_klass(sun_misc_Launcher_klass, sun_misc_Launcher, Pre ) \
+ do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \
+ \
/* It's NULL in non-1.4 JDKs. */ \
do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
@@ -221,7 +231,7 @@
static Klass* resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS);
// Convenient call for null loader and protection domain.
static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS);
-private:
+protected:
// handle error translation for resolve_or_null results
static Klass* handle_resolution_exception(Symbol* class_name, bool throw_error, KlassHandle klass_h, TRAPS);
@@ -326,6 +336,9 @@
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(BoolObjectClosure* is_alive);
+ // Used by DumpSharedSpaces only to remove classes that failed verification
+ static void remove_classes_in_error_state();
+
static int calculate_systemdictionary_size(int loadedclasses);
// Applies "f->do_oop" to all root oops in the system dictionary.
@@ -335,7 +348,7 @@
// System loader lock
static oop system_loader_lock() { return _system_loader_lock_obj; }
-private:
+protected:
// Extended Redefine classes support (tbi)
static void preloaded_classes_do(KlassClosure* f);
static void lazily_loaded_classes_do(KlassClosure* f);
@@ -348,7 +361,8 @@
static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries);
// Printing
- static void print() PRODUCT_RETURN;
+ static void print(bool details = true);
+ static void print_shared(bool details = true);
static void print_class_statistics() PRODUCT_RETURN;
static void print_method_statistics() PRODUCT_RETURN;
@@ -424,7 +438,7 @@
static void load_abstract_ownable_synchronizer_klass(TRAPS);
-private:
+protected:
// Tells whether ClassLoader.loadClassInternal is present
static bool has_loadClassInternal() { return _has_loadClassInternal; }
@@ -452,7 +466,7 @@
// Register a new class loader
static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
-private:
+protected:
// Mirrors for primitive classes (created eagerly)
static oop check_mirror(oop m) {
assert(m != NULL, "mirror not initialized");
@@ -523,7 +537,7 @@
static Symbol* find_resolution_error(constantPoolHandle pool, int which,
Symbol** message);
- private:
+ protected:
enum Constants {
_loader_constraint_size = 107, // number of entries in constraint table
@@ -574,7 +588,7 @@
friend class CounterDecay;
static Klass* try_get_next_class();
-private:
+protected:
static void validate_protection_domain(instanceKlassHandle klass,
Handle class_loader,
Handle protection_domain, TRAPS);
@@ -601,10 +615,10 @@
static instanceKlassHandle find_or_define_instance_class(Symbol* class_name,
Handle class_loader,
instanceKlassHandle k, TRAPS);
- static instanceKlassHandle load_shared_class(Symbol* class_name,
- Handle class_loader, TRAPS);
static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
- Handle class_loader, TRAPS);
+ Handle class_loader,
+ Handle protection_domain,
+ TRAPS);
static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
static void check_loader_lock_contention(Handle loader_lock, TRAPS);
@@ -612,9 +626,12 @@
static bool is_parallelDefine(Handle class_loader);
public:
+ static instanceKlassHandle load_shared_class(Symbol* class_name,
+ Handle class_loader,
+ TRAPS);
static bool is_ext_class_loader(Handle class_loader);
-private:
+protected:
static Klass* find_shared_class(Symbol* class_name);
// Setup link to hierarchy
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/classfile/systemDictionaryShared.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+
+#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
+
+#include "classfile/systemDictionary.hpp"
+
+class SystemDictionaryShared: public SystemDictionary {
+public:
+ static void initialize(TRAPS) {}
+ static instanceKlassHandle find_or_load_shared_class(Symbol* class_name,
+ Handle class_loader,
+ TRAPS) {
+ return instanceKlassHandle();
+ }
+ static void roots_oops_do(OopClosure* blk) {}
+ static void oops_do(OopClosure* f) {}
+ static bool is_sharing_possible(ClassLoaderData* loader_data) {
+ oop class_loader = loader_data->class_loader();
+ return (class_loader == NULL);
+ }
+};
+
+#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
--- a/hotspot/src/share/vm/classfile/verifier.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/verifier.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -2217,6 +2217,181 @@
}
}
+// Look at the method's handlers. If the bci is in the handler's try block
+// then check if the handler_pc is already on the stack. If not, push it.
+void ClassVerifier::push_handlers(ExceptionTable* exhandlers,
+ GrowableArray<u4>* handler_stack,
+ u4 bci) {
+ int exlength = exhandlers->length();
+ for(int x = 0; x < exlength; x++) {
+ if (bci >= exhandlers->start_pc(x) && bci < exhandlers->end_pc(x)) {
+ handler_stack->append_if_missing(exhandlers->handler_pc(x));
+ }
+ }
+}
+
+// Return TRUE if all code paths starting with start_bc_offset end in
+// bytecode athrow or loop.
+bool ClassVerifier::ends_in_athrow(u4 start_bc_offset) {
+ ResourceMark rm;
+ // Create bytecode stream.
+ RawBytecodeStream bcs(method());
+ u4 code_length = method()->code_size();
+ bcs.set_start(start_bc_offset);
+ u4 target;
+ // Create stack for storing bytecode start offsets for if* and *switch.
+ GrowableArray<u4>* bci_stack = new GrowableArray<u4>(30);
+ // Create stack for handlers for try blocks containing this handler.
+ GrowableArray<u4>* handler_stack = new GrowableArray<u4>(30);
+ // Create list of visited branch opcodes (goto* and if*).
+ GrowableArray<u4>* visited_branches = new GrowableArray<u4>(30);
+ ExceptionTable exhandlers(_method());
+
+ while (true) {
+ if (bcs.is_last_bytecode()) {
+ // if no more starting offsets to parse or if at the end of the
+ // method then return false.
+ if ((bci_stack->is_empty()) || ((u4)bcs.end_bci() == code_length))
+ return false;
+ // Pop a bytecode starting offset and scan from there.
+ bcs.set_start(bci_stack->pop());
+ }
+ Bytecodes::Code opcode = bcs.raw_next();
+ u4 bci = bcs.bci();
+
+ // If the bytecode is in a TRY block, push its handlers so they
+ // will get parsed.
+ push_handlers(&exhandlers, handler_stack, bci);
+
+ switch (opcode) {
+ case Bytecodes::_if_icmpeq:
+ case Bytecodes::_if_icmpne:
+ case Bytecodes::_if_icmplt:
+ case Bytecodes::_if_icmpge:
+ case Bytecodes::_if_icmpgt:
+ case Bytecodes::_if_icmple:
+ case Bytecodes::_ifeq:
+ case Bytecodes::_ifne:
+ case Bytecodes::_iflt:
+ case Bytecodes::_ifge:
+ case Bytecodes::_ifgt:
+ case Bytecodes::_ifle:
+ case Bytecodes::_if_acmpeq:
+ case Bytecodes::_if_acmpne:
+ case Bytecodes::_ifnull:
+ case Bytecodes::_ifnonnull:
+ target = bcs.dest();
+ if (visited_branches->contains(bci)) {
+ if (bci_stack->is_empty()) return true;
+ // Pop a bytecode starting offset and scan from there.
+ bcs.set_start(bci_stack->pop());
+ } else {
+ if (target > bci) { // forward branch
+ if (target >= code_length) return false;
+ // Push the branch target onto the stack.
+ bci_stack->push(target);
+ // then, scan bytecodes starting with next.
+ bcs.set_start(bcs.next_bci());
+ } else { // backward branch
+ // Push bytecode offset following backward branch onto the stack.
+ bci_stack->push(bcs.next_bci());
+ // Check bytecodes starting with branch target.
+ bcs.set_start(target);
+ }
+ // Record target so we don't branch here again.
+ visited_branches->append(bci);
+ }
+ break;
+
+ case Bytecodes::_goto:
+ case Bytecodes::_goto_w:
+ target = (opcode == Bytecodes::_goto ? bcs.dest() : bcs.dest_w());
+ if (visited_branches->contains(bci)) {
+ if (bci_stack->is_empty()) return true;
+ // Been here before, pop new starting offset from stack.
+ bcs.set_start(bci_stack->pop());
+ } else {
+ if (target >= code_length) return false;
+ // Continue scanning from the target onward.
+ bcs.set_start(target);
+ // Record target so we don't branch here again.
+ visited_branches->append(bci);
+ }
+ break;
+
+ // Check that all switch alternatives end in 'athrow' bytecodes. Since it
+ // is difficult to determine where each switch alternative ends, parse
+ // each switch alternative until it hits a 'return' or 'athrow' bytecode or
+ // reaches the end of the method's bytecodes. This is gross but should be okay
+ // because:
+ // 1. tableswitch and lookupswitch bytecodes in handlers for explicit
+ // constructor invocations should be rare.
+ // 2. if each switch alternative ends in an athrow then the parsing should be
+ // short. If there is no athrow then it is bogus code, anyway.
+ case Bytecodes::_lookupswitch:
+ case Bytecodes::_tableswitch:
+ {
+ address aligned_bcp = (address) round_to((intptr_t)(bcs.bcp() + 1), jintSize);
+ u4 default_offset = Bytes::get_Java_u4(aligned_bcp) + bci;
+ int keys, delta;
+ if (opcode == Bytecodes::_tableswitch) {
+ jint low = (jint)Bytes::get_Java_u4(aligned_bcp + jintSize);
+ jint high = (jint)Bytes::get_Java_u4(aligned_bcp + 2*jintSize);
+ // This is invalid, but let the regular bytecode verifier
+ // report this because the user will get a better error message.
+ if (low > high) return true;
+ keys = high - low + 1;
+ delta = 1;
+ } else {
+ keys = (int)Bytes::get_Java_u4(aligned_bcp + jintSize);
+ delta = 2;
+ }
+ // Invalid, let the regular bytecode verifier deal with it.
+ if (keys < 0) return true;
+
+ // Push the offset of the next bytecode onto the stack.
+ bci_stack->push(bcs.next_bci());
+
+ // Push the switch alternatives onto the stack.
+ for (int i = 0; i < keys; i++) {
+ u4 target = bci + (jint)Bytes::get_Java_u4(aligned_bcp+(3+i*delta)*jintSize);
+ if (target > code_length) return false;
+ bci_stack->push(target);
+ }
+
+ // Start bytecode parsing for the switch at the default alternative.
+ if (default_offset > code_length) return false;
+ bcs.set_start(default_offset);
+ break;
+ }
+
+ case Bytecodes::_return:
+ return false;
+
+ case Bytecodes::_athrow:
+ {
+ if (bci_stack->is_empty()) {
+ if (handler_stack->is_empty()) {
+ return true;
+ } else {
+ // Parse the catch handlers for try blocks containing athrow.
+ bcs.set_start(handler_stack->pop());
+ }
+ } else {
+ // Pop a bytecode offset and start scanning from there.
+ bcs.set_start(bci_stack->pop());
+ }
+ }
+ break;
+
+ default:
+ ;
+ } // end switch
+ } // end while loop
+
+ return false;
+}
+
void ClassVerifier::verify_invoke_init(
RawBytecodeStream* bcs, u2 ref_class_index, VerificationType ref_class_type,
StackMapFrame* current_frame, u4 code_length, bool *this_uninit,
@@ -2236,18 +2411,26 @@
return;
}
- // Make sure that this call is not done from within a TRY block because
- // that can result in returning an incomplete object. Simply checking
- // (bci >= start_pc) also ensures that this call is not done after a TRY
- // block. That is also illegal because this call must be the first Java
- // statement in the constructor.
+ // Check if this call is done from inside of a TRY block. If so, make
+ // sure that all catch clause paths end in a throw. Otherwise, this
+ // can result in returning an incomplete object.
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
for(int i = 0; i < exlength; i++) {
- if (bci >= exhandlers.start_pc(i)) {
- verify_error(ErrorContext::bad_code(bci),
- "Bad <init> method call from after the start of a try block");
- return;
+ u2 start_pc = exhandlers.start_pc(i);
+ u2 end_pc = exhandlers.end_pc(i);
+
+ if (bci >= start_pc && bci < end_pc) {
+ if (!ends_in_athrow(exhandlers.handler_pc(i))) {
+ verify_error(ErrorContext::bad_code(bci),
+ "Bad <init> method call from after the start of a try block");
+ return;
+ } else if (VerboseVerification) {
+ ResourceMark rm;
+ tty->print_cr(
+ "Survived call to ends_in_athrow(): %s",
+ current_class()->name()->as_C_string());
+ }
}
}
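
The relaxed rule above only accepts an <init> call inside a try block when every catch path terminates in athrow, which ends_in_athrow() decides with a worklist scan over the bytecodes. A stripped-down standalone model of that scan (ordinary C++ over a toy instruction set, not verifier code; Insn, ends_in_throw and the opcode names are made up):

    #include <cstdio>
    #include <set>
    #include <vector>

    enum Op { IFEQ, GOTO, RETURN, ATHROW, NOP };
    struct Insn { Op op; int target; };        // target used by IFEQ/GOTO only

    // Returns true only if every path from 'start' ends in a throw (or loops).
    static bool ends_in_throw(const std::vector<Insn>& code, int start) {
      std::vector<int> worklist{start};        // pending start offsets (bci_stack)
      std::set<int> visited;                   // branches already explored
      while (!worklist.empty()) {
        int pc = worklist.back();
        worklist.pop_back();
        for (;;) {
          if (pc >= (int)code.size()) return false;  // fell off the end of the method
          const Insn& in = code[pc];
          if (in.op == RETURN) return false;   // a path that completes normally
          if (in.op == ATHROW) break;          // this path throws; try the next one
          if (in.op == IFEQ || in.op == GOTO) {
            if (!visited.insert(pc).second) break;   // seen before: treat as a loop
            worklist.push_back(in.target);     // explore the branch target later
            if (in.op == GOTO) break;          // unconditional: no fall-through
          }
          pc++;
        }
      }
      return true;
    }

    int main() {
      // 0: ifeq -> 3   1: nop   2: athrow   3: athrow     => every path throws
      std::vector<Insn> all_throw{{IFEQ, 3}, {NOP, 0}, {ATHROW, 0}, {ATHROW, 0}};
      // 0: ifeq -> 2   1: return   2: athrow              => one path returns
      std::vector<Insn> one_returns{{IFEQ, 2}, {RETURN, 0}, {ATHROW, 0}};
      std::printf("all_throw:   %d\n", ends_in_throw(all_throw, 0));    // 1
      std::printf("one_returns: %d\n", ends_in_throw(one_returns, 0));  // 0
      return 0;
    }
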
--- a/hotspot/src/share/vm/classfile/verifier.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/verifier.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -30,6 +30,7 @@
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "runtime/handles.hpp"
+#include "utilities/growableArray.hpp"
#include "utilities/exceptions.hpp"
// The verifier class
@@ -303,6 +304,16 @@
StackMapFrame* current_frame, u4 code_length, bool* this_uninit,
constantPoolHandle cp, TRAPS);
+ // Used by ends_in_athrow() to push all handlers that contain bci onto
+ // the handler_stack, if the handler is not already on the stack.
+ void push_handlers(ExceptionTable* exhandlers,
+ GrowableArray<u4>* handler_stack,
+ u4 bci);
+
+ // Returns true if all paths starting with start_bc_offset end in athrow
+ // bytecode or loop.
+ bool ends_in_athrow(u4 start_bc_offset);
+
void verify_invoke_instructions(
RawBytecodeStream* bcs, u4 code_length, StackMapFrame* current_frame,
bool* this_uninit, VerificationType return_type,
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -91,11 +91,17 @@
template(java_lang_CharSequence, "java/lang/CharSequence") \
template(java_lang_SecurityManager, "java/lang/SecurityManager") \
template(java_security_AccessControlContext, "java/security/AccessControlContext") \
+ template(java_security_CodeSource, "java/security/CodeSource") \
template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \
+ template(java_security_SecureClassLoader, "java/security/SecureClassLoader") \
+ template(java_net_URLClassLoader, "java/net/URLClassLoader") \
+ template(java_net_URL, "java/net/URL") \
+ template(java_util_jar_Manifest, "java/util/jar/Manifest") \
template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \
template(java_io_OutputStream, "java/io/OutputStream") \
template(java_io_Reader, "java/io/Reader") \
template(java_io_BufferedReader, "java/io/BufferedReader") \
+ template(java_io_File, "java/io/File") \
template(java_io_FileInputStream, "java/io/FileInputStream") \
template(java_io_ByteArrayInputStream, "java/io/ByteArrayInputStream") \
template(java_io_Serializable, "java/io/Serializable") \
@@ -106,6 +112,7 @@
template(java_util_Hashtable, "java/util/Hashtable") \
template(java_lang_Compiler, "java/lang/Compiler") \
template(sun_misc_Signal, "sun/misc/Signal") \
+ template(sun_misc_Launcher, "sun/misc/Launcher") \
template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \
@@ -396,6 +403,14 @@
template(signers_name, "signers_name") \
template(loader_data_name, "loader_data") \
template(dependencies_name, "dependencies") \
+ template(input_stream_void_signature, "(Ljava/io/InputStream;)V") \
+ template(getFileURL_name, "getFileURL") \
+ template(getFileURL_signature, "(Ljava/io/File;)Ljava/net/URL;") \
+ template(definePackageInternal_name, "definePackageInternal") \
+ template(definePackageInternal_signature, "(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V") \
+ template(getProtectionDomain_name, "getProtectionDomain") \
+ template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
+ template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
\
/* non-intrinsic name/signature pairs: */ \
template(register_method_name, "register") \
--- a/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/interpreter/abstractInterpreter.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -280,9 +280,6 @@
address generate_result_handler_for(BasicType type);
address generate_slow_signature_handler();
- // entry point generator
- address generate_method_entry(AbstractInterpreter::MethodKind kind);
-
void bang_stack_shadow_pages(bool native_call);
void generate_all();
--- a/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/interpreter/cppInterpreter.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -108,7 +108,7 @@
}
-#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind)
+#define method_entry(kind) Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind)
{ CodeletMark cm(_masm, "(kind = frame_manager)");
// all non-native method kinds
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -29,6 +29,7 @@
#include "interpreter/bytecodeHistogram.hpp"
#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterGenerator.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
@@ -261,7 +262,7 @@
// Special intrinsic method?
// Note: This test must come _after_ the test for native methods,
// otherwise we will run into problems with JDK 1.2, see also
- // AbstractInterpreterGenerator::generate_method_entry() for
+ // InterpreterGenerator::generate_method_entry() for
// for details.
switch (m->intrinsic_id()) {
case vmIntrinsics::_dsin : return java_lang_math_sin ;
@@ -521,3 +522,50 @@
Interpreter::_entry_table[kind] = Interpreter::_entry_table[Interpreter::abstract];
}
}
+
+// Generate method entries
+address InterpreterGenerator::generate_method_entry(
+ AbstractInterpreter::MethodKind kind) {
+ // determine code generation flags
+ bool synchronized = false;
+ address entry_point = NULL;
+
+ switch (kind) {
+ case Interpreter::zerolocals : break;
+ case Interpreter::zerolocals_synchronized: synchronized = true; break;
+ case Interpreter::native : entry_point = generate_native_entry(false); break;
+ case Interpreter::native_synchronized : entry_point = generate_native_entry(true); break;
+ case Interpreter::empty : entry_point = generate_empty_entry(); break;
+ case Interpreter::accessor : entry_point = generate_accessor_entry(); break;
+ case Interpreter::abstract : entry_point = generate_abstract_entry(); break;
+
+ case Interpreter::java_lang_math_sin : // fall thru
+ case Interpreter::java_lang_math_cos : // fall thru
+ case Interpreter::java_lang_math_tan : // fall thru
+ case Interpreter::java_lang_math_abs : // fall thru
+ case Interpreter::java_lang_math_log : // fall thru
+ case Interpreter::java_lang_math_log10 : // fall thru
+ case Interpreter::java_lang_math_sqrt : // fall thru
+ case Interpreter::java_lang_math_pow : // fall thru
+ case Interpreter::java_lang_math_exp : entry_point = generate_math_entry(kind); break;
+ case Interpreter::java_lang_ref_reference_get
+ : entry_point = generate_Reference_get_entry(); break;
+#ifndef CC_INTERP
+ case Interpreter::java_util_zip_CRC32_update
+ : entry_point = generate_CRC32_update_entry(); break;
+ case Interpreter::java_util_zip_CRC32_updateBytes
+ : // fall thru
+ case Interpreter::java_util_zip_CRC32_updateByteBuffer
+ : entry_point = generate_CRC32_updateBytes_entry(kind); break;
+#endif // CC_INTERP
+ default:
+ fatal(err_msg("unexpected method kind: %d", kind));
+ break;
+ }
+
+ if (entry_point) {
+ return entry_point;
+ }
+
+ return generate_normal_entry(synchronized);
+}
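
The new generate_method_entry() above centralizes the dispatch: a specialized generator runs first, and whenever it declines (returns NULL, as the PPC generate_math_entry() stub now does) the normal entry is generated instead, with the synchronized flag folded in. A standalone sketch of that fallback shape (ordinary C++, not interpreter code; all names below are made up):

    #include <cstdio>

    enum MethodKind { zerolocals, zerolocals_synchronized, native_call, math_sin };
    typedef const char* address_t;            // stand-in for a generated entry address

    static address_t generate_native(bool sync) { return sync ? "native+sync" : "native"; }
    static address_t generate_math(MethodKind /*kind*/) { return nullptr; }  // "unsupported"
    static address_t generate_normal(bool sync) { return sync ? "normal+sync" : "normal"; }

    static address_t method_entry(MethodKind kind) {
      bool synchronized = false;
      address_t entry = nullptr;
      switch (kind) {
        case zerolocals:              break;
        case zerolocals_synchronized: synchronized = true; break;
        case native_call:             entry = generate_native(false); break;
        case math_sin:                entry = generate_math(kind); break;
      }
      // A specialized generator may decline (NULL), e.g. math intrinsics on a
      // port that does not implement them; fall back to the normal entry.
      return entry != nullptr ? entry : generate_normal(synchronized);
    }

    int main() {
      std::printf("%s\n", method_entry(math_sin));                // normal
      std::printf("%s\n", method_entry(zerolocals_synchronized)); // normal+sync
      std::printf("%s\n", method_entry(native_call));             // native
      return 0;
    }
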
--- a/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/interpreter/interpreterGenerator.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,9 +37,11 @@
class InterpreterGenerator: public CC_INTERP_ONLY(CppInterpreterGenerator)
NOT_CC_INTERP(TemplateInterpreterGenerator) {
-public:
+ public:
-InterpreterGenerator(StubQueue* _code);
+ InterpreterGenerator(StubQueue* _code);
+ // entry point generator
+ address generate_method_entry(AbstractInterpreter::MethodKind kind);
#ifdef TARGET_ARCH_x86
# include "interpreterGenerator_x86.hpp"
--- a/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/interpreter/templateInterpreter.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -364,7 +364,7 @@
#define method_entry(kind) \
{ CodeletMark cm(_masm, "method entry point (kind = " #kind ")"); \
- Interpreter::_entry_table[Interpreter::kind] = generate_method_entry(Interpreter::kind); \
+ Interpreter::_entry_table[Interpreter::kind] = ((InterpreterGenerator*)this)->generate_method_entry(Interpreter::kind); \
}
// all non-native method kinds
--- a/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/interpreter/templateInterpreterGenerator.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -59,9 +59,6 @@
address generate_safept_entry_for(TosState state, address runtime_entry);
void generate_throw_exception();
- // entry point generator
-// address generate_method_entry(AbstractInterpreter::MethodKind kind);
-
// Instruction generation
void generate_and_dispatch (Template* t, TosState tos_out = ilgl);
void set_vtos_entry_points (Template* t, address& bep, address& cep, address& sep, address& aep, address& iep, address& lep, address& fep, address& dep, address& vep);
--- a/hotspot/src/share/vm/memory/allocation.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/allocation.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -265,7 +265,8 @@
f(ConstantPool) \
f(ConstantPoolCache) \
f(Annotation) \
- f(MethodCounters)
+ f(MethodCounters) \
+ f(Deallocated)
#define METASPACE_OBJ_TYPE_DECLARE(name) name ## Type,
#define METASPACE_OBJ_TYPE_NAME_CASE(name) case name ## Type: return #name;
--- a/hotspot/src/share/vm/memory/filemap.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/filemap.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -24,9 +24,14 @@
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
+#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionaryShared.hpp"
#include "classfile/altHashing.hpp"
#include "memory/filemap.hpp"
+#include "memory/metadataFactory.hpp"
+#include "memory/oopFactory.hpp"
+#include "oops/objArrayOop.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@@ -42,7 +47,6 @@
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
-
extern address JVM_FunctionAtStart();
extern address JVM_FunctionAtEnd();
@@ -78,16 +82,27 @@
void FileMapInfo::fail_continue(const char *msg, ...) {
va_list ap;
va_start(ap, msg);
- if (RequireSharedSpaces) {
- fail(msg, ap);
+ MetaspaceShared::set_archive_loading_failed();
+ if (PrintSharedArchiveAndExit && _validating_classpath_entry_table) {
+ // If we are doing PrintSharedArchiveAndExit and some of the classpath entries
+ // do not validate, we can still continue "limping" to validate the remaining
+ // entries. No need to quit.
+ tty->print("[");
+ tty->vprint(msg, ap);
+ tty->print_cr("]");
} else {
- if (PrintSharedSpaces) {
- tty->print_cr("UseSharedSpaces: %s", msg);
+ if (RequireSharedSpaces) {
+ fail(msg, ap);
+ } else {
+ if (PrintSharedSpaces) {
+ tty->print_cr("UseSharedSpaces: %s", msg);
+ }
}
}
va_end(ap);
UseSharedSpaces = false;
- close();
+ assert(current_info() != NULL, "singleton must be registered");
+ current_info()->close();
}
// Fill in the fileMapInfo structure with data about this VM instance.
@@ -122,67 +137,201 @@
}
}
+FileMapInfo::FileMapInfo() {
+ assert(_current_info == NULL, "must be singleton"); // not thread safe
+ _current_info = this;
+ memset(this, 0, sizeof(FileMapInfo));
+ _file_offset = 0;
+ _file_open = false;
+ _header = SharedClassUtil::allocate_file_map_header();
+ _header->_version = _invalid_version;
+}
+
+FileMapInfo::~FileMapInfo() {
+ assert(_current_info == this, "must be singleton"); // not thread safe
+ _current_info = NULL;
+}
+
void FileMapInfo::populate_header(size_t alignment) {
- _header._magic = 0xf00baba2;
- _header._version = _current_version;
- _header._alignment = alignment;
- _header._obj_alignment = ObjectAlignmentInBytes;
+ _header->populate(this, alignment);
+}
+
+size_t FileMapInfo::FileMapHeader::data_size() {
+ return SharedClassUtil::file_map_header_size() - sizeof(FileMapInfo::FileMapHeaderBase);
+}
+
+void FileMapInfo::FileMapHeader::populate(FileMapInfo* mapinfo, size_t alignment) {
+ _magic = 0xf00baba2;
+ _version = _current_version;
+ _alignment = alignment;
+ _obj_alignment = ObjectAlignmentInBytes;
+ _classpath_entry_table_size = mapinfo->_classpath_entry_table_size;
+ _classpath_entry_table = mapinfo->_classpath_entry_table;
+ _classpath_entry_size = mapinfo->_classpath_entry_size;
// The following fields are for sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
// JVM version string ... changes on each build.
- get_header_version(_header._jvm_ident);
+ get_header_version(_jvm_ident);
+}
+
+void FileMapInfo::allocate_classpath_entry_table() {
+ int bytes = 0;
+ int count = 0;
+ char* strptr = NULL;
+ char* strptr_max = NULL;
+ Thread* THREAD = Thread::current();
- // Build checks on classpath and jar files
- _header._num_jars = 0;
- ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
- for ( ; cpe != NULL; cpe = cpe->next()) {
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+ size_t entry_size = SharedClassUtil::shared_class_path_entry_size();
- if (cpe->is_jar_file()) {
- if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
- fail_stop("Too many jar files to share.", NULL);
- }
+ for (int pass=0; pass<2; pass++) {
+ ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
+
+ for (int cur_entry = 0 ; cpe != NULL; cpe = cpe->next(), cur_entry++) {
+ const char *name = cpe->name();
+ int name_bytes = (int)(strlen(name) + 1);
- // Jar file - record timestamp and file size.
- struct stat st;
- const char *path = cpe->name();
- if (os::stat(path, &st) != 0) {
- // If we can't access a jar file in the boot path, then we can't
- // make assumptions about where classes get loaded from.
- fail_stop("Unable to open jar file %s.", path);
- }
- _header._jar[_header._num_jars]._timestamp = st.st_mtime;
- _header._jar[_header._num_jars]._filesize = st.st_size;
- _header._num_jars++;
- } else {
+ if (pass == 0) {
+ count ++;
+ bytes += (int)entry_size;
+ bytes += name_bytes;
+ if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+ tty->print_cr("[Add main shared path (%s) %s]", (cpe->is_jar_file() ? "jar" : "dir"), name);
+ }
+ } else {
+ SharedClassPathEntry* ent = shared_classpath(cur_entry);
+ if (cpe->is_jar_file()) {
+ struct stat st;
+ if (os::stat(name, &st) != 0) {
+ // The file/dir must exist, or it would not have been added
+ // into ClassLoader::classpath_entry().
+ //
+ // If we can't access a jar file in the boot path, then we can't
+ // make assumptions about where classes get loaded from.
+ FileMapInfo::fail_stop("Unable to open jar file %s.", name);
+ }
- // If directories appear in boot classpath, they must be empty to
- // avoid having to verify each individual class file.
- const char* name = ((ClassPathDirEntry*)cpe)->name();
- if (!os::dir_is_empty(name)) {
- fail_stop("Boot classpath directory %s is not empty.", name);
+ EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
+ SharedClassUtil::update_shared_classpath(cpe, ent, st.st_mtime, st.st_size, THREAD);
+ } else {
+ ent->_filesize = -1;
+ if (!os::dir_is_empty(name)) {
+ ClassLoader::exit_with_path_failure("Cannot have non-empty directory in archived classpaths", name);
+ }
+ }
+ ent->_name = strptr;
+ if (strptr + name_bytes <= strptr_max) {
+ strncpy(strptr, name, (size_t)name_bytes); // name_bytes includes trailing 0.
+ strptr += name_bytes;
+ } else {
+ assert(0, "miscalculated buffer size");
+ }
}
}
+
+ if (pass == 0) {
+ EXCEPTION_MARK; // The following call should never throw, but would exit VM on error.
+ Array<u8>* arr = MetadataFactory::new_array<u8>(loader_data, (bytes + 7)/8, THREAD);
+ strptr = (char*)(arr->data());
+ strptr_max = strptr + bytes;
+ SharedClassPathEntry* table = (SharedClassPathEntry*)strptr;
+ strptr += entry_size * count;
+
+ _classpath_entry_table_size = count;
+ _classpath_entry_table = table;
+ _classpath_entry_size = entry_size;
+ }
}
}
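
The two passes above follow a size-then-fill pattern: pass 0 only counts the entries and the bytes they need (one fixed-size record per classpath entry plus its name string), pass 1 fills the single Array<u8> allocation, with all name strings packed after the record table. A minimal sketch of the same layout outside of HotSpot (hypothetical names, not part of this patch):

    #include <cstring>
    #include <string>
    #include <vector>

    // Stand-in for SharedClassPathEntry: fixed-size records first, the
    // variable-length name strings are packed after the whole table.
    struct Entry { const char* name; long filesize; };

    static char* pack_entries(const std::vector<std::string>& paths, Entry** table_out) {
      size_t bytes = paths.size() * sizeof(Entry);            // pass 0: size the buffer
      for (const std::string& p : paths) bytes += p.size() + 1;

      char* buf = new char[bytes];                            // single allocation
      Entry* table = reinterpret_cast<Entry*>(buf);
      char* strptr = buf + paths.size() * sizeof(Entry);      // strings follow the table

      for (size_t i = 0; i < paths.size(); i++) {             // pass 1: fill records + names
        table[i].filesize = -1;                               // -1 would mark a directory
        table[i].name = strptr;
        std::memcpy(strptr, paths[i].c_str(), paths[i].size() + 1);
        strptr += paths[i].size() + 1;
      }
      *table_out = table;
      return buf;                                             // caller owns the buffer
    }
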
+bool FileMapInfo::validate_classpath_entry_table() {
+ _validating_classpath_entry_table = true;
+
+ int count = _header->_classpath_entry_table_size;
+
+ _classpath_entry_table = _header->_classpath_entry_table;
+ _classpath_entry_size = _header->_classpath_entry_size;
+
+ for (int i=0; i<count; i++) {
+ SharedClassPathEntry* ent = shared_classpath(i);
+ struct stat st;
+ const char* name = ent->_name;
+ bool ok = true;
+ if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+ tty->print_cr("[Checking shared classpath entry: %s]", name);
+ }
+ if (os::stat(name, &st) != 0) {
+ fail_continue("Required classpath entry does not exist: %s", name);
+ ok = false;
+ } else if (ent->is_dir()) {
+ if (!os::dir_is_empty(name)) {
+ fail_continue("directory is not empty: %s", name);
+ ok = false;
+ }
+ } else {
+ if (ent->_timestamp != st.st_mtime ||
+ ent->_filesize != st.st_size) {
+ ok = false;
+ if (PrintSharedArchiveAndExit) {
+ fail_continue(ent->_timestamp != st.st_mtime ?
+ "Timestamp mismatch" :
+ "File size mismatch");
+ } else {
+ fail_continue("A jar file is not the one used while building"
+ " the shared archive file: %s", name);
+ }
+ }
+ }
+ if (ok) {
+ if (TraceClassPaths || (TraceClassLoading && Verbose)) {
+ tty->print_cr("[ok]");
+ }
+ } else if (!PrintSharedArchiveAndExit) {
+ _validating_classpath_entry_table = false;
+ return false;
+ }
+ }
+
+ _classpath_entry_table_size = _header->_classpath_entry_table_size;
+ _validating_classpath_entry_table = false;
+ return true;
+}
+
// Read the FileMapInfo information from the file.
bool FileMapInfo::init_from_file(int fd) {
-
- size_t n = read(fd, &_header, sizeof(struct FileMapHeader));
- if (n != sizeof(struct FileMapHeader)) {
+ size_t sz = _header->data_size();
+ char* addr = _header->data();
+ size_t n = os::read(fd, addr, (unsigned int)sz);
+ if (n != sz) {
fail_continue("Unable to read the file header.");
return false;
}
- if (_header._version != current_version()) {
+ if (_header->_version != current_version()) {
fail_continue("The shared archive file has the wrong version.");
return false;
}
_file_offset = (long)n;
+
+ size_t info_size = _header->_paths_misc_info_size;
+ _paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
+ if (_paths_misc_info == NULL) {
+ fail_continue("Unable to read the file header.");
+ return false;
+ }
+ n = os::read(fd, _paths_misc_info, (unsigned int)info_size);
+ if (n != info_size) {
+ fail_continue("Unable to read the shared path info header.");
+ FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
+ _paths_misc_info = NULL;
+ return false;
+ }
+
+ _file_offset += (long)n;
return true;
}
@@ -237,7 +386,16 @@
// Write the header to the file, seek to the next allocation boundary.
void FileMapInfo::write_header() {
- write_bytes_aligned(&_header, sizeof(FileMapHeader));
+ int info_size = ClassLoader::get_shared_paths_misc_info_size();
+
+ _header->_paths_misc_info_size = info_size;
+
+ align_file_position();
+ size_t sz = _header->data_size();
+ char* addr = _header->data();
+ write_bytes(addr, (int)sz); // skip the C++ vtable
+ write_bytes(ClassLoader::get_shared_paths_misc_info(), info_size);
+ align_file_position();
}
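
Because FileMapHeader now carries virtual methods (validate()/populate()), the in-memory object starts with a compiler-generated vtable pointer that would be meaningless in another JVM process; data() and data_size() deliberately skip sizeof(FileMapHeaderBase) so only the plain fields are read or written. The file therefore starts roughly with [FileMapHeader fields, no vtable][_paths_misc_info bytes][alignment padding][space regions...]. A minimal sketch of the vtable-skipping idea, using hypothetical types rather than the real HotSpot classes:

    #include <cstdio>

    struct Base   { virtual bool validate() = 0; };            // forces a vptr at offset 0
    struct Header : Base {
      int magic;
      int version;
      virtual bool validate() { return magic == (int)0xf00baba2; }
      char*  data()      { return (char*)this + sizeof(Base); }     // skip the vptr
      size_t data_size() { return sizeof(Header) - sizeof(Base); }  // plain fields only
    };

    void write_hdr(FILE* f, Header* h) { fwrite(h->data(), 1, h->data_size(), f); }
    void read_hdr (FILE* f, Header* h) {
      // Reading into data() overwrites only the fields; the vptr set up by the
      // constructor in this process stays valid.
      if (fread(h->data(), 1, h->data_size(), f) != h->data_size()) { /* handle error */ }
    }
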
@@ -247,7 +405,7 @@
align_file_position();
size_t used = space->used_bytes_slow(Metaspace::NonClassType);
size_t capacity = space->capacity_bytes_slow(Metaspace::NonClassType);
- struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
write_region(i, (char*)space->bottom(), used, capacity, read_only, false);
}
@@ -257,7 +415,7 @@
void FileMapInfo::write_region(int region, char* base, size_t size,
size_t capacity, bool read_only,
bool allow_exec) {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[region];
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[region];
if (_file_open) {
guarantee(si->_file_offset == _file_offset, "file offset mismatch.");
@@ -339,7 +497,7 @@
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
if (!si->_read_only) {
// the space is already readwrite so we are done
return true;
@@ -367,7 +525,7 @@
// Map the whole region at once, assumed to be allocated contiguously.
ReservedSpace FileMapInfo::reserve_shared_memory() {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[0];
char* requested_addr = si->_base;
size_t size = FileMapInfo::shared_spaces_size();
@@ -389,7 +547,7 @@
static const char* shared_region_name[] = { "ReadOnly", "ReadWrite", "MiscData", "MiscCode"};
char* FileMapInfo::map_region(int i) {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used;
size_t alignment = os::vm_allocation_granularity();
size_t size = align_size_up(used, alignment);
@@ -415,7 +573,7 @@
// Unmap a memory region in the address space.
void FileMapInfo::unmap_region(int i) {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
size_t used = si->_used;
size_t size = align_size_up(used, os::vm_allocation_granularity());
if (!os::unmap_memory(si->_base, size)) {
@@ -432,12 +590,21 @@
FileMapInfo* FileMapInfo::_current_info = NULL;
-
+SharedClassPathEntry* FileMapInfo::_classpath_entry_table = NULL;
+int FileMapInfo::_classpath_entry_table_size = 0;
+size_t FileMapInfo::_classpath_entry_size = 0x1234baad;
+bool FileMapInfo::_validating_classpath_entry_table = false;
// Open the shared archive file, read and validate the header
// information (version, boot classpath, etc.). If initialization
// fails, shared spaces are disabled and the file is closed. [See
// fail_continue.]
+//
+// Validation of the archive is done in two steps:
+//
+// [1] validate_header() - done here. This checks the header, including _paths_misc_info.
+// [2] validate_classpath_entry_table() - done later, because the table is in the RW
+// region of the archive, which is not mapped yet.
bool FileMapInfo::initialize() {
assert(UseSharedSpaces, "UseSharedSpaces expected.");
@@ -451,92 +618,66 @@
}
init_from_file(_fd);
- if (!validate()) {
+ if (!validate_header()) {
return false;
}
- SharedReadOnlySize = _header._space[0]._capacity;
- SharedReadWriteSize = _header._space[1]._capacity;
- SharedMiscDataSize = _header._space[2]._capacity;
- SharedMiscCodeSize = _header._space[3]._capacity;
+ SharedReadOnlySize = _header->_space[0]._capacity;
+ SharedReadWriteSize = _header->_space[1]._capacity;
+ SharedMiscDataSize = _header->_space[2]._capacity;
+ SharedMiscCodeSize = _header->_space[3]._capacity;
return true;
}
-
-bool FileMapInfo::validate() {
- if (_header._version != current_version()) {
- fail_continue("The shared archive file is the wrong version.");
+bool FileMapInfo::FileMapHeader::validate() {
+ if (_version != current_version()) {
+ FileMapInfo::fail_continue("The shared archive file is the wrong version.");
return false;
}
- if (_header._magic != (int)0xf00baba2) {
- fail_continue("The shared archive file has a bad magic number.");
+ if (_magic != (int)0xf00baba2) {
+ FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
return false;
}
char header_version[JVM_IDENT_MAX];
get_header_version(header_version);
- if (strncmp(_header._jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
- fail_continue("The shared archive file was created by a different"
- " version or build of HotSpot.");
- return false;
- }
- if (_header._obj_alignment != ObjectAlignmentInBytes) {
- fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
- " does not equal the current ObjectAlignmentInBytes of %d.",
- _header._obj_alignment, ObjectAlignmentInBytes);
- return false;
- }
-
- // Cannot verify interpreter yet, as it can only be created after the GC
- // heap has been initialized.
-
- if (_header._num_jars >= JVM_SHARED_JARS_MAX) {
- fail_continue("Too many jar files to share.");
+ if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
+ if (TraceClassPaths) {
+ tty->print_cr("Expected: %s", header_version);
+ tty->print_cr("Actual: %s", _jvm_ident);
+ }
+ FileMapInfo::fail_continue("The shared archive file was created by a different"
+ " version or build of HotSpot");
return false;
}
-
- // Build checks on classpath and jar files
- int num_jars_now = 0;
- ClassPathEntry *cpe = ClassLoader::classpath_entry(0);
- for ( ; cpe != NULL; cpe = cpe->next()) {
-
- if (cpe->is_jar_file()) {
- if (num_jars_now < _header._num_jars) {
-
- // Jar file - verify timestamp and file size.
- struct stat st;
- const char *path = cpe->name();
- if (os::stat(path, &st) != 0) {
- fail_continue("Unable to open jar file %s.", path);
- return false;
- }
- if (_header._jar[num_jars_now]._timestamp != st.st_mtime ||
- _header._jar[num_jars_now]._filesize != st.st_size) {
- fail_continue("A jar file is not the one used while building"
- " the shared archive file.");
- return false;
- }
- }
- ++num_jars_now;
- } else {
-
- // If directories appear in boot classpath, they must be empty to
- // avoid having to verify each individual class file.
- const char* name = ((ClassPathDirEntry*)cpe)->name();
- if (!os::dir_is_empty(name)) {
- fail_continue("Boot classpath directory %s is not empty.", name);
- return false;
- }
- }
- }
- if (num_jars_now < _header._num_jars) {
- fail_continue("The number of jar files in the boot classpath is"
- " less than the number the shared archive was created with.");
+ if (_obj_alignment != ObjectAlignmentInBytes) {
+ FileMapInfo::fail_continue("The shared archive file's ObjectAlignmentInBytes of %d"
+ " does not equal the current ObjectAlignmentInBytes of %d.",
+ _obj_alignment, ObjectAlignmentInBytes);
return false;
}
return true;
}
+bool FileMapInfo::validate_header() {
+ bool status = _header->validate();
+
+ if (status) {
+ if (!ClassLoader::check_shared_paths_misc_info(_paths_misc_info, _header->_paths_misc_info_size)) {
+ if (!PrintSharedArchiveAndExit) {
+ fail_continue("shared class paths mismatch (hint: enable -XX:+TraceClassPaths to diagnose the failure)");
+ status = false;
+ }
+ }
+ }
+
+ if (_paths_misc_info != NULL) {
+ FREE_C_HEAP_ARRAY(char, _paths_misc_info, mtClass);
+ _paths_misc_info = NULL;
+ }
+ return status;
+}
+
// The following method is provided to see whether a given pointer
// falls in the mapped shared space.
// Param:
@@ -545,8 +686,8 @@
// True if the p is within the mapped shared space, otherwise, false.
bool FileMapInfo::is_in_shared_space(const void* p) {
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
- if (p >= _header._space[i]._base &&
- p < _header._space[i]._base + _header._space[i]._used) {
+ if (p >= _header->_space[i]._base &&
+ p < _header->_space[i]._base + _header->_space[i]._used) {
return true;
}
}
@@ -557,7 +698,7 @@
void FileMapInfo::print_shared_spaces() {
gclog_or_tty->print_cr("Shared Spaces:");
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
- struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+ struct FileMapInfo::FileMapHeader::space_info* si = &_header->_space[i];
gclog_or_tty->print(" %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
shared_region_name[i],
si->_base, si->_base + si->_used);
@@ -570,9 +711,9 @@
if (map_info) {
map_info->fail_continue(msg);
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
- if (map_info->_header._space[i]._base != NULL) {
+ if (map_info->_header->_space[i]._base != NULL) {
map_info->unmap_region(i);
- map_info->_header._space[i]._base = NULL;
+ map_info->_header->_space[i]._base = NULL;
}
}
} else if (DumpSharedSpaces) {
--- a/hotspot/src/share/vm/memory/filemap.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/filemap.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -37,30 +37,55 @@
// misc data (block offset table, string table, symbols, dictionary, etc.)
// tag(666)
-static const int JVM_SHARED_JARS_MAX = 128;
-static const int JVM_SPACENAME_MAX = 128;
static const int JVM_IDENT_MAX = 256;
-static const int JVM_ARCH_MAX = 12;
-
class Metaspace;
+class SharedClassPathEntry VALUE_OBJ_CLASS_SPEC {
+public:
+ const char *_name;
+ time_t _timestamp; // jar timestamp, 0 if is directory
+ long _filesize; // jar file size, -1 if is directory
+ bool is_dir() {
+ return _filesize == -1;
+ }
+};
+
class FileMapInfo : public CHeapObj<mtInternal> {
private:
+ friend class ManifestStream;
enum {
_invalid_version = -1,
- _current_version = 1
+ _current_version = 2
};
bool _file_open;
int _fd;
long _file_offset;
+private:
+ static SharedClassPathEntry* _classpath_entry_table;
+ static int _classpath_entry_table_size;
+ static size_t _classpath_entry_size;
+ static bool _validating_classpath_entry_table;
+
// FileMapHeader describes the shared space data in the file to be
// mapped. This structure gets written to a file. It is not a class, so
// that the compilers don't add any compiler-private data to it.
- struct FileMapHeader {
+public:
+ struct FileMapHeaderBase : public CHeapObj<mtClass> {
+ virtual bool validate() = 0;
+ virtual void populate(FileMapInfo* info, size_t alignment) = 0;
+ };
+ struct FileMapHeader : FileMapHeaderBase {
+ // Use data() and data_size() to memcpy to/from the FileMapHeader. We need to
+ // avoid reading/writing the C++ vtable pointer.
+ static size_t data_size();
+ char* data() {
+ return ((char*)this) + sizeof(FileMapHeaderBase);
+ }
+
int _magic; // identify file type.
int _version; // (from enum, above.)
size_t _alignment; // how shared archive should be aligned
@@ -78,44 +103,64 @@
// The following fields are all sanity checks for whether this archive
// will function correctly with this JVM and the bootclasspath it's
// invoked with.
- char _arch[JVM_ARCH_MAX]; // architecture
char _jvm_ident[JVM_IDENT_MAX]; // identifier for jvm
- int _num_jars; // Number of jars in bootclasspath
- // Per jar file data: timestamp, size.
+ // The _paths_misc_info is a variable-size structure that records "miscellaneous"
+ // information during dumping. It is generated and validated by the
+ // SharedPathsMiscInfo class. See sharedPathsMiscInfo.hpp and sharedClassUtil.hpp for
+ // a detailed description.
+ //
+ // The _paths_misc_info data is stored as a byte array in the archive file header,
+ // immediately after the _header field. This information is used only when
+ // checking the validity of the archive and is deallocated after the archive is loaded.
+ //
+ // Note that the _paths_misc_info does NOT include information for JAR files
+ // that existed during dump time. Their information is stored in _classpath_entry_table.
+ int _paths_misc_info_size;
- struct {
- time_t _timestamp; // jar timestamp.
- long _filesize; // jar file size.
- } _jar[JVM_SHARED_JARS_MAX];
- } _header;
+ // The following is a table of all the class path entries that were used
+ // during dumping. At run time, we require these files to exist and have the same
+ // size/modification time, or else the archive will refuse to load.
+ //
+ // All of these entries must be JAR files. The dumping process would fail if a non-empty
+ // directory was specified in the classpath. If an empty directory was specified,
+ // it is checked via the _paths_misc_info as described above.
+ //
+ // FIXME -- if JAR files in the tail of the list were specified but not used during dumping,
+ // they should be removed from this table, to save space and to avoid spurious
+ // loading failures during runtime.
+ int _classpath_entry_table_size;
+ size_t _classpath_entry_size;
+ SharedClassPathEntry* _classpath_entry_table;
+
+ virtual bool validate();
+ virtual void populate(FileMapInfo* info, size_t alignment);
+ };
+
+ FileMapHeader * _header;
+
const char* _full_path;
+ char* _paths_misc_info;
static FileMapInfo* _current_info;
bool init_from_file(int fd);
void align_file_position();
+ bool validate_header_impl();
public:
- FileMapInfo() {
- _file_offset = 0;
- _file_open = false;
- _header._version = _invalid_version;
- }
+ FileMapInfo();
+ ~FileMapInfo();
static int current_version() { return _current_version; }
void populate_header(size_t alignment);
- bool validate();
+ bool validate_header();
void invalidate();
- int version() { return _header._version; }
- size_t alignment() { return _header._alignment; }
- size_t space_capacity(int i) { return _header._space[i]._capacity; }
- char* region_base(int i) { return _header._space[i]._base; }
- struct FileMapHeader* header() { return &_header; }
-
- static void set_current_info(FileMapInfo* info) {
- CDS_ONLY(_current_info = info;)
- }
+ int version() { return _header->_version; }
+ size_t alignment() { return _header->_alignment; }
+ size_t space_capacity(int i) { return _header->_space[i]._capacity; }
+ char* region_base(int i) { return _header->_space[i]._base; }
+ struct FileMapHeader* header() { return _header; }
static FileMapInfo* current_info() {
CDS_ONLY(return _current_info;)
@@ -146,7 +191,7 @@
// Errors.
static void fail_stop(const char *msg, ...);
- void fail_continue(const char *msg, ...);
+ static void fail_continue(const char *msg, ...);
// Return true if given address is in the mapped shared space.
bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
@@ -160,6 +205,22 @@
// Stop CDS sharing and unmap CDS regions.
static void stop_sharing_and_unmap(const char* msg);
+
+ static void allocate_classpath_entry_table();
+ bool validate_classpath_entry_table();
+
+ static SharedClassPathEntry* shared_classpath(int index) {
+ char* p = (char*)_classpath_entry_table;
+ p += _classpath_entry_size * index;
+ return (SharedClassPathEntry*)p;
+ }
+ static const char* shared_classpath_name(int index) {
+ return shared_classpath(index)->_name;
+ }
+
+ static int get_number_of_share_classpaths() {
+ return _classpath_entry_table_size;
+ }
};
#endif // SHARE_VM_MEMORY_FILEMAP_HPP
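
Note that shared_classpath() above indexes the table by byte offset rather than by plain array indexing: _classpath_entry_size is chosen at dump time via SharedClassUtil::shared_class_path_entry_size() and may be larger than sizeof(SharedClassPathEntry) when a subclass appends extra fields. A generic sketch of that stride-based access (hypothetical names):

    #include <cstddef>

    struct BaseEntry { const char* name; long filesize; };

    // Records are laid out back to back with a runtime-chosen stride, so
    // table[i] would be wrong whenever stride > sizeof(BaseEntry).
    inline BaseEntry* entry_at(void* table, size_t stride, int index) {
      return reinterpret_cast<BaseEntry*>(static_cast<char*>(table) + stride * index);
    }
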
--- a/hotspot/src/share/vm/memory/metadataFactory.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/metadataFactory.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -79,6 +79,12 @@
// Deallocation method for metadata
template <class T>
static void free_metadata(ClassLoaderData* loader_data, T md) {
+ if (DumpSharedSpaces) {
+ // FIXME: the freeing code is buggy, especially when PrintSharedSpaces is enabled.
+ // Disable for now -- this means if you specify bad classes in your classlist you
+ // may have wasted space inside the archive.
+ return;
+ }
if (md != NULL) {
assert(loader_data != NULL, "shouldn't pass null");
int size = md->size();
--- a/hotspot/src/share/vm/memory/metaspace.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -413,6 +413,7 @@
VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
+#if INCLUDE_CDS
// This allocates memory with mmap. For DumpSharedspaces, try to reserve
// configurable address, generally at the top of the Java heap so other
// memory addresses don't conflict.
@@ -428,7 +429,9 @@
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
}
MetaspaceShared::set_shared_rs(&_rs);
- } else {
+ } else
+#endif
+ {
bool large_pages = should_commit_large_pages_when_reserving(bytes);
_rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
@@ -2939,11 +2942,14 @@
// between the lower base and higher address.
address lower_base;
address higher_address;
+#if INCLUDE_CDS
if (UseSharedSpaces) {
higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
(address)(metaspace_base + compressed_class_space_size()));
lower_base = MIN2(metaspace_base, cds_base);
- } else {
+ } else
+#endif
+ {
higher_address = metaspace_base + compressed_class_space_size();
lower_base = metaspace_base;
@@ -2964,6 +2970,7 @@
}
}
+#if INCLUDE_CDS
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
@@ -2974,6 +2981,7 @@
(address)(metaspace_base + compressed_class_space_size()));
return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
+#endif
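
As a concrete, illustrative example of the check above (assuming UnscaledClassSpaceMax is 4G, as used elsewhere for unscaled narrow klass pointers): with cds_base = 0x800000000, shared_spaces_size() = 0x10000000 and a 1G compressed class space at metaspace_base = 0x810000000, lower_base is 0x800000000 and higher_address is 0x850000000, so higher_address - lower_base = 0x50000000 (1.25G) <= 4G, and the CDS archive and the class space can share an unscaled narrow-klass encoding.
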
// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
@@ -2993,6 +3001,7 @@
large_pages,
requested_addr, 0);
if (!metaspace_rs.is_reserved()) {
+#if INCLUDE_CDS
if (UseSharedSpaces) {
size_t increment = align_size_up(1*G, _reserve_alignment);
@@ -3007,7 +3016,7 @@
_reserve_alignment, large_pages, addr, 0);
}
}
-
+#endif
// If no successful allocation then try to allocate the space anywhere. If
// that fails then OOM doom. At this point we cannot try allocating the
// metaspace as if UseCompressedClassPointers is off because too much
@@ -3026,12 +3035,13 @@
// If we got here then the metaspace got allocated.
MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
+#if INCLUDE_CDS
// Verify that we can use shared spaces. Otherwise, turn off CDS.
if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
FileMapInfo::stop_sharing_and_unmap(
"Could not allocate metaspace at a compatible address");
}
-
+#endif
set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
UseSharedSpaces ? (address)cds_base : 0);
@@ -3115,6 +3125,7 @@
MetaspaceShared::set_max_alignment(max_alignment);
if (DumpSharedSpaces) {
+#if INCLUDE_CDS
SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
SharedMiscDataSize = align_size_up(SharedMiscDataSize, max_alignment);
@@ -3152,23 +3163,22 @@
}
Universe::set_narrow_klass_shift(0);
-#endif
-
+#endif // _LP64
+#endif // INCLUDE_CDS
} else {
+#if INCLUDE_CDS
// If using shared space, open the file that contains the shared space
// and map in the memory before initializing the rest of metaspace (so
// the addresses don't conflict)
address cds_address = NULL;
if (UseSharedSpaces) {
FileMapInfo* mapinfo = new FileMapInfo();
- memset(mapinfo, 0, sizeof(FileMapInfo));
// Open the shared archive file, read and validate the header. If
// initialization fails, shared spaces [UseSharedSpaces] are
// disabled and the file is closed.
// Map in spaces now also
if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
- FileMapInfo::set_current_info(mapinfo);
cds_total = FileMapInfo::shared_spaces_size();
cds_address = (address)mapinfo->region_base(0);
} else {
@@ -3176,21 +3186,23 @@
"archive file not closed or shared spaces not disabled.");
}
}
-
+#endif // INCLUDE_CDS
#ifdef _LP64
// If UseCompressedClassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists).
if (using_class_space()) {
if (UseSharedSpaces) {
+#if INCLUDE_CDS
char* cds_end = (char*)(cds_address + cds_total);
cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
+#endif
} else {
char* base = (char*)align_ptr_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
allocate_metaspace_compressed_klass_ptrs(base, 0);
}
}
-#endif
+#endif // _LP64
// Initialize these before initializing the VirtualSpaceList
_first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
@@ -3380,6 +3392,10 @@
assert(!SafepointSynchronize::is_at_safepoint()
|| Thread::current()->is_VM_thread(), "should be the VM thread");
+ if (DumpSharedSpaces && PrintSharedSpaces) {
+ record_deallocation(ptr, vsm()->get_raw_word_size(word_size));
+ }
+
MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
@@ -3417,8 +3433,9 @@
if (result == NULL) {
report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
}
-
- space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+ if (PrintSharedSpaces) {
+ space->record_allocation(result, type, space->vsm()->get_raw_word_size(word_size));
+ }
// Zero initialize.
Copy::fill_to_aligned_words((HeapWord*)result, word_size, 0);
@@ -3517,15 +3534,55 @@
void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
assert(DumpSharedSpaces, "sanity");
- AllocRecord *rec = new AllocRecord((address)ptr, type, (int)word_size * HeapWordSize);
+ int byte_size = (int)word_size * HeapWordSize;
+ AllocRecord *rec = new AllocRecord((address)ptr, type, byte_size);
+
if (_alloc_record_head == NULL) {
_alloc_record_head = _alloc_record_tail = rec;
- } else {
+ } else if (_alloc_record_tail->_ptr + _alloc_record_tail->_byte_size == (address)ptr) {
_alloc_record_tail->_next = rec;
_alloc_record_tail = rec;
+ } else {
+ // slow linear search, but this doesn't happen that often, and only when dumping
+ for (AllocRecord *old = _alloc_record_head; old; old = old->_next) {
+ if (old->_ptr == ptr) {
+ assert(old->_type == MetaspaceObj::DeallocatedType, "sanity");
+ int remain_bytes = old->_byte_size - byte_size;
+ assert(remain_bytes >= 0, "sanity");
+ old->_type = type;
+
+ if (remain_bytes == 0) {
+ delete(rec);
+ } else {
+ address remain_ptr = address(ptr) + byte_size;
+ rec->_ptr = remain_ptr;
+ rec->_byte_size = remain_bytes;
+ rec->_type = MetaspaceObj::DeallocatedType;
+ rec->_next = old->_next;
+ old->_byte_size = byte_size;
+ old->_next = rec;
+ }
+ return;
+ }
+ }
+ assert(0, "reallocating a freed pointer that was not recorded");
}
}
+void Metaspace::record_deallocation(void* ptr, size_t word_size) {
+ assert(DumpSharedSpaces, "sanity");
+
+ for (AllocRecord *rec = _alloc_record_head; rec; rec = rec->_next) {
+ if (rec->_ptr == ptr) {
+ assert(rec->_byte_size == (int)word_size * HeapWordSize, "sanity");
+ rec->_type = MetaspaceObj::DeallocatedType;
+ return;
+ }
+ }
+
+ assert(0, "deallocating a pointer that was not recorded");
+}
+
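A worked example of the reuse/split path in record_allocation() above: suppose a 64-byte block at address A was recorded, later deallocated (its record's type flips to DeallocatedType), and then a 24-byte object is allocated at the same address A. The linear search finds the old record, reuses it for the first 24 bytes with the new type, and splices a fresh DeallocatedType record in after it covering the remaining 40 bytes at A + 24, so the address-ordered record list still covers the region without gaps.
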
void Metaspace::iterate(Metaspace::AllocRecordClosure *closure) {
assert(DumpSharedSpaces, "unimplemented for !DumpSharedSpaces");
--- a/hotspot/src/share/vm/memory/metaspace.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/metaspace.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -171,9 +171,10 @@
static const MetaspaceTracer* tracer() { return _tracer; }
private:
- // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
+ // These 2 methods are used by DumpSharedSpaces only, where only _vsm is used. So we will
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
+ void record_deallocation(void* ptr, size_t word_size);
#ifdef _LP64
static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -26,6 +26,7 @@
#include "classfile/dictionary.hpp"
#include "classfile/loaderConstraints.hpp"
#include "classfile/placeholders.hpp"
+#include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
@@ -47,6 +48,10 @@
ReservedSpace* MetaspaceShared::_shared_rs = NULL;
+bool MetaspaceShared::_link_classes_made_progress;
+bool MetaspaceShared::_check_classes_made_progress;
+bool MetaspaceShared::_has_error_classes;
+bool MetaspaceShared::_archive_loading_failed = false;
// Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file.
@@ -446,6 +451,23 @@
SystemDictionary::classes_do(collect_classes);
tty->print_cr("Number of classes %d", _global_klass_objects->length());
+ {
+ int num_type_array = 0, num_obj_array = 0, num_inst = 0;
+ for (int i = 0; i < _global_klass_objects->length(); i++) {
+ Klass* k = _global_klass_objects->at(i);
+ if (k->oop_is_instance()) {
+ num_inst ++;
+ } else if (k->oop_is_objArray()) {
+ num_obj_array ++;
+ } else {
+ assert(k->oop_is_typeArray(), "sanity");
+ num_type_array ++;
+ }
+ }
+ tty->print_cr(" instance classes = %5d", num_inst);
+ tty->print_cr(" obj array classes = %5d", num_obj_array);
+ tty->print_cr(" type array classes = %5d", num_type_array);
+ }
// Update all the fingerprints in the shared methods.
tty->print("Calculating fingerprints ... ");
@@ -611,38 +633,58 @@
#undef fmt_space
}
-static void link_shared_classes(Klass* obj, TRAPS) {
+
+void MetaspaceShared::link_one_shared_class(Klass* obj, TRAPS) {
Klass* k = obj;
if (k->oop_is_instance()) {
InstanceKlass* ik = (InstanceKlass*) k;
// Link the class to cause the bytecodes to be rewritten and the
- // cpcache to be created.
- if (ik->init_state() < InstanceKlass::linked) {
- ik->link_class(THREAD);
- guarantee(!HAS_PENDING_EXCEPTION, "exception in class rewriting");
+ // cpcache to be created. Class verification is done according
+ // to -Xverify setting.
+ _link_classes_made_progress |= try_link_class(ik, THREAD);
+ guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
+ }
+}
+
+void MetaspaceShared::check_one_shared_class(Klass* k) {
+ if (k->oop_is_instance() && InstanceKlass::cast(k)->check_sharing_error_state()) {
+ _check_classes_made_progress = true;
+ }
+}
+
+void MetaspaceShared::link_and_cleanup_shared_classes(TRAPS) {
+ // We need to iterate because verification may cause additional classes
+ // to be loaded.
+ do {
+ _link_classes_made_progress = false;
+ SystemDictionary::classes_do(link_one_shared_class, THREAD);
+ guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
+ } while (_link_classes_made_progress);
+
+ if (_has_error_classes) {
+ // Mark all classes whose super class or interfaces failed verification.
+ do {
+ // Not completely sure if we need to do this iteratively. Anyway,
+ // we should come here only if there are unverifiable classes, which
+ // shouldn't happen in normal cases. So better safe than sorry.
+ _check_classes_made_progress = false;
+ SystemDictionary::classes_do(check_one_shared_class);
+ } while (_check_classes_made_progress);
+
+ if (IgnoreUnverifiableClassesDuringDump) {
+ // This is useful when running JCK or SQE tests. You should not
+ // enable this when running real apps.
+ SystemDictionary::remove_classes_in_error_state();
+ } else {
+ tty->print_cr("Please remove the unverifiable classes from your class list and try again");
+ exit(1);
}
}
}
-
-// Support for a simple checksum of the contents of the class list
-// file to prevent trivial tampering. The algorithm matches that in
-// the MakeClassList program used by the J2SE build process.
-#define JSUM_SEED ((jlong)CONST64(0xcafebabebabecafe))
-static jlong
-jsum(jlong start, const char *buf, const int len)
-{
- jlong h = start;
- char *p = (char *)buf, *e = p + len;
- while (p < e) {
- char c = *p++;
- if (c <= ' ') {
- /* Skip spaces and control characters */
- continue;
- }
- h = 31 * h + c;
- }
- return h;
+void MetaspaceShared::prepare_for_dumping() {
+ ClassLoader::initialize_shared_path();
+ FileMapInfo::allocate_classpath_entry_table();
}
// Preload classes from a list, populate the shared spaces and dump to a
@@ -651,72 +693,112 @@
TraceTime timer("Dump Shared Spaces", TraceStartupTime);
ResourceMark rm;
+ tty->print_cr("Allocated shared space: %d bytes at " PTR_FORMAT,
+ MetaspaceShared::shared_rs()->size(),
+ MetaspaceShared::shared_rs()->base());
+
// Preload classes to be shared.
// Should use some os:: method rather than fopen() here. aB.
- // Construct the path to the class list (in jre/lib)
- // Walk up two directories from the location of the VM and
- // optionally tack on "lib" (depending on platform)
- char class_list_path[JVM_MAXPATHLEN];
- os::jvm_path(class_list_path, sizeof(class_list_path));
- for (int i = 0; i < 3; i++) {
- char *end = strrchr(class_list_path, *os::file_separator());
- if (end != NULL) *end = '\0';
+ const char* class_list_path;
+ if (SharedClassListFile == NULL) {
+ // Construct the path to the class list (in jre/lib)
+ // Walk up two directories from the location of the VM and
+ // optionally tack on "lib" (depending on platform)
+ char class_list_path_str[JVM_MAXPATHLEN];
+ os::jvm_path(class_list_path_str, sizeof(class_list_path_str));
+ for (int i = 0; i < 3; i++) {
+ char *end = strrchr(class_list_path_str, *os::file_separator());
+ if (end != NULL) *end = '\0';
+ }
+ int class_list_path_len = (int)strlen(class_list_path_str);
+ if (class_list_path_len >= 3) {
+ if (strcmp(class_list_path_str + class_list_path_len - 3, "lib") != 0) {
+ strcat(class_list_path_str, os::file_separator());
+ strcat(class_list_path_str, "lib");
+ }
+ }
+ strcat(class_list_path_str, os::file_separator());
+ strcat(class_list_path_str, "classlist");
+ class_list_path = class_list_path_str;
+ } else {
+ class_list_path = SharedClassListFile;
}
- int class_list_path_len = (int)strlen(class_list_path);
- if (class_list_path_len >= 3) {
- if (strcmp(class_list_path + class_list_path_len - 3, "lib") != 0) {
- strcat(class_list_path, os::file_separator());
- strcat(class_list_path, "lib");
- }
+
+ int class_count = 0;
+ GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
+
+ // sun.io.Converters
+ static const char obj_array_sig[] = "[[Ljava/lang/Object;";
+ SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
+
+ // java.util.HashMap
+ static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
+ SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
+
+ tty->print_cr("Loading classes to share ...");
+ _has_error_classes = false;
+ class_count += preload_and_dump(class_list_path, class_promote_order,
+ THREAD);
+ if (ExtraSharedClassListFile) {
+ class_count += preload_and_dump(ExtraSharedClassListFile, class_promote_order,
+ THREAD);
+ }
+ tty->print_cr("Loading classes to share: done.");
+
+ if (PrintSharedSpaces) {
+ tty->print_cr("Shared spaces: preloaded %d classes", class_count);
}
- strcat(class_list_path, os::file_separator());
- strcat(class_list_path, "classlist");
+
+ // Rewrite and link classes
+ tty->print_cr("Rewriting and linking classes ...");
+
+ // Link any classes which got missed. This would happen if we have loaded classes that
+ // were not explicitly specified in the classlist. E.g., if an interface implemented by class K
+ // fails verification, all other interfaces that were not specified in the classlist but
+ // are implemented by K are not verified.
+ link_and_cleanup_shared_classes(CATCH);
+ tty->print_cr("Rewriting and linking classes: done");
+ // Create and dump the shared spaces. Everything so far is loaded
+ // with the null class loader.
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
+ VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
+ VMThread::execute(&op);
+
+ // Since various initialization steps have been undone by this process,
+ // it is not reasonable to continue running a java process.
+ exit(0);
+}
+
+int MetaspaceShared::preload_and_dump(const char * class_list_path,
+ GrowableArray<Klass*>* class_promote_order,
+ TRAPS) {
FILE* file = fopen(class_list_path, "r");
+ char class_name[256];
+ int class_count = 0;
+
if (file != NULL) {
- jlong computed_jsum = JSUM_SEED;
- jlong file_jsum = 0;
-
- char class_name[256];
- int class_count = 0;
- GrowableArray<Klass*>* class_promote_order = new GrowableArray<Klass*>();
-
- // sun.io.Converters
- static const char obj_array_sig[] = "[[Ljava/lang/Object;";
- SymbolTable::new_permanent_symbol(obj_array_sig, THREAD);
-
- // java.util.HashMap
- static const char map_entry_array_sig[] = "[Ljava/util/Map$Entry;";
- SymbolTable::new_permanent_symbol(map_entry_array_sig, THREAD);
-
- tty->print("Loading classes to share ... ");
while ((fgets(class_name, sizeof class_name, file)) != NULL) {
- if (*class_name == '#') {
- jint fsh, fsl;
- if (sscanf(class_name, "# %8x%8x\n", &fsh, &fsl) == 2) {
- file_jsum = ((jlong)(fsh) << 32) | (fsl & 0xffffffff);
- }
-
+ if (*class_name == '#') { // comment
continue;
}
// Remove trailing newline
size_t name_len = strlen(class_name);
- class_name[name_len-1] = '\0';
-
- computed_jsum = jsum(computed_jsum, class_name, (const int)name_len - 1);
+ if (class_name[name_len-1] == '\n') {
+ class_name[name_len-1] = '\0';
+ }
// Got a class name - load it.
TempNewSymbol class_name_symbol = SymbolTable::new_permanent_symbol(class_name, THREAD);
guarantee(!HAS_PENDING_EXCEPTION, "Exception creating a symbol.");
Klass* klass = SystemDictionary::resolve_or_null(class_name_symbol,
THREAD);
- guarantee(!HAS_PENDING_EXCEPTION, "Exception resolving a class.");
+ CLEAR_PENDING_EXCEPTION;
if (klass != NULL) {
if (PrintSharedSpaces && Verbose && WizardMode) {
tty->print_cr("Shared spaces preloaded: %s", class_name);
}
-
InstanceKlass* ik = InstanceKlass::cast(klass);
// Should be class load order as per -XX:+TraceClassLoadingPreorder
@@ -726,52 +808,14 @@
// cpcache to be created. The linking is done as soon as classes
// are loaded in order that the related data structures (klass and
// cpCache) are located together.
-
- if (ik->init_state() < InstanceKlass::linked) {
- ik->link_class(THREAD);
- guarantee(!(HAS_PENDING_EXCEPTION), "exception in class rewriting");
- }
-
- // TODO: Resolve klasses in constant pool
- ik->constants()->resolve_class_constants(THREAD);
+ try_link_class(ik, THREAD);
+ guarantee(!HAS_PENDING_EXCEPTION, "exception in link_class");
class_count++;
} else {
- if (PrintSharedSpaces && Verbose && WizardMode) {
- tty->cr();
- tty->print_cr(" Preload failed: %s", class_name);
- }
+ //tty->print_cr("Preload failed: %s", class_name);
}
- file_jsum = 0; // Checksum must be on last line of file
- }
- if (computed_jsum != file_jsum) {
- tty->cr();
- tty->print_cr("Preload failed: checksum of class list was incorrect.");
- exit(1);
- }
-
- tty->print_cr("done. ");
-
- if (PrintSharedSpaces) {
- tty->print_cr("Shared spaces: preloaded %d classes", class_count);
}
-
- // Rewrite and unlink classes.
- tty->print("Rewriting and linking classes ... ");
-
- // Link any classes which got missed. (It's not quite clear why
- // they got missed.) This iteration would be unsafe if we weren't
- // single-threaded at this point; however we can't do it on the VM
- // thread because it requires object allocation.
- SystemDictionary::classes_do(link_shared_classes, CATCH);
- tty->print_cr("done. ");
-
- // Create and dump the shared spaces. Everything so far is loaded
- // with the null class loader.
- ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
- VM_PopulateDumpSharedSpace op(loader_data, class_promote_order);
- VMThread::execute(&op);
-
} else {
char errmsg[JVM_MAXPATHLEN];
os::lasterror(errmsg, JVM_MAXPATHLEN);
@@ -779,11 +823,39 @@
exit(1);
}
- // Since various initialization steps have been undone by this process,
- // it is not reasonable to continue running a java process.
- exit(0);
+ return class_count;
}
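
For reference, the class list consumed by preload_and_dump() is now a plain text file: one slash-separated class name per line, lines starting with '#' treated as comments, and no trailing checksum line (the old jsum check is gone). An illustrative file might look like:

    # classes to preload into the shared archive
    java/lang/Object
    java/lang/String
    java/util/HashMap
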
+// Returns true if the class's status has changed
+bool MetaspaceShared::try_link_class(InstanceKlass* ik, TRAPS) {
+ assert(DumpSharedSpaces, "should only be called during dumping");
+ if (ik->init_state() < InstanceKlass::linked) {
+ bool saved = BytecodeVerificationLocal;
+ if (!SharedClassUtil::is_shared_boot_class(ik)) {
+ // The verification decision is based on BytecodeVerificationRemote
+ // for non-system classes. Since we are using the NULL classloader
+ // to load non-system classes during dumping, we need to temporarily
+ // change BytecodeVerificationLocal to be the same as
+ // BytecodeVerificationRemote. Note that this can also cause the parent
+ // system classes to be verified. The extra overhead is acceptable during
+ // dumping.
+ BytecodeVerificationLocal = BytecodeVerificationRemote;
+ }
+ ik->link_class(THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ ResourceMark rm;
+ tty->print_cr("Preload Error: Verification failed for %s",
+ ik->external_name());
+ CLEAR_PENDING_EXCEPTION;
+ ik->set_in_error_state();
+ _has_error_classes = true;
+ }
+ BytecodeVerificationLocal = saved;
+ return true;
+ } else {
+ return false;
+ }
+}
// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.
@@ -867,7 +939,8 @@
(_rw_base = mapinfo->map_region(rw)) != NULL &&
(_md_base = mapinfo->map_region(md)) != NULL &&
(_mc_base = mapinfo->map_region(mc)) != NULL &&
- (image_alignment == (size_t)max_alignment())) {
+ (image_alignment == (size_t)max_alignment()) &&
+ mapinfo->validate_classpath_entry_table()) {
// Success (no need to do anything)
return true;
} else {
@@ -884,7 +957,7 @@
// If -Xshare:on is specified, print out the error message and exit VM,
// otherwise, set UseSharedSpaces to false and continue.
if (RequireSharedSpaces) {
- vm_exit_during_initialization("Unable to use shared archive.", NULL);
+ vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
} else {
FLAG_SET_DEFAULT(UseSharedSpaces, false);
}
@@ -984,6 +1057,20 @@
// Close the mapinfo file
mapinfo->close();
+
+ if (PrintSharedArchiveAndExit) {
+ if (PrintSharedDictionary) {
+ tty->print_cr("\nShared classes:\n");
+ SystemDictionary::print_shared(false);
+ }
+ if (_archive_loading_failed) {
+ tty->print_cr("archive is invalid");
+ vm_exit(1);
+ } else {
+ tty->print_cr("archive is valid");
+ vm_exit(0);
+ }
+ }
}
// JVM/TI RedefineClasses() support:
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -38,7 +38,10 @@
// CDS support
static ReservedSpace* _shared_rs;
static int _max_alignment;
-
+ static bool _link_classes_made_progress;
+ static bool _check_classes_made_progress;
+ static bool _has_error_classes;
+ static bool _archive_loading_failed;
public:
enum {
vtbl_list_size = 17, // number of entries in the shared space vtable list.
@@ -67,7 +70,11 @@
NOT_CDS(return 0);
}
+ static void prepare_for_dumping() NOT_CDS_RETURN;
static void preload_and_dump(TRAPS) NOT_CDS_RETURN;
+ static int preload_and_dump(const char * class_list_path,
+ GrowableArray<Klass*>* class_promote_order,
+ TRAPS) NOT_CDS_RETURN;
static ReservedSpace* shared_rs() {
CDS_ONLY(return _shared_rs);
@@ -78,6 +85,9 @@
CDS_ONLY(_shared_rs = rs;)
}
+ static void set_archive_loading_failed() {
+ _archive_loading_failed = true;
+ }
static bool map_shared_spaces(FileMapInfo* mapinfo) NOT_CDS_RETURN_(false);
static void initialize_shared_spaces() NOT_CDS_RETURN;
@@ -97,5 +107,10 @@
static bool remap_shared_readonly_as_readwrite() NOT_CDS_RETURN_(true);
static void print_shared_spaces();
+
+ static bool try_link_class(InstanceKlass* ik, TRAPS);
+ static void link_one_shared_class(Klass* obj, TRAPS);
+ static void check_one_shared_class(Klass* obj);
+ static void link_and_cleanup_shared_classes(TRAPS);
};
#endif // SHARE_VM_MEMORY_METASPACE_SHARED_HPP
--- a/hotspot/src/share/vm/memory/universe.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/memory/universe.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -26,6 +26,9 @@
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#endif
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -34,6 +37,7 @@
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
+#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
@@ -239,8 +243,9 @@
void initialize_basic_type_klass(Klass* k, TRAPS) {
Klass* ok = SystemDictionary::Object_klass();
if (UseSharedSpaces) {
+ ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
assert(k->super() == ok, "u3");
- k->restore_unshareable_info(CHECK);
+ k->restore_unshareable_info(loader_data, Handle(), CHECK);
} else {
k->initialize_supers(ok, CHECK);
}
@@ -666,6 +671,10 @@
SymbolTable::create_table();
StringTable::create_table();
ClassLoader::create_package_info_table();
+
+ if (DumpSharedSpaces) {
+ MetaspaceShared::prepare_for_dumping();
+ }
}
return JNI_OK;
@@ -1155,6 +1164,11 @@
MemoryService::add_metaspace_memory_pools();
MemoryService::set_universe_heap(Universe::_collectedHeap);
+#if INCLUDE_CDS
+ if (UseSharedSpaces) {
+ SharedClassUtil::initialize(CHECK_false);
+ }
+#endif
return true;
}
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -186,8 +186,9 @@
set_component_mirror(NULL);
}
-void ArrayKlass::restore_unshareable_info(TRAPS) {
- Klass::restore_unshareable_info(CHECK);
+void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+ assert(loader_data == ClassLoaderData::the_null_class_loader_data(), "array classes belong to null loader");
+ Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
// Klass recreates the component mirror also
}
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -137,7 +137,7 @@
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
- virtual void restore_unshareable_info(TRAPS);
+ virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
// Printing
void print_on(outputStream* st) const;
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -2303,12 +2303,14 @@
array_klasses_do(remove_unshareable_in_class);
}
-void restore_unshareable_in_class(Klass* k, TRAPS) {
- k->restore_unshareable_info(CHECK);
+static void restore_unshareable_in_class(Klass* k, TRAPS) {
+ // Array classes have null protection domain.
+ // --> see ArrayKlass::complete_create_array_klass()
+ k->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
}
-void InstanceKlass::restore_unshareable_info(TRAPS) {
- Klass::restore_unshareable_info(CHECK);
+void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
+ Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
instanceKlassHandle ik(THREAD, this);
Array<Method*>* methods = ik->methods();
@@ -2334,6 +2336,38 @@
ik->array_klasses_do(restore_unshareable_in_class, CHECK);
}
+// returns true IFF is_in_error_state() has been changed as a result of this call.
+bool InstanceKlass::check_sharing_error_state() {
+ assert(DumpSharedSpaces, "should only be called during dumping");
+ bool old_state = is_in_error_state();
+
+ if (!is_in_error_state()) {
+ bool bad = false;
+ for (InstanceKlass* sup = java_super(); sup; sup = sup->java_super()) {
+ if (sup->is_in_error_state()) {
+ bad = true;
+ break;
+ }
+ }
+ if (!bad) {
+ Array<Klass*>* interfaces = transitive_interfaces();
+ for (int i = 0; i < interfaces->length(); i++) {
+ Klass* iface = interfaces->at(i);
+ if (InstanceKlass::cast(iface)->is_in_error_state()) {
+ bad = true;
+ break;
+ }
+ }
+ }
+
+ if (bad) {
+ set_in_error_state();
+ }
+ }
+
+ return (old_state != is_in_error_state());
+}
+
static void clear_all_breakpoints(Method* m) {
m->clear_all_breakpoints();
}
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -980,6 +980,13 @@
u2 idnum_allocated_count() const { return _idnum_allocated_count; }
+public:
+ void set_in_error_state() {
+ assert(DumpSharedSpaces, "only call this when dumping archive");
+ _init_state = initialization_error;
+ }
+ bool check_sharing_error_state();
+
private:
// initialization state
#ifdef ASSERT
@@ -1038,7 +1045,7 @@
public:
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
- virtual void restore_unshareable_info(TRAPS);
+ virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
// jvm support
jint compute_modifier_flags(TRAPS) const;
--- a/hotspot/src/share/vm/oops/klass.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/oops/klass.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -184,6 +184,7 @@
// The klass doesn't have any references at this point.
clear_modified_oops();
clear_accumulated_modified_oops();
+ _shared_class_path_index = -1;
}
jint Klass::array_layout_helper(BasicType etype) {
@@ -500,13 +501,12 @@
set_class_loader_data(NULL);
}
-void Klass::restore_unshareable_info(TRAPS) {
+void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
TRACE_INIT_ID(this);
// If an exception happened during CDS restore, some of these fields may already be
// set. We leave the class on the CLD list, even if incomplete so that we don't
// modify the CLD list outside a safepoint.
if (class_loader_data() == NULL) {
- ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
// Restore class_loader_data to the null class loader data
set_class_loader_data(loader_data);
@@ -515,12 +515,12 @@
loader_data->add_class(this);
}
- // Recreate the class mirror. The protection_domain is always null for
- // boot loader, for now.
+ // Recreate the class mirror.
// Only recreate it if not present. A previous attempt to restore may have
// gotten an OOM later but keep the mirror if it was created.
if (java_mirror() == NULL) {
- java_lang_Class::create_mirror(this, Handle(NULL), Handle(NULL), CHECK);
+ Handle loader = loader_data->class_loader();
+ java_lang_Class::create_mirror(this, loader, protection_domain, CHECK);
}
}
--- a/hotspot/src/share/vm/oops/klass.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/oops/klass.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -147,6 +147,16 @@
jbyte _modified_oops; // Card Table Equivalent (YC/CMS support)
jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
+private:
+ // This is an index into FileMapHeader::_classpath_entry_table[], to
+ // associate this class with the JAR file where it's loaded from during
+ // dump time. If a class is not loaded from the shared archive, this field is
+ // -1.
+ jshort _shared_class_path_index;
+
+ friend class SharedClassUtil;
+protected:
+
// Constructor
Klass();
@@ -253,6 +263,15 @@
void clear_accumulated_modified_oops() { _accumulated_modified_oops = 0; }
bool has_accumulated_modified_oops() { return _accumulated_modified_oops == 1; }
+ int shared_classpath_index() const {
+ return _shared_class_path_index;
+ };
+
+ void set_shared_classpath_index(int index) {
+ _shared_class_path_index = index;
+ };
+
+
protected: // internal accessors
void set_subklass(Klass* s);
void set_next_sibling(Klass* s);
@@ -422,7 +441,7 @@
public:
// CDS support - remove and restore oops from metadata. Oops are not shared.
virtual void remove_unshareable_info();
- virtual void restore_unshareable_info(TRAPS);
+ virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
protected:
// computes the subtype relationship
--- a/hotspot/src/share/vm/prims/jvm.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/prims/jvm.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -28,6 +28,10 @@
#include "classfile/javaClasses.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
+#if INCLUDE_CDS
+#include "classfile/sharedClassUtil.hpp"
+#include "classfile/systemDictionaryShared.hpp"
+#endif
#include "classfile/vmSymbols.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/bytecode.hpp"
@@ -993,7 +997,15 @@
h_loader,
Handle(),
CHECK_NULL);
-
+#if INCLUDE_CDS
+ if (k == NULL) {
+ // If the class is not already loaded, try to see if it's in the shared
+ // archive for the current classloader (h_loader).
+ instanceKlassHandle ik = SystemDictionaryShared::find_or_load_shared_class(
+ klass_name, h_loader, CHECK_NULL);
+ k = ik();
+ }
+#endif
return (k == NULL) ? NULL :
(jclass) JNIHandles::make_local(env, k->java_mirror());
JVM_END
--- a/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/prims/jvmtiManageCapabilities.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -316,6 +316,7 @@
avail.can_generate_frame_pop_events ||
avail.can_generate_method_entry_events ||
avail.can_generate_method_exit_events;
+#ifdef ZERO
bool enter_all_methods =
interp_events ||
avail.can_generate_breakpoint_events;
@@ -324,6 +325,7 @@
UseFastEmptyMethods = false;
UseFastAccessorMethods = false;
}
+#endif // ZERO
if (avail.can_generate_breakpoint_events) {
RewriteFrequentPairs = false;
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "classfile/classLoader.hpp"
#include "classfile/javaAssertions.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
@@ -43,6 +44,7 @@
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
+#include "utilities/stringUtils.hpp"
#include "utilities/taskqueue.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
@@ -301,6 +303,10 @@
{ "ReflectionWrapResolutionErrors",JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "VerifyReflectionBytecodes", JDK_Version::jdk(9), JDK_Version::jdk(10) },
{ "AutoShutdownNMT", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+#ifndef ZERO
+ { "UseFastAccessorMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+ { "UseFastEmptyMethods", JDK_Version::jdk(9), JDK_Version::jdk(10) },
+#endif // ZERO
{ NULL, JDK_Version(0), JDK_Version(0) }
};
@@ -1071,16 +1077,6 @@
UseCompiler = true;
UseLoopCounter = true;
-#ifndef ZERO
- // Turn these off for mixed and comp. Leave them on for Zero.
- if (FLAG_IS_DEFAULT(UseFastAccessorMethods)) {
- UseFastAccessorMethods = (mode == _int);
- }
- if (FLAG_IS_DEFAULT(UseFastEmptyMethods)) {
- UseFastEmptyMethods = (mode == _int);
- }
-#endif
-
// Default values may be platform/compiler dependent -
// use the saved values
ClipInlining = Arguments::_ClipInlining;
@@ -1121,11 +1117,11 @@
// Conflict: required to use shared spaces (-Xshare:on), but
// incompatible command line options were chosen.
-static void no_shared_spaces() {
+static void no_shared_spaces(const char* message) {
if (RequireSharedSpaces) {
jio_fprintf(defaultStream::error_stream(),
"Class data sharing is inconsistent with other specified options.\n");
- vm_exit_during_initialization("Unable to use shared archive.", NULL);
+ vm_exit_during_initialization("Unable to use shared archive.", message);
} else {
FLAG_SET_DEFAULT(UseSharedSpaces, false);
}
@@ -1587,7 +1583,7 @@
// at link time, or rewrite bytecodes in non-shared methods.
if (!DumpSharedSpaces && !RequireSharedSpaces &&
(FLAG_IS_DEFAULT(UseSharedSpaces) || !UseSharedSpaces)) {
- no_shared_spaces();
+ no_shared_spaces("COMPILER2 default: -Xshare:auto | off, have to manually setup to on.");
}
#endif
@@ -3308,6 +3304,15 @@
}
}
+ // PrintSharedArchiveAndExit will turn on
+ // -Xshare:on
+ // -XX:+TraceClassPaths
+ if (PrintSharedArchiveAndExit) {
+ FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
+ FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true);
+ FLAG_SET_CMDLINE(bool, TraceClassPaths, true);
+ }
+
// Change the default value for flags which have different default values
// when working with older JDKs.
#ifdef LINUX
@@ -3316,9 +3321,55 @@
FLAG_SET_DEFAULT(UseLinuxPosixThreadCPUClocks, false);
}
#endif // LINUX
+ fix_appclasspath();
return JNI_OK;
}
+// Remove all empty paths from the app classpath (if IgnoreEmptyClassPaths is enabled)
+//
+// This is necessary because some apps like to specify classpath like -cp foo.jar:${XYZ}:bar.jar
+// in their start-up scripts. If XYZ is empty, the classpath will look like "-cp foo.jar::bar.jar".
+// Java treats such empty paths as if the user specified "-cp foo.jar:.:bar.jar". I.e., an empty
+// path is treated as the current directory.
+//
+// This causes problems with CDS, which requires that all directories specified in the classpath
+// be empty. In most cases, applications do NOT want to load classes from the current
+// directory anyway. Adding -XX:+IgnoreEmptyClassPaths will make these applications' start-up
+// scripts compatible with CDS.
+void Arguments::fix_appclasspath() {
+ if (IgnoreEmptyClassPaths) {
+ const char separator = *os::path_separator();
+ const char* src = _java_class_path->value();
+
+ // skip over all the leading empty paths
+ while (*src == separator) {
+ src ++;
+ }
+
+ char* copy = AllocateHeap(strlen(src) + 1, mtInternal);
+ strncpy(copy, src, strlen(src) + 1);
+
+ // trim all trailing empty paths
+ for (char* tail = copy + strlen(copy) - 1; tail >= copy && *tail == separator; tail--) {
+ *tail = '\0';
+ }
+
+ char from[3] = {separator, separator, '\0'};
+ char to [2] = {separator, '\0'};
+ while (StringUtils::replace_no_expand(copy, from, to) > 0) {
+ // Keep replacing "::" -> ":" until we have no more "::" (non-windows)
+ // Keep replacing ";;" -> ";" until we have no more ";;" (windows)
+ }
+
+ _java_class_path->set_value(copy);
+ FreeHeap(copy); // a copy was made by set_value, so don't need this anymore
+ }
+
+ if (!PrintSharedArchiveAndExit) {
+ ClassLoader::trace_class_path("[classpath: ", _java_class_path->value());
+ }
+}
+
jint Arguments::finalize_vm_init_args(SysClassPath* scp_p, bool scp_assembly_required) {
// This must be done after all -D arguments have been processed.
scp_p->expand_endorsed();
@@ -3489,9 +3540,8 @@
"Cannot dump shared archive when UseCompressedOops or UseCompressedClassPointers is off.", NULL);
}
} else {
- // UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.
if (!UseCompressedOops || !UseCompressedClassPointers) {
- no_shared_spaces();
+ no_shared_spaces("UseCompressedOops and UseCompressedClassPointers must be on for UseSharedSpaces.");
}
#endif
}
@@ -3731,7 +3781,7 @@
FLAG_SET_DEFAULT(UseSharedSpaces, false);
FLAG_SET_DEFAULT(PrintSharedSpaces, false);
}
- no_shared_spaces();
+ no_shared_spaces("CDS Disabled");
#endif // INCLUDE_CDS
return JNI_OK;
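
A minimal standalone sketch may make the normalization performed by Arguments::fix_appclasspath() under -XX:+IgnoreEmptyClassPaths easier to follow. The helper name and the hard-coded ':' separator below are illustrative assumptions only; the VM itself uses os::path_separator() and StringUtils::replace_no_expand().

    // Hypothetical sketch: drop leading/trailing separators, then collapse repeated
    // separators in place, so ":foo.jar::bar.jar:" becomes "foo.jar:bar.jar".
    #include <assert.h>
    #include <string.h>

    static void normalize_classpath(char* cp, char sep) {
      char* src = cp;
      while (*src == sep) src++;                   // skip leading empty paths
      memmove(cp, src, strlen(src) + 1);
      for (char* tail = cp + strlen(cp); tail > cp && tail[-1] == sep; ) {
        *--tail = '\0';                            // trim trailing empty paths
      }
      char from[3] = {sep, sep, '\0'};
      for (char* p; (p = strstr(cp, from)) != NULL; ) {
        memmove(p, p + 1, strlen(p + 1) + 1);      // collapse "::" (";;" on Windows)
      }
    }

    int main() {
      char cp[] = ":foo.jar::bar.jar:";
      normalize_classpath(cp, ':');
      assert(strcmp(cp, "foo.jar:bar.jar") == 0);
      return 0;
    }
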
--- a/hotspot/src/share/vm/runtime/arguments.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/runtime/arguments.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -579,12 +579,15 @@
_meta_index_dir = meta_index_dir;
}
- static char *get_java_home() { return _java_home->value(); }
- static char *get_dll_dir() { return _sun_boot_library_path->value(); }
- static char *get_endorsed_dir() { return _java_endorsed_dirs->value(); }
- static char *get_sysclasspath() { return _sun_boot_class_path->value(); }
+ static char* get_java_home() { return _java_home->value(); }
+ static char* get_dll_dir() { return _sun_boot_library_path->value(); }
+ static char* get_endorsed_dir() { return _java_endorsed_dirs->value(); }
+ static char* get_sysclasspath() { return _sun_boot_class_path->value(); }
static char* get_meta_index_path() { return _meta_index_path; }
static char* get_meta_index_dir() { return _meta_index_dir; }
+ static char* get_ext_dirs() { return _java_ext_dirs->value(); }
+ static char* get_appclasspath() { return _java_class_path->value(); }
+ static void fix_appclasspath();
// Operation modi
static Mode mode() { return _mode; }
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/runtime/globals.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -2329,6 +2329,12 @@
notproduct(bool, TraceScavenge, false, \
"Trace scavenge") \
\
+ product(bool, IgnoreEmptyClassPaths, false, \
+ "Ignore empty path elements in -classpath") \
+ \
+ product(bool, TraceClassPaths, false, \
+ "Trace processing of class paths") \
+ \
product_rw(bool, TraceClassLoading, false, \
"Trace all classes loaded") \
\
@@ -2784,12 +2790,6 @@
product(bool, UseLoopCounter, true, \
"Increment invocation counter on backward branch") \
\
- product(bool, UseFastEmptyMethods, true, \
- "Use fast method entry code for empty methods") \
- \
- product(bool, UseFastAccessorMethods, true, \
- "Use fast method entry code for accessor methods") \
- \
product_pd(bool, UseOnStackReplacement, \
"Use on stack replacement, calls runtime if invoc. counter " \
"overflows in loop") \
@@ -3769,6 +3769,13 @@
product(bool, PrintSharedSpaces, false, \
"Print usage of shared spaces") \
\
+ product(bool, PrintSharedArchiveAndExit, false, \
+ "Print shared archive file contents") \
+ \
+ product(bool, PrintSharedDictionary, false, \
+ "If PrintSharedArchiveAndExit is true, also print the shared " \
+ "dictionary") \
+ \
product(uintx, SharedReadWriteSize, NOT_LP64(12*M) LP64_ONLY(16*M), \
"Size of read-write space for metadata (in bytes)") \
\
@@ -3785,6 +3792,10 @@
NOT_LP64(LINUX_ONLY(2*G) NOT_LINUX(0)), \
"Address to allocate shared memory region for class data") \
\
+ diagnostic(bool, IgnoreUnverifiableClassesDuringDump, false, \
+ "Do not quit -Xshare:dump even if we encounter unverifiable " \
+ "classes. Just exclude them from the shared dictionary.") \
+ \
diagnostic(bool, PrintMethodHandleStubs, false, \
"Print generated stub code for method handles") \
\
@@ -3875,9 +3886,19 @@
product(bool , AllowNonVirtualCalls, false, \
"Obey the ACC_SUPER flag and allow invokenonvirtual calls") \
\
+ product(ccstr, DumpLoadedClassList, NULL, \
+ "Dump the names all loaded classes, that could be stored into " \
+ "the CDS archive, in the specified file") \
+ \
+ product(ccstr, SharedClassListFile, NULL, \
+ "Override the default CDS class list") \
+ \
diagnostic(ccstr, SharedArchiveFile, NULL, \
"Override the default location of the CDS archive file") \
\
+ product(ccstr, ExtraSharedClassListFile, NULL, \
+ "Extra classlist for building the CDS archive file") \
+ \
experimental(uintx, ArrayAllocatorMallocLimit, \
SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx), \
"Allocation less than this value will be allocated " \
--- a/hotspot/src/share/vm/runtime/javaCalls.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/runtime/javaCalls.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -309,6 +309,10 @@
}
void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) {
+ // During dumping, Java execution environment is not fully initialized. Also, Java execution
+ // may cause undesirable side-effects in the class metadata.
+ assert(!DumpSharedSpaces, "must not execute Java bytecodes when dumping");
+
methodHandle method = *m;
JavaThread* thread = (JavaThread*)THREAD;
assert(thread->is_Java_thread(), "must be called by a java thread");
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -314,7 +314,7 @@
nonstatic_field(InstanceKlass, _jni_ids, JNIid*) \
nonstatic_field(InstanceKlass, _osr_nmethods_head, nmethod*) \
nonstatic_field(InstanceKlass, _breakpoints, BreakpointInfo*) \
- nonstatic_field(InstanceKlass, _generic_signature_index, u2) \
+ nonstatic_field(InstanceKlass, _generic_signature_index, u2) \
nonstatic_field(InstanceKlass, _methods_jmethod_ids, jmethodID*) \
volatile_nonstatic_field(InstanceKlass, _idnum_allocated_count, u2) \
nonstatic_field(InstanceKlass, _annotations, Annotations*) \
@@ -662,6 +662,7 @@
static_field(SystemDictionary, WK_KLASS(StackOverflowError_klass), Klass*) \
static_field(SystemDictionary, WK_KLASS(ProtectionDomain_klass), Klass*) \
static_field(SystemDictionary, WK_KLASS(AccessControlContext_klass), Klass*) \
+ static_field(SystemDictionary, WK_KLASS(SecureClassLoader_klass), Klass*) \
static_field(SystemDictionary, WK_KLASS(Reference_klass), Klass*) \
static_field(SystemDictionary, WK_KLASS(SoftReference_klass), Klass*) \
static_field(SystemDictionary, WK_KLASS(WeakReference_klass), Klass*) \
--- a/hotspot/src/share/vm/utilities/exceptions.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -85,9 +85,13 @@
#endif // ASSERT
if (thread->is_VM_thread()
- || thread->is_Compiler_thread() ) {
+ || thread->is_Compiler_thread()
+ || DumpSharedSpaces ) {
// We do not care what kind of exception we get for the vm-thread or a thread which
// is compiling. We just install a dummy exception object
+ //
+ // We also cannot throw a proper exception when dumping, because we cannot run
+ // Java bytecodes now. A dummy exception will suffice.
thread->set_pending_exception(Universe::vm_exception(), file, line);
return true;
}
@@ -108,9 +112,13 @@
}
if (thread->is_VM_thread()
- || thread->is_Compiler_thread() ) {
+ || thread->is_Compiler_thread()
+ || DumpSharedSpaces ) {
// We do not care what kind of exception we get for the vm-thread or a thread which
// is compiling. We just install a dummy exception object
+ //
+ // We also cannot throw a proper exception when dumping, because we cannot run
+ // Java bytecodes now. A dummy exception will suffice.
thread->set_pending_exception(Universe::vm_exception(), file, line);
return true;
}
--- a/hotspot/src/share/vm/utilities/ostream.cpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/utilities/ostream.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -352,6 +352,7 @@
xmlStream* xtty;
outputStream* tty;
outputStream* gclog_or_tty;
+CDS_ONLY(fileStream* classlist_file;) // Only dump the classes that can be stored into the CDS archive
extern Mutex* tty_lock;
#define EXTRACHARLEN 32
@@ -463,7 +464,8 @@
return buf;
}
-// log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
+// log_name comes from -XX:LogFile=log_name, -Xloggc:log_name or
+// -XX:DumpLoadedClassList=<file_name>
// in log_name, %p => pid1234 and
// %t => YYYY-MM-DD_HH-MM-SS
static const char* make_log_name(const char* log_name, const char* force_directory) {
@@ -1103,6 +1105,16 @@
gclog_or_tty = gclog;
}
+#if INCLUDE_CDS
+ // For -XX:DumpLoadedClassList=<file> option
+ if (DumpLoadedClassList != NULL) {
+ const char* list_name = make_log_name(DumpLoadedClassList, NULL);
+ classlist_file = new(ResourceObj::C_HEAP, mtInternal)
+ fileStream(list_name);
+ FREE_C_HEAP_ARRAY(char, list_name, mtInternal);
+ }
+#endif
+
// If we haven't lazily initialized the logfile yet, do it now,
// to avoid the possibility of lazy initialization during a VM
// crash, which can affect the stability of the fatal error handler.
@@ -1115,6 +1127,11 @@
static bool ostream_exit_called = false;
if (ostream_exit_called) return;
ostream_exit_called = true;
+#if INCLUDE_CDS
+ if (classlist_file != NULL) {
+ delete classlist_file;
+ }
+#endif
if (gclog_or_tty != tty) {
delete gclog_or_tty;
}
--- a/hotspot/src/share/vm/utilities/ostream.hpp Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/src/share/vm/utilities/ostream.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -214,6 +214,8 @@
void flush();
};
+CDS_ONLY(extern fileStream* classlist_file;)
+
// unlike fileStream, fdStream does unbuffered I/O by calling
// open() and write() directly. It is async-safe, but output
// from multiple thread may be mixed together. Used by fatal
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/stringUtils.cpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/stringUtils.hpp"
+
+int StringUtils::replace_no_expand(char* string, const char* from, const char* to) {
+ int replace_count = 0;
+ size_t from_len = strlen(from);
+ size_t to_len = strlen(to);
+ assert(from_len >= to_len, "must not expand input");
+
+ for (char* dst = string; *dst && (dst = strstr(dst, from)) != NULL;) {
+ char* left_over = dst + from_len;
+ memmove(dst, to, to_len); // does not copy trailing 0 of <to>
+ dst += to_len; // skip over the replacement.
+ memmove(dst, left_over, strlen(left_over) + 1); // copies the trailing 0 of <left_over>
+ ++ replace_count;
+ }
+
+ return replace_count;
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/utilities/stringUtils.hpp Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_STRINGUTILS_HPP
+#define SHARE_VM_UTILITIES_STRINGUTILS_HPP
+
+#include "memory/allocation.hpp"
+
+class StringUtils : AllStatic {
+public:
+ // Replace the substring <from> with another string <to>. <to> must be
+ // no longer than <from>. The input string is modified in-place.
+ //
+ // Replacement is done in a single pass left-to-right. So replace_no_expand("aaa", "aa", "a")
+ // will result in "aa", not "a".
+ //
+ // Returns the count of substrings that have been replaced.
+ static int replace_no_expand(char* string, const char* from, const char* to);
+};
+
+#endif // SHARE_VM_UTILITIES_STRINGUTILS_HPP
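
Because a single call is one left-to-right pass, as the comment above notes, a caller that wants to fully collapse a repeated substring loops until the function reports zero replacements. A minimal caller-side sketch, assuming the HotSpot build environment and the headers added by this patch (the helper name is hypothetical):

    #include "precompiled.hpp"
    #include "utilities/stringUtils.hpp"

    // Collapse runs of "::" down to ":" in place. "a:::b" becomes "a::b" after the
    // first call and "a:b" after the second, so the loop re-checks the count until
    // it reaches zero, exactly as Arguments::fix_appclasspath() does.
    static void collapse_double_separators(char* cp) {
      while (StringUtils::replace_no_expand(cp, "::", ":") > 0) {
        // keep going; the string only ever shrinks in place
      }
    }
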
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/AutoshutdownNMT.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt
+ * @summary Test for deprecated message if -XX:-AutoShutdownNMT is specified
+ * @library /testlibrary
+ * @ignore
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class AutoshutdownNMT {
+
+ public static void main(String args[]) throws Exception {
+
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ "-XX:NativeMemoryTracking=detail",
+ "-XX:-AutoShutdownNMT",
+ "-version");
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldContain("ignoring option AutoShutdownNMT");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdBaselineDetail.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @summary Verify that jcmd correctly reports that baseline succeeds with NMT enabled with detailed tracking.
+ * @library /testlibrary
+ * @ignore
+ * @run main/othervm -XX:NativeMemoryTracking=detail JcmdBaselineDetail
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class JcmdBaselineDetail {
+
+ public static void main(String args[]) throws Exception {
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+ OutputAnalyzer output;
+
+ ProcessBuilder pb = new ProcessBuilder();
+
+ // Run 'jcmd <pid> VM.native_memory baseline=true'
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Baseline succeeded");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdDetailDiff.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary run NMT baseline, allocate memory and verify output from detail.diff
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @ignore
+ * @build JcmdDetailDiff
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail JcmdDetailDiff
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class JcmdDetailDiff {
+
+ public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+ public static void main(String args[]) throws Exception {
+ ProcessBuilder pb = new ProcessBuilder();
+ OutputAnalyzer output;
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+
+ long commitSize = 128 * 1024;
+ long reserveSize = 256 * 1024;
+ long addr;
+
+ // Run 'jcmd <pid> VM.native_memory baseline=true'
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+ pb.start().waitFor();
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Baseline succeeded");
+
+ addr = wb.NMTReserveMemory(reserveSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+ output.shouldContain("WB_NMTReserveMemory");
+
+ wb.NMTCommitMemory(addr, commitSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
+ output.shouldContain("WB_NMTReserveMemory");
+
+ wb.NMTUncommitMemory(addr, commitSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+ output.shouldContain("WB_NMTReserveMemory");
+
+ wb.NMTReleaseMemory(addr, reserveSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Test (reserved=");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdScaleDetail.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @key nmt jcmd
+ * @summary Test the NMT scale parameter with detail tracking level
+ * @library /testlibrary
+ * @ignore
+ * @run main/othervm -XX:NativeMemoryTracking=detail JcmdScaleDetail
+ */
+
+import com.oracle.java.testlibrary.*;
+
+public class JcmdScaleDetail {
+
+ public static void main(String args[]) throws Exception {
+ ProcessBuilder pb = new ProcessBuilder();
+ OutputAnalyzer output;
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("KB, committed=");
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=MB"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("MB, committed=");
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=GB"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("GB, committed=");
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=apa"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Incorrect scale value: apa");
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=GB"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("GB, committed=");
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary", "scale=apa"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Incorrect scale value: apa");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/JcmdSummaryDiff.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary run NMT baseline, allocate memory and verify output from summary.diff
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build JcmdSummaryDiff
+ * @ignore
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary JcmdSummaryDiff
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class JcmdSummaryDiff {
+
+ public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+ public static void main(String args[]) throws Exception {
+ ProcessBuilder pb = new ProcessBuilder();
+ OutputAnalyzer output;
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+
+ long commitSize = 128 * 1024;
+ long reserveSize = 256 * 1024;
+ long addr;
+
+ // Run 'jcmd <pid> VM.native_memory baseline=true'
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "baseline=true"});
+ pb.start().waitFor();
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Baseline succeeded");
+
+ addr = wb.NMTReserveMemory(reserveSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+ wb.NMTCommitMemory(addr, commitSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=256KB +256KB, committed=128KB +128KB)");
+
+ wb.NMTUncommitMemory(addr, commitSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=256KB +256KB, committed=0KB)");
+
+ wb.NMTReleaseMemory(addr, reserveSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary.diff", "scale=KB"});
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Test (reserved=");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/MallocRoundingReportTest.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test consistency of NMT by creating allocations of the Test type with various sizes and verifying visibility with jcmd
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocRoundingReportTest
+ * @ignore
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocRoundingReportTest
+ *
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class MallocRoundingReportTest {
+ private static long K = 1024;
+
+ public static void main(String args[]) throws Exception {
+ OutputAnalyzer output;
+ WhiteBox wb = WhiteBox.getWhiteBox();
+
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+ ProcessBuilder pb = new ProcessBuilder();
+
+ long[] additionalBytes = {0, 1, 512, 650};
+ long[] kByteSize = {1024, 2048};
+ long mallocd_total = 0;
+ for ( int i = 0; i < kByteSize.length; i++)
+ {
+ for (int j = 0; j < (additionalBytes.length); j++) {
+ long curKB = kByteSize[i] + additionalBytes[j];
+ // round up/down to the nearest KB to match NMT reporting
+ long numKB = (curKB % kByteSize[i] >= 512) ? ((curKB / K) + 1) : curKB / K;
+ // Use WB API to alloc and free with the mtTest type
+ mallocd_total = wb.NMTMalloc(curKB);
+ // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+ // NMT does not track memory allocations less than 1KB, and rounds to the nearest KB
+ String expectedOut = ("Test (reserved=" + numKB + "KB, committed=" + numKB + "KB)");
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain(expectedOut);
+
+ wb.NMTFree(mallocd_total);
+ // Run 'jcmd <pid> VM.native_memory summary', check for expected output
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary" });
+ output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Test (reserved=");
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/MallocSiteHashOverflow.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @key stress
+ * @test
+ * @summary Test corner case that overflows malloc site hashtable bucket
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @ignore - This test is disabled since it will stress NMT and time out during normal testing
+ * @build MallocSiteHashOverflow
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=480 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocSiteHashOverflow
+ */
+
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocSiteHashOverflow {
+ private static long K = 1024;
+ public static void main(String args[]) throws Exception {
+ String vm_name = System.getProperty("java.vm.name");
+
+ // For 32-bit systems, create 257 malloc sites with the same hash bucket to overflow a hash bucket
+ // For 64-bit systems, create 64K + 1 malloc sites with the same hash bucket to overflow a hash bucket
+ long entries = 257;
+ if (Platform.is64bit()) {
+ entries = 64 * K + 1;
+ }
+
+ OutputAnalyzer output;
+ WhiteBox wb = WhiteBox.getWhiteBox();
+
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+ ProcessBuilder pb = new ProcessBuilder();
+
+ wb.NMTOverflowHashBucket(entries);
+
+ // Run 'jcmd <pid> VM.native_memory summary'
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Tracking level has been downgraded due to lack of resources");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/MallocStressTest.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,266 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @key stress
+ * @test
+ * @summary Stress test for malloc tracking
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @build MallocStressTest
+ * @ignore - This test is disabled since it will stress NMT and time out during normal testing
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocStressTest
+ */
+
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import com.oracle.java.testlibrary.*;
+import sun.hotspot.WhiteBox;
+
+public class MallocStressTest {
+ private static int K = 1024;
+
+ // The stress test runs in three phases:
+ // 1. alloc: Many mallocs with fewer frees, which simulates the burst of memory allocation
+ // that is usually seen during startup or class loading.
+ // 2. pause: Pause the test to check accuracy of native memory tracking
+ // 3. release: Release all malloc'd memory and check native memory tracking result.
+ public enum TestPhase {
+ alloc,
+ pause,
+ release
+ };
+
+ static TestPhase phase = TestPhase.alloc;
+
+ // malloc'd memory
+ static ArrayList<MallocMemory> mallocd_memory = new ArrayList<MallocMemory>();
+ static long mallocd_total = 0;
+ static WhiteBox whiteBox;
+ static AtomicInteger pause_count = new AtomicInteger();
+
+ static boolean is_64_bit_system;
+
+ private static boolean is_64_bit_system() { return is_64_bit_system; }
+
+ public static void main(String args[]) throws Exception {
+ is_64_bit_system = (Platform.is64bit());
+
+ OutputAnalyzer output;
+ whiteBox = WhiteBox.getWhiteBox();
+
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+ ProcessBuilder pb = new ProcessBuilder();
+
+ AllocThread[] alloc_threads = new AllocThread[256];
+ ReleaseThread[] release_threads = new ReleaseThread[64];
+
+ int index;
+ // Create many allocation threads
+ for (index = 0; index < alloc_threads.length; index ++) {
+ alloc_threads[index] = new AllocThread();
+ }
+
+ // Fewer release threads
+ for (index = 0; index < release_threads.length; index ++) {
+ release_threads[index] = new ReleaseThread();
+ }
+
+ if (is_64_bit_system()) {
+ sleep_wait(2*60*1000);
+ } else {
+ sleep_wait(60*1000);
+ }
+ // pause the stress test
+ phase = TestPhase.pause;
+ while (pause_count.intValue() < alloc_threads.length + release_threads.length) {
+ sleep_wait(10);
+ }
+
+ long mallocd_total_in_KB = (mallocd_total + K / 2) / K;
+
+ // Now check if the result from NMT matches the total memory allocated.
+ String expected_test_summary = "Test (reserved=" + mallocd_total_in_KB +"KB, committed=" + mallocd_total_in_KB + "KB)";
+ // Run 'jcmd <pid> VM.native_memory summary'
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain(expected_test_summary);
+
+ // Release all allocated memory
+ phase = TestPhase.release;
+ synchronized(mallocd_memory) {
+ mallocd_memory.notifyAll();
+ }
+
+ // Join all threads
+ for (index = 0; index < alloc_threads.length; index ++) {
+ try {
+ alloc_threads[index].join();
+ } catch (InterruptedException e) {
+ }
+ }
+
+ for (index = 0; index < release_threads.length; index ++) {
+ try {
+ release_threads[index].join();
+ } catch (InterruptedException e) {
+ }
+ }
+
+ // All test memory allocated should be released
+ output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Test (reserved=");
+
+ // Verify that tracking level has not been downgraded
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "statistics"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Tracking level has been downgraded due to lack of resources");
+ }
+
+ private static void sleep_wait(int n) {
+ try {
+ Thread.sleep(n);
+ } catch (InterruptedException e) {
+ }
+ }
+
+
+ static class MallocMemory {
+ private long addr;
+ private int size;
+
+ MallocMemory(long addr, int size) {
+ this.addr = addr;
+ this.size = size;
+ }
+
+ long addr() { return this.addr; }
+ int size() { return this.size; }
+ }
+
+ static class AllocThread extends Thread {
+ AllocThread() {
+ this.setName("MallocThread");
+ this.start();
+ }
+
+ // AllocThread only runs "Alloc" phase
+ public void run() {
+ Random random = new Random();
+ while (MallocStressTest.phase == TestPhase.alloc) {
+ int r = Math.abs(random.nextInt());
+ // Only malloc small amount to avoid OOM
+ int size = r % 32;
+ if (is_64_bit_system()) {
+ r = r % 32 * K;
+ } else {
+ r = r % 64;
+ }
+ if (size == 0) size = 1;
+ long addr = MallocStressTest.whiteBox.NMTMallocWithPseudoStack(size, r);
+ if (addr != 0) {
+ MallocMemory mem = new MallocMemory(addr, size);
+ synchronized(MallocStressTest.mallocd_memory) {
+ MallocStressTest.mallocd_memory.add(mem);
+ MallocStressTest.mallocd_total += size;
+ }
+ } else {
+ System.out.println("Out of malloc memory");
+ break;
+ }
+ }
+ MallocStressTest.pause_count.incrementAndGet();
+ }
+ }
+
+ static class ReleaseThread extends Thread {
+ private Random random = new Random();
+ ReleaseThread() {
+ this.setName("ReleaseThread");
+ this.start();
+ }
+
+ public void run() {
+ while(true) {
+ switch(MallocStressTest.phase) {
+ case alloc:
+ slow_release();
+ break;
+ case pause:
+ enter_pause();
+ break;
+ case release:
+ quick_release();
+ return;
+ }
+ }
+ }
+
+ private void enter_pause() {
+ MallocStressTest.pause_count.incrementAndGet();
+ while (MallocStressTest.phase != MallocStressTest.TestPhase.release) {
+ try {
+ synchronized(MallocStressTest.mallocd_memory) {
+ MallocStressTest.mallocd_memory.wait(10);
+ }
+ } catch (InterruptedException e) {
+ }
+ }
+ }
+
+ private void quick_release() {
+ List<MallocMemory> free_list;
+ while (true) {
+ synchronized(MallocStressTest.mallocd_memory) {
+ if (MallocStressTest.mallocd_memory.isEmpty()) return;
+ int size = Math.min(MallocStressTest.mallocd_memory.size(), 5000);
+ List<MallocMemory> subList = MallocStressTest.mallocd_memory.subList(0, size);
+ free_list = new ArrayList<MallocMemory>(subList);
+ subList.clear();
+ }
+ for (int index = 0; index < free_list.size(); index ++) {
+ MallocMemory mem = free_list.get(index);
+ MallocStressTest.whiteBox.NMTFree(mem.addr());
+ }
+ }
+ }
+
+ private void slow_release() {
+ try {
+ Thread.sleep(10);
+ } catch (InterruptedException e) {
+ }
+ synchronized(MallocStressTest.mallocd_memory) {
+ if (MallocStressTest.mallocd_memory.isEmpty()) return;
+ int n = Math.abs(random.nextInt()) % MallocStressTest.mallocd_memory.size();
+ MallocMemory mem = mallocd_memory.remove(n);
+ MallocStressTest.whiteBox.NMTFree(mem.addr());
+ MallocStressTest.mallocd_total -= mem.size();
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/ReleaseNoCommit.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Release uncommitted memory and make sure NMT handles it correctly
+ * @key nmt regression
+ * @library /testlibrary /testlibrary/whitebox
+ * @build ReleaseNoCommit
+ * @ignore
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=summary ReleaseNoCommit
+ */
+
+import com.oracle.java.testlibrary.JDKToolFinder;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+import com.oracle.java.testlibrary.ProcessTools;
+
+import sun.hotspot.WhiteBox;
+
+public class ReleaseNoCommit {
+
+ public static void main(String args[]) throws Exception {
+ WhiteBox wb = WhiteBox.getWhiteBox();
+ long reserveSize = 256 * 1024;
+ long addr;
+
+ ProcessBuilder pb = new ProcessBuilder();
+ OutputAnalyzer output;
+ // Grab my own PID
+ String pid = Integer.toString(ProcessTools.getProcessId());
+
+ addr = wb.NMTReserveMemory(reserveSize);
+ // Check for reserved
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain(" Test (reserved=256KB, committed=0KB)");
+
+ wb.NMTReleaseMemory(addr, reserveSize);
+
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "scale=KB"});
+ output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Test (reserved=");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/runtime/NMT/VirtualAllocCommitUncommitRecommit.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Test reserve/commit/uncommit/release of virtual memory and that we track it correctly
+ * @key nmt jcmd
+ * @library /testlibrary /testlibrary/whitebox
+ * @ignore
+ * @build VirtualAllocCommitUncommitRecommit
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocCommitUncommitRecommit
+ *
+ */
+
+import com.oracle.java.testlibrary.*;
+
+import sun.hotspot.WhiteBox;
+
+public class VirtualAllocCommitUncommitRecommit {
+
+ public static WhiteBox wb = WhiteBox.getWhiteBox();
+
+ public static void main(String args[]) throws Exception {
+ OutputAnalyzer output;
+ long commitSize = 4 * 1024; // 4KB
+ long reserveSize = 1024 * 1024; // 1024KB
+ long addr;
+
+ String pid = Integer.toString(ProcessTools.getProcessId());
+ ProcessBuilder pb = new ProcessBuilder();
+
+ boolean has_nmt_detail = wb.NMTIsDetailSupported();
+ if (has_nmt_detail) {
+ System.out.println("NMT detail support detected.");
+ } else {
+ System.out.println("NMT detail support not detected.");
+ }
+
+ // reserve
+ addr = wb.NMTReserveMemory(reserveSize);
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid,
+ "VM.native_memory", "detail" });
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+ if (has_nmt_detail) {
+ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize)
+ + "\\] reserved 1024KB for Test");
+ }
+
+ long addrA = addr;
+ long addrB = addr + commitSize;
+ long addrC = addr + (2 * commitSize);
+ long addrD = addr + (3 * commitSize);
+ long addrE = addr + (4 * commitSize);
+ long addrF = addr + (5 * commitSize);
+
+ // commit ABCD
+ wb.NMTCommitMemory(addrA, commitSize);
+ wb.NMTCommitMemory(addrB, commitSize);
+ wb.NMTCommitMemory(addrC, commitSize);
+ wb.NMTCommitMemory(addrD, commitSize);
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+
+ if (has_nmt_detail) {
+ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize)
+ + "\\] reserved 1024KB for Test");
+ }
+ // uncommit BC
+ wb.NMTUncommitMemory(addrB, commitSize);
+ wb.NMTUncommitMemory(addrC, commitSize);
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=1024KB, committed=8KB)");
+
+ if (has_nmt_detail) {
+ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize)
+ + "\\] reserved 1024KB for Test");
+ }
+
+ // commit EF
+ wb.NMTCommitMemory(addrE, commitSize);
+ wb.NMTCommitMemory(addrF, commitSize);
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=1024KB, committed=16KB)");
+ if (has_nmt_detail) {
+ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize)
+ + "\\] reserved 1024KB for Test");
+ }
+
+ // uncommit A
+ wb.NMTUncommitMemory(addrA, commitSize);
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=1024KB, committed=12KB)");
+ if (has_nmt_detail) {
+ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize)
+ + "\\] reserved 1024KB for Test");
+ }
+
+ // commit ABC
+ wb.NMTCommitMemory(addrA, commitSize);
+ wb.NMTCommitMemory(addrB, commitSize);
+ wb.NMTCommitMemory(addrC, commitSize);
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=1024KB, committed=24KB)");
+ if (has_nmt_detail) {
+ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize)
+ + "\\] reserved 1024KB for Test");
+ }
+
+ // uncommit ABCDEF
+ wb.NMTUncommitMemory(addrA, commitSize);
+ wb.NMTUncommitMemory(addrB, commitSize);
+ wb.NMTUncommitMemory(addrC, commitSize);
+ wb.NMTUncommitMemory(addrD, commitSize);
+ wb.NMTUncommitMemory(addrE, commitSize);
+ wb.NMTUncommitMemory(addrF, commitSize);
+
+ output = new OutputAnalyzer(pb.start());
+ output.shouldContain("Test (reserved=1024KB, committed=0KB)");
+ if (has_nmt_detail) {
+ output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize)
+ + "\\] reserved 1024KB for Test");
+ }
+
+ // release
+ wb.NMTReleaseMemory(addr, reserveSize);
+ output = new OutputAnalyzer(pb.start());
+ output.shouldNotContain("Test (reserved=");
+ output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*"
+ + Long.toHexString(addr + reserveSize) + "\\] reserved");
+ }
+}
--- a/hotspot/test/runtime/jsig/Test8017498.sh Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/test/runtime/jsig/Test8017498.sh Thu Aug 14 13:13:15 2014 +0000
@@ -74,7 +74,7 @@
$gcc_cmd -DLINUX -fPIC -shared \
${EXTRA_CFLAG} -z noexecstack \
- -o ${TESTSRC}${FS}libTestJNI.so \
+ -o libTestJNI.so \
-I${COMPILEJAVA}${FS}include \
-I${COMPILEJAVA}${FS}include${FS}linux \
${TESTSRC}${FS}TestJNI.c
@@ -82,7 +82,7 @@
# run the java test in the background
cmd="LD_PRELOAD=$MY_LD_PRELOAD \
${TESTJAVA}${FS}bin${FS}java \
- -Djava.library.path=${TESTSRC}${FS} -server TestJNI 100"
+ -Djava.library.path=. -server TestJNI 100"
echo "$cmd > test.out 2>&1"
eval $cmd > test.out 2>&1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/testlibrary/com/oracle/java/testlibrary/BuildHelper.java Thu Aug 14 13:13:15 2014 +0000
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package com.oracle.java.testlibrary;
+
+import java.io.File;
+import java.io.FileReader;
+import java.util.Properties;
+
+public class BuildHelper {
+
+ /**
+ * Commercial builds should have the BUILD_TYPE set to commercial
+ * within the release file, found at the root of the JDK.
+ */
+ public static boolean isCommercialBuild() throws Exception {
+ String buildType = getReleaseProperty("BUILD_TYPE","notFound");
+ return buildType.equals("commercial");
+ }
+
+
+ /**
+ * Return the value for the property key, or defaultValue if the property is not found.
+ * If present, double quotes are trimmed.
+ */
+ public static String getReleaseProperty(String key, String defaultValue) throws Exception {
+ Properties properties = getReleaseProperties();
+ String value = properties.getProperty(key, defaultValue);
+ return trimDoubleQuotes(value);
+ }
+
+ /**
+ * Return the value for the property key, or null if the property is not found.
+ * If present, double quotes are trimmed.
+ */
+ public static String getReleaseProperty(String key) throws Exception {
+ return getReleaseProperty(key, null);
+ }
+
+ /**
+ * Get properties from the release file
+ */
+ public static Properties getReleaseProperties() throws Exception {
+ Properties properties = new Properties();
+ properties.load(new FileReader(getReleaseFile()));
+ return properties;
+ }
+
+ /**
+ * Every JDK has a release file in its root.
+ * @return A handle to the release file.
+ */
+ public static File getReleaseFile() throws Exception {
+ String jdkPath = getJDKRoot();
+ File releaseFile = new File(jdkPath,"release");
+ if ( ! releaseFile.canRead() ) {
+ throw new Exception("Release file is not readable, or it is absent: " +
+ releaseFile.getCanonicalPath());
+ }
+ return releaseFile;
+ }
+
+ /**
+ * Returns path to the JDK under test.
+ * This path is obtained through the test.jdk property, usually set by JTREG.
+ */
+ public static String getJDKRoot() {
+ String jdkPath = System.getProperty("test.jdk");
+ if (jdkPath == null) {
+ throw new RuntimeException("System property 'test.jdk' not set. This property is normally set by jtreg. "
+ + "When running test separately, set this property using '-Dtest.jdk=/path/to/jdk'.");
+ }
+ return jdkPath;
+ }
+
+ /**
+ * Trim double quotes from the beginning and the end of the given string.
+ * @param original string to trim.
+ * @return a new trimmed string.
+ */
+ public static String trimDoubleQuotes(String original) {
+ if (original == null) { return null; }
+ String trimmed = original.replaceAll("^\"+|\"+$", "");
+ return trimmed;
+ }
+}
--- a/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Aug 14 09:02:51 2014 -0400
+++ b/hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java Thu Aug 14 13:13:15 2014 +0000
@@ -25,6 +25,10 @@
package sun.hotspot;
import java.lang.reflect.Executable;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Function;
+import java.util.stream.Stream;
import java.security.BasicPermission;
import sun.hotspot.parser.DiagnosticCommand;
@@ -130,7 +134,7 @@
}
public native int getCompileQueueSize(int compLevel);
public native boolean testSetForceInlineMethod(Executable method, boolean value);
- public boolean enqueueMethodForCompilation(Executable method, int compLevel) {
+ public boolean enqueueMethodForCompilation(Executable method, int compLevel) {
return enqueueMethodForCompilation(method, compLevel, -1 /*InvocationEntryBci*/);
}
public native boolean enqueueMethodForCompilation(Executable method, int compLevel, int entry_bci);
@@ -143,6 +147,8 @@
// Memory
public native void readReservedMemory();
+ public native long allocateMetaspace(ClassLoader classLoader, long size);
+ public native void freeMetaspace(ClassLoader classLoader, long addr, long size);
// force Full GC
public native void fullGC();
@@ -151,8 +157,34 @@
public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
public native void runMemoryUnitTests();
public native void readFromNoaccessArea();
+ public native long getThreadStackSize();
+ public native long getThreadRemainingStackSize();
// CPU features
public native String getCPUFeatures();
+ // VM flags
+ public native void setBooleanVMFlag(String name, boolean value);
+ public native void setIntxVMFlag(String name, long value);
+ public native void setUintxVMFlag(String name, long value);
+ public native void setUint64VMFlag(String name, long value);
+ public native void setStringVMFlag(String name, String value);
+ public native void setDoubleVMFlag(String name, double value);
+ public native Boolean getBooleanVMFlag(String name);
+ public native Long getIntxVMFlag(String name);
+ public native Long getUintxVMFlag(String name);
+ public native Long getUint64VMFlag(String name);
+ public native String getStringVMFlag(String name);
+ public native Double getDoubleVMFlag(String name);
+ private final List<Function<String,Object>> flagsGetters = Arrays.asList(
+ this::getBooleanVMFlag, this::getIntxVMFlag, this::getUintxVMFlag,
+ this::getUint64VMFlag, this::getStringVMFlag, this::getDoubleVMFlag);
+
+ public Object getVMFlag(String name) {
+ return flagsGetters.stream()
+ .map(f -> f.apply(name))
+ .filter(x -> x != null)
+ .findAny()
+ .orElse(null);
+ }
}