--- a/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/agent/src/share/classes/sun/jvm/hotspot/memory/SystemDictionary.java Fri Jan 15 14:25:44 2010 -0800
@@ -63,12 +63,12 @@
javaSystemLoaderField = type.getOopField("_java_system_loader");
nofBuckets = db.lookupIntConstant("SystemDictionary::_nof_buckets").intValue();
- objectKlassField = type.getOopField(WK_KLASS("object_klass"));
- classLoaderKlassField = type.getOopField(WK_KLASS("classloader_klass"));
- stringKlassField = type.getOopField(WK_KLASS("string_klass"));
- systemKlassField = type.getOopField(WK_KLASS("system_klass"));
- threadKlassField = type.getOopField(WK_KLASS("thread_klass"));
- threadGroupKlassField = type.getOopField(WK_KLASS("threadGroup_klass"));
+ objectKlassField = type.getOopField(WK_KLASS("Object_klass"));
+ classLoaderKlassField = type.getOopField(WK_KLASS("ClassLoader_klass"));
+ stringKlassField = type.getOopField(WK_KLASS("String_klass"));
+ systemKlassField = type.getOopField(WK_KLASS("System_klass"));
+ threadKlassField = type.getOopField(WK_KLASS("Thread_klass"));
+ threadGroupKlassField = type.getOopField(WK_KLASS("ThreadGroup_klass"));
}
// These WK functions must follow the definitions in systemDictionary.hpp:
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
//
-// Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
+// Copyright 1998-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1885,6 +1885,10 @@
return RegMask();
}
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+ return RegMask();
+}
+
%}
@@ -6664,7 +6668,7 @@
ins_pipe(ialu_imm);
%}
-instruct cmovII_U_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
+instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
ins_cost(150);
size(4);
@@ -6673,7 +6677,7 @@
ins_pipe(ialu_reg);
%}
-instruct cmovII_U_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
+instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
ins_cost(140);
size(4);
@@ -6719,6 +6723,16 @@
ins_pipe(ialu_reg);
%}
+// This instruction also works with CmpN so we don't need cmovNN_reg.
+instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
+ match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst" %}
+ ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_reg);
+%}
+
instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
@@ -6756,6 +6770,16 @@
ins_pipe(ialu_reg);
%}
+instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
+ match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
+ ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_reg);
+%}
+
instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
ins_cost(140);
@@ -6766,6 +6790,16 @@
ins_pipe(ialu_imm);
%}
+instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
+ match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
+ ins_cost(140);
+
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
+ ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_imm);
+%}
+
instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
@@ -6805,6 +6839,17 @@
ins_pipe(int_conditional_float_move);
%}
+instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
+ match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "FMOVS$cmp $icc,$src,$dst" %}
+ opcode(0x101);
+ ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(int_conditional_float_move);
+%}
+
// Conditional move,
instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
@@ -6838,6 +6883,17 @@
ins_pipe(int_conditional_double_move);
%}
+instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
+ match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "FMOVD$cmp $icc,$src,$dst" %}
+ opcode(0x102);
+ ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(int_conditional_double_move);
+%}
+
// Conditional move,
instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
@@ -6877,6 +6933,17 @@
%}
+instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
+ match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
+ ins_cost(150);
+
+ size(4);
+ format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
+ ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
+ ins_pipe(ialu_reg);
+%}
+
+
instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
ins_cost(150);
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -330,6 +330,14 @@
// This is the sp before any possible extension (adapter/locals).
intptr_t* unextended_sp = interpreter_frame_sender_sp();
+ address sender_pc = this->sender_pc();
+ CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+ assert(sender_cb, "sanity");
+ nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+ if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
+ unextended_sp = (intptr_t*) at(link_offset);
+ }
+
// The interpreter and compiler(s) always save EBP/RBP in a known
// location on entry. We must record where that location is
// so that if EBP/RBP was live on callout from c2 we can find
@@ -352,7 +360,7 @@
#endif // AMD64
}
#endif /* COMPILER2 */
- return frame(sp, unextended_sp, link(), sender_pc());
+ return frame(sp, unextended_sp, link(), sender_pc);
}
@@ -375,6 +383,18 @@
intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
+ intptr_t* unextended_sp = sender_sp;
+ // If we are returning to a compiled method handle call site,
+ // the saved_fp will in fact be a saved value of the unextended SP.
+ // The simplest way to tell whether we are returning to such a call
+ // site is as follows:
+ CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
+ assert(sender_cb, "sanity");
+ nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+ if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
+ unextended_sp = saved_fp;
+ }
+
if (map->update_map()) {
// Tell GC to use argument oopmaps for some runtime stubs that need it.
// For C1, the runtime stub might not have oop maps, so set this flag
@@ -399,7 +419,7 @@
}
assert(sender_sp != sp(), "must have changed");
- return frame(sender_sp, saved_fp, sender_pc);
+ return frame(sender_sp, unextended_sp, saved_fp, sender_pc);
}
frame frame::sender(RegisterMap* map) const {
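
The two hunks above apply the same recovery rule in both the interpreted-sender and compiled-sender paths. As an illustrative consolidation only (not part of the patch; the helper name is hypothetical), the shared logic reads roughly as follows, using the same HotSpot calls the hunks use:

    // Sketch: if the sender PC is a method handle return site, the slot that
    // normally holds the saved FP actually holds the caller's unextended SP
    // (stored there by the preserve_SP encoding at the call site).
    static intptr_t* recover_unextended_sp(address   sender_pc,
                                           intptr_t* default_unextended_sp,
                                           intptr_t* saved_fp_slot_value) {
      CodeBlob* sender_cb = CodeCache::find_blob_unsafe(sender_pc);
      assert(sender_cb != NULL, "sanity");
      nmethod* sender_nm = sender_cb->as_nmethod_or_null();
      if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
        return saved_fp_slot_value;
      }
      return default_unextended_sp;
    }
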
--- a/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/frame_x86.inline.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -225,11 +225,12 @@
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
intptr_t* last_sp = interpreter_frame_last_sp();
- if (last_sp == NULL ) {
+ if (last_sp == NULL) {
return sp();
} else {
- // sp() may have been extended by an adapter
- assert(last_sp < fp() && last_sp >= sp(), "bad tos");
+ // sp() may have been extended or shrunk by an adapter. At least
+ // check that we don't fall behind the legal region.
+ assert(last_sp < (intptr_t*) interpreter_frame_monitor_begin(), "bad tos");
return last_sp;
}
}
--- a/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/methodHandles_x86.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -65,9 +65,9 @@
// Verify that argslot lies within (rsp, rbp].
Label L_ok, L_bad;
__ cmpptr(rax_argslot, rbp);
- __ jcc(Assembler::above, L_bad);
+ __ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot);
- __ jcc(Assembler::below, L_ok);
+ __ jccb(Assembler::below, L_ok);
__ bind(L_bad);
__ stop(error_message);
__ bind(L_ok);
@@ -136,9 +136,9 @@
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
- __ jcc(Assembler::greater, L_bad);
+ __ jccb(Assembler::greater, L_bad);
__ testl(arg_slots.as_register(), -stack_move_unit() - 1);
- __ jcc(Assembler::zero, L_ok);
+ __ jccb(Assembler::zero, L_ok);
__ bind(L_bad);
__ stop("assert arg_slots <= 0 and clear low bits");
__ bind(L_ok);
@@ -173,7 +173,7 @@
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
__ addptr(rdx_temp, wordSize);
__ cmpptr(rdx_temp, rax_argslot);
- __ jcc(Assembler::less, loop);
+ __ jccb(Assembler::less, loop);
}
// Now move the argslot down, to point to the opened-up space.
@@ -211,9 +211,9 @@
Label L_ok, L_bad;
__ lea(rbx_temp, Address(rax_argslot, arg_slots, Address::times_ptr));
__ cmpptr(rbx_temp, rbp);
- __ jcc(Assembler::above, L_bad);
+ __ jccb(Assembler::above, L_bad);
__ cmpptr(rsp, rax_argslot);
- __ jcc(Assembler::below, L_ok);
+ __ jccb(Assembler::below, L_ok);
__ bind(L_bad);
__ stop("deleted argument(s) must fall within current frame");
__ bind(L_ok);
@@ -221,9 +221,9 @@
if (arg_slots.is_register()) {
Label L_ok, L_bad;
__ cmpptr(arg_slots.as_register(), (int32_t) NULL_WORD);
- __ jcc(Assembler::less, L_bad);
+ __ jccb(Assembler::less, L_bad);
__ testl(arg_slots.as_register(), -stack_move_unit() - 1);
- __ jcc(Assembler::zero, L_ok);
+ __ jccb(Assembler::zero, L_ok);
__ bind(L_bad);
__ stop("assert arg_slots >= 0 and clear low bits");
__ bind(L_ok);
@@ -258,7 +258,7 @@
__ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
__ addptr(rdx_temp, -wordSize);
__ cmpptr(rdx_temp, rsp);
- __ jcc(Assembler::greaterEqual, loop);
+ __ jccb(Assembler::greaterEqual, loop);
}
// Now move the argslot up, to point to the just-copied block.
@@ -268,8 +268,9 @@
}
#ifndef PRODUCT
+extern "C" void print_method_handle(oop mh);
void trace_method_handle_stub(const char* adaptername,
- oopDesc* mh,
+ oop mh,
intptr_t* entry_sp,
intptr_t* saved_sp,
intptr_t* saved_bp) {
@@ -280,6 +281,7 @@
adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
if (last_sp != saved_sp)
printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
+ if (Verbose) print_method_handle(mh);
}
#endif //PRODUCT
@@ -382,11 +384,11 @@
// FIXME: fill in _raise_exception_method with a suitable sun.dyn method
__ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
__ testptr(rbx_method, rbx_method);
- __ jcc(Assembler::zero, no_method);
+ __ jccb(Assembler::zero, no_method);
int jobject_oop_offset = 0;
__ movptr(rbx_method, Address(rbx_method, jobject_oop_offset)); // dereference the jobject
__ testptr(rbx_method, rbx_method);
- __ jcc(Assembler::zero, no_method);
+ __ jccb(Assembler::zero, no_method);
__ verify_oop(rbx_method);
__ push(rdi_pc); // and restore caller PC
__ jmp(rbx_method_fie);
@@ -533,16 +535,15 @@
if (arg_type == T_OBJECT) {
__ movptr(Address(rax_argslot, 0), rbx_temp);
} else {
- __ load_sized_value(rbx_temp, prim_value_addr,
+ __ load_sized_value(rdx_temp, prim_value_addr,
type2aelembytes(arg_type), is_signed_subword_type(arg_type));
- __ movptr(Address(rax_argslot, 0), rbx_temp);
+ __ movptr(Address(rax_argslot, 0), rdx_temp);
#ifndef _LP64
if (arg_slots == 2) {
- __ movl(rbx_temp, prim_value_addr.plus_disp(wordSize));
- __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rbx_temp);
+ __ movl(rdx_temp, prim_value_addr.plus_disp(wordSize));
+ __ movl(Address(rax_argslot, Interpreter::stackElementSize()), rdx_temp);
}
#endif //_LP64
- break;
}
if (direct_to_method) {
@@ -584,7 +585,7 @@
Label done;
__ movptr(rdx_temp, vmarg);
__ testl(rdx_temp, rdx_temp);
- __ jcc(Assembler::zero, done); // no cast if null
+ __ jccb(Assembler::zero, done); // no cast if null
__ load_klass(rdx_temp, rdx_temp);
// live at this point:
@@ -675,24 +676,24 @@
// (now we are done with the old MH)
// original 32-bit vmdata word must be of this form:
- // | MBZ:16 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
- __ xchgl(rcx, rbx_vminfo); // free rcx for shifts
+ // | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
+ __ xchgptr(rcx, rbx_vminfo); // free rcx for shifts
__ shll(rdx_temp /*, rcx*/);
Label zero_extend, done;
__ testl(rcx, CONV_VMINFO_SIGN_FLAG);
- __ jcc(Assembler::zero, zero_extend);
+ __ jccb(Assembler::zero, zero_extend);
// this path is taken for int->byte, int->short
__ sarl(rdx_temp /*, rcx*/);
- __ jmp(done);
+ __ jmpb(done);
__ bind(zero_extend);
// this is taken for int->char
__ shrl(rdx_temp /*, rcx*/);
__ bind(done);
- __ movptr(vmarg, rdx_temp);
- __ xchgl(rcx, rbx_vminfo); // restore rcx_recv
+ __ movl(vmarg, rdx_temp);
+ __ xchgptr(rcx, rbx_vminfo); // restore rcx_recv
__ jump_to_method_handle_entry(rcx_recv, rdx_temp);
}
@@ -861,7 +862,7 @@
// Verify that argslot > destslot, by at least swap_bytes.
Label L_ok;
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::aboveEqual, L_ok);
+ __ jccb(Assembler::aboveEqual, L_ok);
__ stop("source must be above destination (upward rotation)");
__ bind(L_ok);
}
@@ -877,7 +878,7 @@
__ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
__ addptr(rax_argslot, -wordSize);
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::aboveEqual, loop);
+ __ jccb(Assembler::aboveEqual, loop);
} else {
__ addptr(rax_argslot, swap_bytes);
#ifdef ASSERT
@@ -885,7 +886,7 @@
// Verify that argslot < destslot, by at least swap_bytes.
Label L_ok;
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::belowEqual, L_ok);
+ __ jccb(Assembler::belowEqual, L_ok);
__ stop("source must be below destination (downward rotation)");
__ bind(L_ok);
}
@@ -901,7 +902,7 @@
__ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
__ addptr(rax_argslot, wordSize);
__ cmpptr(rax_argslot, rbx_destslot);
- __ jcc(Assembler::belowEqual, loop);
+ __ jccb(Assembler::belowEqual, loop);
}
// pop the original first chunk into the destination slot, now free
@@ -967,7 +968,7 @@
__ addptr(rax_argslot, wordSize);
__ addptr(rdx_newarg, wordSize);
__ cmpptr(rdx_newarg, rbx_oldarg);
- __ jcc(Assembler::less, loop);
+ __ jccb(Assembler::less, loop);
__ pop(rdi); // restore temp
@@ -1119,7 +1120,7 @@
}
__ addptr(rax_argslot, Interpreter::stackElementSize());
__ cmpptr(rax_argslot, rdx_argslot_limit);
- __ jcc(Assembler::less, loop);
+ __ jccb(Assembler::less, loop);
} else if (length_constant == 0) {
__ bind(skip_array_check);
// nothing to copy
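
Most of the changes in this file swap jcc for jccb on branches to nearby labels. On x86 the short form is 2 bytes (opcode + rel8) versus 6 bytes for the near form (0x0F 0x8x + rel32), and it is only legal when the bound label lands within -128..+127 bytes of the next instruction, which holds for these small assert blocks and tight copy loops. The pattern, shown as an illustrative fragment taken from the first hunk (error_message comes from the surrounding code):

    Label L_ok, L_bad;
    __ cmpptr(rax_argslot, rbp);
    __ jccb(Assembler::above, L_bad);   // 2-byte short branch; target is a few bytes away
    __ cmpptr(rsp, rax_argslot);
    __ jccb(Assembler::below, L_ok);
    __ bind(L_bad);
    __ stop(error_message);
    __ bind(L_ok);
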
--- a/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/runtime_x86_32.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -43,11 +43,11 @@
// This code is entered with a jmp.
//
// Arguments:
-// rax,: exception oop
+// rax: exception oop
// rdx: exception pc
//
// Results:
-// rax,: exception oop
+// rax: exception oop
// rdx: exception pc in caller or ???
// destination: exception handler of caller
//
@@ -113,17 +113,17 @@
__ addptr(rsp, return_off * wordSize); // Epilog!
__ pop(rdx); // Exception pc
+ // rax: exception handler for given <exception oop/exception pc>
- // rax,: exception handler for given <exception oop/exception pc>
+ // Restore SP from BP if the exception PC is a MethodHandle call.
+ __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
+ __ cmovptr(Assembler::notEqual, rsp, rbp);
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.
__ push(rax);
- // rcx contains handler address
-
- __ get_thread(rcx); // TLS
// Get the exception
__ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
// Get the exception pc in case we are deoptimized
@@ -137,7 +137,7 @@
__ pop(rcx);
- // rax,: exception oop
+ // rax: exception oop
// rcx: exception handler
// rdx: exception pc
__ jmp (rcx);
--- a/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -638,6 +638,10 @@
__ movptr(rax, Address(rsp, 0));
+ // Must preserve original SP for loading incoming arguments because
+ // we need to align the outgoing SP for compiled code.
+ __ movptr(r11, rsp);
+
// Cut-out for having no stack args. Since up to 2 int/oop args are passed
// in registers, we will occasionally have no stack args.
int comp_words_on_stack = 0;
@@ -661,6 +665,10 @@
// as far as the placement of the call instruction
__ push(rax);
+ // Put saved SP in another register
+ const Register saved_sp = rax;
+ __ movptr(saved_sp, r11);
+
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
__ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
@@ -680,11 +688,7 @@
assert(!regs[i].second()->is_valid() || regs[i].first()->next() == regs[i].second(),
"scrambled load targets?");
// Load in argument order going down.
- // int ld_off = (total_args_passed + comp_words_on_stack -i)*wordSize;
- // base ld_off on r13 (sender_sp) as the stack alignment makes offsets from rsp
- // unpredictable
- int ld_off = ((total_args_passed - 1) - i)*Interpreter::stackElementSize();
-
+ int ld_off = (total_args_passed - i)*Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
// Point to interpreter value (vs. tag)
int next_off = ld_off - Interpreter::stackElementSize();
//
@@ -699,10 +703,14 @@
if (r_1->is_stack()) {
// Convert stack slot to an SP offset (+ wordSize to account for return address )
int st_off = regs[i].first()->reg2stack()*VMRegImpl::stack_slot_size + wordSize;
+
+ // We can use r13 as a temp here because compiled code doesn't need r13 as an input,
+ // and if we end up going through a c2i adapter because of a miss, a reasonable value
+ // of r13 will be generated.
if (!r_2->is_valid()) {
// sign extend???
- __ movl(rax, Address(r13, ld_off));
- __ movptr(Address(rsp, st_off), rax);
+ __ movl(r13, Address(saved_sp, ld_off));
+ __ movptr(Address(rsp, st_off), r13);
} else {
//
// We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
@@ -715,9 +723,9 @@
// ld_off is MSW so get LSW
const int offset = (sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
next_off : ld_off;
- __ movq(rax, Address(r13, offset));
+ __ movq(r13, Address(saved_sp, offset));
// st_off is LSW (i.e. reg.first())
- __ movq(Address(rsp, st_off), rax);
+ __ movq(Address(rsp, st_off), r13);
}
} else if (r_1->is_Register()) { // Register argument
Register r = r_1->as_Register();
@@ -732,16 +740,16 @@
next_off : ld_off;
// this can be a misaligned move
- __ movq(r, Address(r13, offset));
+ __ movq(r, Address(saved_sp, offset));
} else {
// sign extend and use a full word?
- __ movl(r, Address(r13, ld_off));
+ __ movl(r, Address(saved_sp, ld_off));
}
} else {
if (!r_2->is_valid()) {
- __ movflt(r_1->as_XMMRegister(), Address(r13, ld_off));
+ __ movflt(r_1->as_XMMRegister(), Address(saved_sp, ld_off));
} else {
- __ movdbl(r_1->as_XMMRegister(), Address(r13, next_off));
+ __ movdbl(r_1->as_XMMRegister(), Address(saved_sp, next_off));
}
}
}
@@ -3319,6 +3327,10 @@
// rax: exception handler
+ // Restore SP from BP if the exception PC is a MethodHandle call.
+ __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
+ __ cmovptr(Assembler::notEqual, rsp, rbp);
+
// We have a handler in rax (could be deopt blob).
__ mov(r8, rax);
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1488,7 +1488,10 @@
if (interpreter_frame != NULL) {
#ifdef ASSERT
- assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
+ if (!EnableMethodHandles)
+ // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+ // Probably, since deoptimization doesn't work yet.
+ assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
--- a/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -449,8 +449,12 @@
__ addptr(rax, stack_base);
__ subptr(rax, stack_size);
+ // Use the maximum number of pages we might bang.
+ const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
+ (StackRedPages+StackYellowPages);
+
// add in the red and yellow zone sizes
- __ addptr(rax, (StackRedPages + StackYellowPages) * page_size);
+ __ addptr(rax, max_pages * page_size);
// check against the current stack bottom
__ cmpptr(rsp, rax);
@@ -1502,8 +1506,10 @@
tempcount* Interpreter::stackElementWords() + popframe_extra_args;
if (interpreter_frame != NULL) {
#ifdef ASSERT
- assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(),
- "Frame not properly walkable");
+ if (!EnableMethodHandles)
+ // @@@ FIXME: Should we correct interpreter_frame_sender_sp in the calling sequences?
+ // Probably, since deoptimization doesn't work yet.
+ assert(caller->unextended_sp() == interpreter_frame->interpreter_frame_sender_sp(), "Frame not properly walkable");
assert(caller->sp() == interpreter_frame->sender_sp(), "Frame not properly walkable(2)");
#endif
--- a/hotspot/src/cpu/x86/vm/x86_32.ad Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_32.ad Fri Jan 15 14:25:44 2010 -0800
@@ -268,22 +268,36 @@
static jlong *float_signflip_pool = double_quadword(&fp_signmask_pool[3*2], CONST64(0x8000000080000000), CONST64(0x8000000080000000));
static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
+// Offset hacking within calls.
+static int pre_call_FPU_size() {
+ if (Compile::current()->in_24_bit_fp_mode())
+ return 6; // fldcw
+ return 0;
+}
+
+static int preserve_SP_size() {
+ return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
+}
+
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
- return 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0); // 5 bytes from start of call to where return address points
+ int offset = 5 + pre_call_FPU_size(); // 5 bytes from start of call to where return address points
+ if (_method_handle_invoke)
+ offset += preserve_SP_size();
+ return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
- return 10 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0); // 10 bytes from start of call to where return address points
+ return 10 + pre_call_FPU_size(); // 10 bytes from start of call to where return address points
}
static int sizeof_FFree_Float_Stack_All = -1;
int MachCallRuntimeNode::ret_addr_offset() {
assert(sizeof_FFree_Float_Stack_All != -1, "must have been emitted already");
- return sizeof_FFree_Float_Stack_All + 5 + (Compile::current()->in_24_bit_fp_mode() ? 6 : 0);
+ return sizeof_FFree_Float_Stack_All + 5 + pre_call_FPU_size();
}
// Indicate if the safepoint node needs the polling page as an input.
@@ -299,8 +313,16 @@
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
- if (Compile::current()->in_24_bit_fp_mode())
- current_offset += 6; // skip fldcw in pre_call_FPU, if any
+ current_offset += pre_call_FPU_size(); // skip fldcw, if any
+ current_offset += 1; // skip call opcode byte
+ return round_to(current_offset, alignment_required()) - current_offset;
+}
+
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const {
+ current_offset += pre_call_FPU_size(); // skip fldcw, if any
+ current_offset += preserve_SP_size(); // skip mov rbp, rsp
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
}
@@ -308,8 +330,7 @@
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
- if (Compile::current()->in_24_bit_fp_mode())
- current_offset += 6; // skip fldcw in pre_call_FPU, if any
+ current_offset += pre_call_FPU_size(); // skip fldcw, if any
current_offset += 5; // skip MOV instruction
current_offset += 1; // skip call opcode byte
return round_to(current_offset, alignment_required()) - current_offset;
@@ -1460,6 +1481,10 @@
return RegMask();
}
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+ return EBP_REG_mask;
+}
+
%}
//----------ENCODING BLOCK-----------------------------------------------------
@@ -1772,10 +1797,13 @@
enc_class pre_call_FPU %{
// If method sets FPU control word restore it here
+ debug_only(int off0 = cbuf.code_size());
if( Compile::current()->in_24_bit_fp_mode() ) {
MacroAssembler masm(&cbuf);
masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std()));
}
+ debug_only(int off1 = cbuf.code_size());
+ assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction");
%}
enc_class post_call_FPU %{
@@ -1786,6 +1814,21 @@
}
%}
+ enc_class preserve_SP %{
+ debug_only(int off0 = cbuf.code_size());
+ MacroAssembler _masm(&cbuf);
+ // RBP is preserved across all calls, even compiled calls.
+ // Use it to preserve RSP in places where the callee might change the SP.
+ __ movptr(rbp, rsp);
+ debug_only(int off1 = cbuf.code_size());
+ assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+ %}
+
+ enc_class restore_SP %{
+ MacroAssembler _masm(&cbuf);
+ __ movptr(rsp, rbp);
+ %}
+
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
// CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
// who we intended to call.
@@ -13406,6 +13449,7 @@
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
+ predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -13420,6 +13464,30 @@
ins_alignment(4);
%}
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+// compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, eBPRegP ebp) %{
+ match(CallStaticJava);
+ predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
+ effect(USE meth);
+ // EBP is saved by all callees (for interpreter stack correction).
+ // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+ ins_cost(300);
+ format %{ "CALL,static/MethodHandle " %}
+ opcode(0xE8); /* E8 cd */
+ ins_encode( pre_call_FPU,
+ preserve_SP,
+ Java_Static_Call( meth ),
+ restore_SP,
+ call_epilog,
+ post_call_FPU );
+ ins_pipe( pipe_slow );
+ ins_pc_relative(1);
+ ins_alignment(4);
+%}
+
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
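
For the 32-bit method handle call above, ret_addr_offset() has to count every byte emitted before the call's return point: the optional fldcw from pre_call_FPU (6 bytes when the method runs in 24-bit FP mode), the preserve_SP move (2 bytes, plus a REX prefix on 64-bit), and the 5-byte call rel32 itself. A minimal standalone sketch of that arithmetic (illustrative only; the boolean parameters stand in for the checks the ad file performs):

    // Sketch of the return-address offset bookkeeping for CallStaticJavaHandle.
    static int mh_static_call_ret_addr_offset(bool in_24_bit_fp_mode, bool lp64) {
      int offset = 5;                       // E8 rel32: the call instruction itself
      if (in_24_bit_fp_mode) offset += 6;   // pre_call_FPU: fldcw <mem32>
      offset += (lp64 ? 1 : 0) + 2;         // preserve_SP: [REX.W] mov rbp, rsp
      return offset;
    }
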
--- a/hotspot/src/cpu/x86/vm/x86_64.ad Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/x86/vm/x86_64.ad Fri Jan 15 14:25:44 2010 -0800
@@ -551,12 +551,19 @@
#define __ _masm.
+static int preserve_SP_size() {
+ return LP64_ONLY(1 +) 2; // [rex,] op, rm(reg/reg)
+}
+
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset()
{
- return 5; // 5 bytes from start of call to where return address points
+ int offset = 5; // 5 bytes from start of call to where return address points
+ if (_method_handle_invoke)
+ offset += preserve_SP_size();
+ return offset;
}
int MachCallDynamicJavaNode::ret_addr_offset()
@@ -589,6 +596,15 @@
// The address of the call instruction needs to be 4-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
+int CallStaticJavaHandleNode::compute_padding(int current_offset) const
+{
+ current_offset += preserve_SP_size(); // skip mov rbp, rsp
+ current_offset += 1; // skip call opcode byte
+ return round_to(current_offset, alignment_required()) - current_offset;
+}
+
+// The address of the call instruction needs to be 4-byte aligned to
+// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
{
current_offset += 11; // skip movq instruction + call opcode byte
@@ -2113,6 +2129,10 @@
return LONG_RDX_REG_mask;
}
+const RegMask Matcher::method_handle_invoke_SP_save_mask() {
+ return PTR_RBP_REG_mask;
+}
+
static Address build_address(int b, int i, int s, int d) {
Register index = as_Register(i);
Address::ScaleFactor scale = (Address::ScaleFactor)s;
@@ -2608,6 +2628,21 @@
RELOC_DISP32);
%}
+ enc_class preserve_SP %{
+ debug_only(int off0 = cbuf.code_size());
+ MacroAssembler _masm(&cbuf);
+ // RBP is preserved across all calls, even compiled calls.
+ // Use it to preserve RSP in places where the callee might change the SP.
+ __ movptr(rbp, rsp);
+ debug_only(int off1 = cbuf.code_size());
+ assert(off1 - off0 == preserve_SP_size(), "correct size prediction");
+ %}
+
+ enc_class restore_SP %{
+ MacroAssembler _masm(&cbuf);
+ __ movptr(rsp, rbp);
+ %}
+
enc_class Java_Static_Call(method meth)
%{
// JAVA STATIC CALL
@@ -12526,9 +12561,9 @@
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
-instruct CallStaticJavaDirect(method meth)
-%{
+instruct CallStaticJavaDirect(method meth) %{
match(CallStaticJava);
+ predicate(!((CallStaticJavaNode*) n)->is_method_handle_invoke());
effect(USE meth);
ins_cost(300);
@@ -12540,6 +12575,28 @@
ins_alignment(4);
%}
+// Call Java Static Instruction (method handle version)
+// Note: If this code changes, the corresponding ret_addr_offset() and
+// compute_padding() functions will have to be adjusted.
+instruct CallStaticJavaHandle(method meth, rbp_RegP rbp) %{
+ match(CallStaticJava);
+ predicate(((CallStaticJavaNode*) n)->is_method_handle_invoke());
+ effect(USE meth);
+ // RBP is saved by all callees (for interpreter stack correction).
+ // We use it here for a similar purpose, in {preserve,restore}_SP.
+
+ ins_cost(300);
+ format %{ "call,static/MethodHandle " %}
+ opcode(0xE8); /* E8 cd */
+ ins_encode(preserve_SP,
+ Java_Static_Call(meth),
+ restore_SP,
+ call_epilog);
+ ins_pipe(pipe_slow);
+ ins_pc_relative(1);
+ ins_alignment(4);
+%}
+
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
--- a/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -245,7 +245,7 @@
if (handlerAddr == NULL) {
CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
if (HAS_PENDING_EXCEPTION)
- goto unwind_and_return;
+ goto unlock_unwind_and_return;
handlerAddr = method->signature_handler();
assert(handlerAddr != NULL, "eh?");
@@ -254,7 +254,7 @@
CALL_VM_NOCHECK(handlerAddr =
InterpreterRuntime::slow_signature_handler(thread, method, NULL,NULL));
if (HAS_PENDING_EXCEPTION)
- goto unwind_and_return;
+ goto unlock_unwind_and_return;
}
handler = \
InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
@@ -365,10 +365,10 @@
// Reset handle block
thread->active_handles()->clear();
- // Unlock if necessary. It seems totally wrong that this
- // is skipped in the event of an exception but apparently
- // the template interpreter does this so we do too.
- if (monitor && !HAS_PENDING_EXCEPTION) {
+ unlock_unwind_and_return:
+
+ // Unlock if necessary
+ if (monitor) {
BasicLock *lock = monitor->lock();
markOop header = lock->displaced_header();
oop rcvr = monitor->obj();
--- a/hotspot/src/share/vm/c1/c1_IR.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/c1/c1_IR.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -251,8 +251,9 @@
DebugToken* expvals = recorder->create_scope_values(expressions());
DebugToken* monvals = recorder->create_monitor_values(monitors());
// reexecute allowed only for the topmost frame
- bool reexecute = topmost ? should_reexecute() : false;
- recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, locvals, expvals, monvals);
+ bool reexecute = topmost ? should_reexecute() : false;
+ bool is_method_handle_invoke = false;
+ recorder->describe_scope(pc_offset, scope()->method(), bci(), reexecute, is_method_handle_invoke, locvals, expvals, monvals);
}
};
--- a/hotspot/src/share/vm/c1/c1_Runtime1.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/c1/c1_Runtime1.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -425,7 +425,7 @@
assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
assert(exception->is_oop(), "just checking");
// Check that exception is a subclass of Throwable, otherwise we have a VerifyError
- if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
if (ExitVMOnVerifyError) vm_exit(-1);
ShouldNotReachHere();
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/ci/ciCPCache.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciCPCache.cpp.incl"
+
+// ciCPCache
+
+// ------------------------------------------------------------------
+// ciCPCache::get_f1_offset
+size_t ciCPCache::get_f1_offset(int index) {
+ // Calculate the offset from the constantPoolCacheOop to the f1
+ // field.
+ ByteSize f1_offset =
+ constantPoolCacheOopDesc::entry_offset(index) +
+ ConstantPoolCacheEntry::f1_offset();
+
+ return in_bytes(f1_offset);
+}
+
+
+// ------------------------------------------------------------------
+// ciCPCache::print
+//
+// Print debugging information about the cache.
+void ciCPCache::print() {
+ Unimplemented();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/ci/ciCPCache.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciCPCache
+//
+// This class represents a constant pool cache.
+//
+// Note: This class is called ciCPCache as ciConstantPoolCache is used
+// for something different.
+class ciCPCache : public ciObject {
+public:
+ ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {}
+
+ // What kind of ciObject is this?
+ bool is_cpcache() const { return true; }
+
+ // Get the offset in bytes from the oop to the f1 field of the
+ // requested entry.
+ size_t get_f1_offset(int index);
+
+ void print();
+};
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/ci/ciCallSite.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciCallSite.cpp.incl"
+
+// ciCallSite
+
+// ------------------------------------------------------------------
+// ciCallSite::get_target
+//
+// Return the target MethodHandle of this CallSite.
+ciMethodHandle* ciCallSite::get_target() const {
+ VM_ENTRY_MARK;
+ oop method_handle_oop = java_dyn_CallSite::target(get_oop());
+ return CURRENT_ENV->get_object(method_handle_oop)->as_method_handle();
+}
+
+// ------------------------------------------------------------------
+// ciCallSite::print
+//
+// Print debugging information about the CallSite.
+void ciCallSite::print() {
+ Unimplemented();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/ci/ciCallSite.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciCallSite
+//
+// The class represents a java.dyn.CallSite object.
+class ciCallSite : public ciInstance {
+public:
+ ciCallSite(instanceHandle h_i) : ciInstance(h_i) {}
+
+ // What kind of ciObject is this?
+ bool is_call_site() const { return true; }
+
+ // Return the target MethodHandle of this CallSite.
+ ciMethodHandle* get_target() const;
+
+ void print();
+};
--- a/hotspot/src/share/vm/ci/ciClassList.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciClassList.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -25,6 +25,7 @@
class ciEnv;
class ciObjectFactory;
class ciConstantPoolCache;
+class ciCPCache;
class ciField;
class ciConstant;
@@ -42,6 +43,8 @@
class ciObject;
class ciNullObject;
class ciInstance;
+class ciCallSite;
+class ciMethodHandle;
class ciMethod;
class ciMethodData;
class ciReceiverTypeData; // part of ciMethodData
@@ -78,6 +81,7 @@
// Any more access must be given explicitly.
#define CI_PACKAGE_ACCESS_TO \
friend class ciObjectFactory; \
+friend class ciCallSite; \
friend class ciConstantPoolCache; \
friend class ciField; \
friend class ciConstant; \
@@ -93,6 +97,7 @@
friend class ciInstance; \
friend class ciMethod; \
friend class ciMethodData; \
+friend class ciMethodHandle; \
friend class ciReceiverTypeData; \
friend class ciSymbol; \
friend class ciArray; \
--- a/hotspot/src/share/vm/ci/ciEnv.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciEnv.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -38,17 +38,9 @@
ciTypeArrayKlassKlass* ciEnv::_type_array_klass_klass_instance;
ciObjArrayKlassKlass* ciEnv::_obj_array_klass_klass_instance;
-ciInstanceKlass* ciEnv::_ArrayStoreException;
-ciInstanceKlass* ciEnv::_Class;
-ciInstanceKlass* ciEnv::_ClassCastException;
-ciInstanceKlass* ciEnv::_Object;
-ciInstanceKlass* ciEnv::_Throwable;
-ciInstanceKlass* ciEnv::_Thread;
-ciInstanceKlass* ciEnv::_OutOfMemoryError;
-ciInstanceKlass* ciEnv::_String;
-ciInstanceKlass* ciEnv::_StringBuffer;
-ciInstanceKlass* ciEnv::_StringBuilder;
-ciInstanceKlass* ciEnv::_Integer;
+#define WK_KLASS_DEFN(name, ignore_s, ignore_o) ciInstanceKlass* ciEnv::_##name = NULL;
+WK_KLASSES_DO(WK_KLASS_DEFN)
+#undef WK_KLASS_DEFN
ciSymbol* ciEnv::_unloaded_cisymbol = NULL;
ciInstanceKlass* ciEnv::_unloaded_ciinstance_klass = NULL;
@@ -442,12 +434,11 @@
// ciEnv::get_klass_by_index_impl
//
// Implementation of get_klass_by_index.
-ciKlass* ciEnv::get_klass_by_index_impl(ciInstanceKlass* accessor,
+ciKlass* ciEnv::get_klass_by_index_impl(constantPoolHandle cpool,
int index,
- bool& is_accessible) {
- assert(accessor->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
+ bool& is_accessible,
+ ciInstanceKlass* accessor) {
EXCEPTION_CONTEXT;
- constantPoolHandle cpool(THREAD, accessor->get_instanceKlass()->constants());
KlassHandle klass (THREAD, constantPoolOopDesc::klass_at_if_loaded(cpool, index));
symbolHandle klass_name;
if (klass.is_null()) {
@@ -509,22 +500,21 @@
// ciEnv::get_klass_by_index
//
// Get a klass from the constant pool.
-ciKlass* ciEnv::get_klass_by_index(ciInstanceKlass* accessor,
+ciKlass* ciEnv::get_klass_by_index(constantPoolHandle cpool,
int index,
- bool& is_accessible) {
- GUARDED_VM_ENTRY(return get_klass_by_index_impl(accessor, index, is_accessible);)
+ bool& is_accessible,
+ ciInstanceKlass* accessor) {
+ GUARDED_VM_ENTRY(return get_klass_by_index_impl(cpool, index, is_accessible, accessor);)
}
// ------------------------------------------------------------------
// ciEnv::get_constant_by_index_impl
//
// Implementation of get_constant_by_index().
-ciConstant ciEnv::get_constant_by_index_impl(ciInstanceKlass* accessor,
- int index) {
+ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool,
+ int index,
+ ciInstanceKlass* accessor) {
EXCEPTION_CONTEXT;
- instanceKlass* ik_accessor = accessor->get_instanceKlass();
- assert(ik_accessor->is_linked(), "must be linked before accessing constant pool");
- constantPoolOop cpool = ik_accessor->constants();
constantTag tag = cpool->tag_at(index);
if (tag.is_int()) {
return ciConstant(T_INT, (jint)cpool->int_at(index));
@@ -552,7 +542,7 @@
} else if (tag.is_klass() || tag.is_unresolved_klass()) {
// 4881222: allow ldc to take a class type
bool ignore;
- ciKlass* klass = get_klass_by_index_impl(accessor, index, ignore);
+ ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor);
if (HAS_PENDING_EXCEPTION) {
CLEAR_PENDING_EXCEPTION;
record_out_of_memory_failure();
@@ -561,6 +551,11 @@
assert (klass->is_instance_klass() || klass->is_array_klass(),
"must be an instance or array klass ");
return ciConstant(T_OBJECT, klass);
+ } else if (tag.is_object()) {
+ oop obj = cpool->object_at(index);
+ assert(obj->is_instance(), "must be an instance");
+ ciObject* ciobj = get_object(obj);
+ return ciConstant(T_OBJECT, ciobj);
} else {
ShouldNotReachHere();
return ciConstant();
@@ -597,9 +592,10 @@
// Pull a constant out of the constant pool. How appropriate.
//
// Implementation note: this query is currently in no way cached.
-ciConstant ciEnv::get_constant_by_index(ciInstanceKlass* accessor,
- int index) {
- GUARDED_VM_ENTRY(return get_constant_by_index_impl(accessor, index); )
+ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool,
+ int index,
+ ciInstanceKlass* accessor) {
+ GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);)
}
// ------------------------------------------------------------------
@@ -609,7 +605,7 @@
//
// Implementation note: this query is currently in no way cached.
bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor,
- int index) const {
+ int index) const {
GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); )
}
@@ -620,7 +616,7 @@
//
// Implementation note: this query is currently in no way cached.
bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor,
- int index) const {
+ int index) const {
GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); )
}
@@ -701,15 +697,12 @@
// ------------------------------------------------------------------
// ciEnv::get_method_by_index_impl
-ciMethod* ciEnv::get_method_by_index_impl(ciInstanceKlass* accessor,
- int index, Bytecodes::Code bc) {
- // Get the method's declared holder.
-
- assert(accessor->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
- constantPoolHandle cpool = accessor->get_instanceKlass()->constants();
+ciMethod* ciEnv::get_method_by_index_impl(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc,
+ ciInstanceKlass* accessor) {
int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
- ciKlass* holder = get_klass_by_index_impl(accessor, holder_index, holder_is_accessible);
+ ciKlass* holder = get_klass_by_index_impl(cpool, holder_index, holder_is_accessible, accessor);
ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);
// Get the method's name and signature.
@@ -736,6 +729,33 @@
// ------------------------------------------------------------------
+// ciEnv::get_fake_invokedynamic_method_impl
+ciMethod* ciEnv::get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc) {
+ assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
+
+ // Get the CallSite from the constant pool cache.
+ ConstantPoolCacheEntry* cpc_entry = cpool->cache()->secondary_entry_at(index);
+ assert(cpc_entry != NULL && cpc_entry->is_secondary_entry(), "sanity");
+ Handle call_site = cpc_entry->f1();
+
+ // Call site might not be linked yet.
+ if (call_site.is_null()) {
+ ciInstanceKlass* mh_klass = get_object(SystemDictionary::MethodHandle_klass())->as_instance_klass();
+ ciSymbol* sig_sym = get_object(cpool->signature_ref_at(index))->as_symbol();
+ return get_unloaded_method(mh_klass, ciSymbol::invoke_name(), sig_sym);
+ }
+
+ // Get the methodOop from the CallSite.
+ methodOop method_oop = (methodOop) java_dyn_CallSite::vmmethod(call_site());
+ assert(method_oop != NULL, "sanity");
+ assert(method_oop->is_method_handle_invoke(), "consistent");
+
+ return get_object(method_oop)->as_method();
+}
+
+
+// ------------------------------------------------------------------
// ciEnv::get_instance_klass_for_declared_method_holder
ciInstanceKlass* ciEnv::get_instance_klass_for_declared_method_holder(ciKlass* method_holder) {
// For the case of <array>.clone(), the method holder can be a ciArrayKlass
@@ -757,15 +777,19 @@
}
-
-
// ------------------------------------------------------------------
// ciEnv::get_method_by_index
-ciMethod* ciEnv::get_method_by_index(ciInstanceKlass* accessor,
- int index, Bytecodes::Code bc) {
- GUARDED_VM_ENTRY(return get_method_by_index_impl(accessor, index, bc);)
+ciMethod* ciEnv::get_method_by_index(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc,
+ ciInstanceKlass* accessor) {
+ if (bc == Bytecodes::_invokedynamic) {
+ GUARDED_VM_ENTRY(return get_fake_invokedynamic_method_impl(cpool, index, bc);)
+ } else {
+ GUARDED_VM_ENTRY(return get_method_by_index_impl(cpool, index, bc, accessor);)
+ }
}
+
// ------------------------------------------------------------------
// ciEnv::name_buffer
char *ciEnv::name_buffer(int req_len) {
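
The WK_KLASS_DEFN / WK_KLASS_DECL / WK_KLASS_FUNC macros above all ride on the WK_KLASSES_DO X-macro from systemDictionary.hpp, which applies its argument macro to every well-known klass entry. For a single entry (Object_klass is used here purely as an illustration), the three expansions amount to:

    // Illustrative expansion for one WK_KLASSES_DO entry; excerpt only.
    class ciEnv {
      static ciInstanceKlass* _Object_klass;                      // WK_KLASS_DECL
     public:
      ciInstanceKlass* Object_klass() { return _Object_klass; }   // WK_KLASS_FUNC
    };
    ciInstanceKlass* ciEnv::_Object_klass = NULL;                 // WK_KLASS_DEFN
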
--- a/hotspot/src/share/vm/ci/ciEnv.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciEnv.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -74,17 +74,9 @@
static ciTypeArrayKlassKlass* _type_array_klass_klass_instance;
static ciObjArrayKlassKlass* _obj_array_klass_klass_instance;
- static ciInstanceKlass* _ArrayStoreException;
- static ciInstanceKlass* _Class;
- static ciInstanceKlass* _ClassCastException;
- static ciInstanceKlass* _Object;
- static ciInstanceKlass* _Throwable;
- static ciInstanceKlass* _Thread;
- static ciInstanceKlass* _OutOfMemoryError;
- static ciInstanceKlass* _String;
- static ciInstanceKlass* _StringBuffer;
- static ciInstanceKlass* _StringBuilder;
- static ciInstanceKlass* _Integer;
+#define WK_KLASS_DECL(name, ignore_s, ignore_o) static ciInstanceKlass* _##name;
+ WK_KLASSES_DO(WK_KLASS_DECL)
+#undef WK_KLASS_DECL
static ciSymbol* _unloaded_cisymbol;
static ciInstanceKlass* _unloaded_ciinstance_klass;
@@ -120,37 +112,45 @@
bool require_local);
// Constant pool access.
- ciKlass* get_klass_by_index(ciInstanceKlass* loading_klass,
+ ciKlass* get_klass_by_index(constantPoolHandle cpool,
int klass_index,
- bool& is_accessible);
- ciConstant get_constant_by_index(ciInstanceKlass* loading_klass,
- int constant_index);
+ bool& is_accessible,
+ ciInstanceKlass* loading_klass);
+ ciConstant get_constant_by_index(constantPoolHandle cpool,
+ int constant_index,
+ ciInstanceKlass* accessor);
bool is_unresolved_string(ciInstanceKlass* loading_klass,
int constant_index) const;
bool is_unresolved_klass(ciInstanceKlass* loading_klass,
int constant_index) const;
ciField* get_field_by_index(ciInstanceKlass* loading_klass,
int field_index);
- ciMethod* get_method_by_index(ciInstanceKlass* loading_klass,
- int method_index, Bytecodes::Code bc);
+ ciMethod* get_method_by_index(constantPoolHandle cpool,
+ int method_index, Bytecodes::Code bc,
+ ciInstanceKlass* loading_klass);
// Implementation methods for loading and constant pool access.
ciKlass* get_klass_by_name_impl(ciKlass* accessing_klass,
ciSymbol* klass_name,
bool require_local);
- ciKlass* get_klass_by_index_impl(ciInstanceKlass* loading_klass,
+ ciKlass* get_klass_by_index_impl(constantPoolHandle cpool,
int klass_index,
- bool& is_accessible);
- ciConstant get_constant_by_index_impl(ciInstanceKlass* loading_klass,
- int constant_index);
+ bool& is_accessible,
+ ciInstanceKlass* loading_klass);
+ ciConstant get_constant_by_index_impl(constantPoolHandle cpool,
+ int constant_index,
+ ciInstanceKlass* loading_klass);
bool is_unresolved_string_impl (instanceKlass* loading_klass,
int constant_index) const;
bool is_unresolved_klass_impl (instanceKlass* loading_klass,
int constant_index) const;
ciField* get_field_by_index_impl(ciInstanceKlass* loading_klass,
int field_index);
- ciMethod* get_method_by_index_impl(ciInstanceKlass* loading_klass,
- int method_index, Bytecodes::Code bc);
+ ciMethod* get_method_by_index_impl(constantPoolHandle cpool,
+ int method_index, Bytecodes::Code bc,
+ ciInstanceKlass* loading_klass);
+ ciMethod* get_fake_invokedynamic_method_impl(constantPoolHandle cpool,
+ int index, Bytecodes::Code bc);
// Helper methods
bool check_klass_accessibility(ciKlass* accessing_klass,
@@ -292,39 +292,13 @@
// Access to certain well known ciObjects.
- ciInstanceKlass* ArrayStoreException_klass() {
- return _ArrayStoreException;
- }
- ciInstanceKlass* Class_klass() {
- return _Class;
- }
- ciInstanceKlass* ClassCastException_klass() {
- return _ClassCastException;
- }
- ciInstanceKlass* Object_klass() {
- return _Object;
- }
- ciInstanceKlass* Throwable_klass() {
- return _Throwable;
+#define WK_KLASS_FUNC(name, ignore_s, ignore_o) \
+ ciInstanceKlass* name() { \
+ return _##name;\
}
- ciInstanceKlass* Thread_klass() {
- return _Thread;
- }
- ciInstanceKlass* OutOfMemoryError_klass() {
- return _OutOfMemoryError;
- }
- ciInstanceKlass* String_klass() {
- return _String;
- }
- ciInstanceKlass* StringBuilder_klass() {
- return _StringBuilder;
- }
- ciInstanceKlass* StringBuffer_klass() {
- return _StringBuffer;
- }
- ciInstanceKlass* Integer_klass() {
- return _Integer;
- }
+ WK_KLASSES_DO(WK_KLASS_FUNC)
+#undef WK_KLASS_FUNC
+
ciInstance* NullPointerException_instance() {
assert(_NullPointerException_instance != NULL, "initialization problem");
return _NullPointerException_instance;
--- a/hotspot/src/share/vm/ci/ciExceptionHandler.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciExceptionHandler.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2003 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,12 +34,16 @@
//
// Get the exception klass that this handler catches.
ciInstanceKlass* ciExceptionHandler::catch_klass() {
+ VM_ENTRY_MARK;
assert(!is_catch_all(), "bad index");
if (_catch_klass == NULL) {
bool will_link;
- ciKlass* k = CURRENT_ENV->get_klass_by_index(_loading_klass,
+ assert(_loading_klass->get_instanceKlass()->is_linked(), "must be linked before accessing constant pool");
+ constantPoolHandle cpool(_loading_klass->get_instanceKlass()->constants());
+ ciKlass* k = CURRENT_ENV->get_klass_by_index(cpool,
_catch_klass_index,
- will_link);
+ will_link,
+ _loading_klass);
if (!will_link && k->is_loaded()) {
GUARDED_VM_ENTRY(
k = CURRENT_ENV->get_unloaded_klass(_loading_klass, k->name());
--- a/hotspot/src/share/vm/ci/ciField.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciField.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -86,7 +86,7 @@
bool ignore;
// This is not really a class reference; the index always refers to the
// field's type signature, as a symbol. Linkage checks do not apply.
- _type = ciEnv::current(thread)->get_klass_by_index(klass, sig_index, ignore);
+ _type = ciEnv::current(thread)->get_klass_by_index(cpool, sig_index, ignore, klass);
} else {
_type = ciType::make(field_type);
}
@@ -100,9 +100,9 @@
int holder_index = cpool->klass_ref_index_at(index);
bool holder_is_accessible;
ciInstanceKlass* declared_holder =
- ciEnv::current(thread)->get_klass_by_index(klass, holder_index,
- holder_is_accessible)
- ->as_instance_klass();
+ ciEnv::current(thread)->get_klass_by_index(cpool, holder_index,
+ holder_is_accessible,
+ klass)->as_instance_klass();
// The declared holder of this field may not have been loaded.
// Bail out with partial field information.
@@ -168,8 +168,18 @@
_holder = CURRENT_ENV->get_object(fd->field_holder())->as_instance_klass();
// Check to see if the field is constant.
- if (_holder->is_initialized() &&
- this->is_final() && this->is_static()) {
+ if (_holder->is_initialized() && this->is_final()) {
+ if (!this->is_static()) {
+ // A field can be constant if it's a final static field or if it's
+ // a final non-static field of a trusted class ({java,sun}.dyn).
+ if (_holder->is_in_package("java/dyn") || _holder->is_in_package("sun/dyn")) {
+ _is_constant = true;
+ return;
+ }
+ _is_constant = false;
+ return;
+ }
+
// This field just may be constant. The only cases where it will
// not be constant are:
//
@@ -182,8 +192,8 @@
// java.lang.System.out, and java.lang.System.err.
klassOop k = _holder->get_klassOop();
- assert( SystemDictionary::system_klass() != NULL, "Check once per vm");
- if( k == SystemDictionary::system_klass() ) {
+ assert( SystemDictionary::System_klass() != NULL, "Check once per vm");
+ if( k == SystemDictionary::System_klass() ) {
// Check offsets for case 2: System.in, System.out, or System.err
if( _offset == java_lang_System::in_offset_in_bytes() ||
_offset == java_lang_System::out_offset_in_bytes() ||
--- a/hotspot/src/share/vm/ci/ciField.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciField.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -138,10 +138,18 @@
// Get the constant value of this field.
ciConstant constant_value() {
- assert(is_constant(), "illegal call to constant_value()");
+ assert(is_static() && is_constant(), "illegal call to constant_value()");
return _constant_value;
}
+ // Get the constant value of non-static final field in the given
+ // object.
+ ciConstant constant_value_of(ciObject* object) {
+ assert(!is_static() && is_constant(), "only if field is non-static constant");
+ assert(object->is_instance(), "must be instance");
+ return object->as_instance()->field_value(this);
+ }
+
// Check for link time errors. Accessing a field from a
// certain class via a certain bytecode may or may not be legal.
// This call checks to see if an exception may be raised by
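
A minimal sketch of how a compiler client might consult the two accessors after the ciField changes above; the helper name and the receiver argument are illustrative, not part of this changeset:

  // Sketch: fold a constant field load (a static final, or a non-static final
  // on a trusted {java,sun}.dyn instance whose receiver oop is a compile-time
  // constant).
  static bool try_fold(ciField* field, ciObject* receiver, ciConstant* result) {
    if (!field->is_constant())  return false;
    *result = field->is_static() ? field->constant_value()
                                 : field->constant_value_of(receiver);
    return true;
  }
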
--- a/hotspot/src/share/vm/ci/ciInstance.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciInstance.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -36,7 +36,7 @@
VM_ENTRY_MARK;
oop m = get_oop();
// Return NULL if it is not java.lang.Class.
- if (m == NULL || m->klass() != SystemDictionary::class_klass()) {
+ if (m == NULL || m->klass() != SystemDictionary::Class_klass()) {
return NULL;
}
// Return either a primitive type or a klass.
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -75,7 +75,7 @@
_java_mirror = NULL;
if (is_shared()) {
- if (h_k() != SystemDictionary::object_klass()) {
+ if (h_k() != SystemDictionary::Object_klass()) {
super();
}
java_mirror();
@@ -232,8 +232,48 @@
// ------------------------------------------------------------------
// ciInstanceKlass::uses_default_loader
bool ciInstanceKlass::uses_default_loader() {
- VM_ENTRY_MARK;
- return loader() == NULL;
+ // Note: We do not need to resolve the handle or enter the VM
+ // in order to test null-ness.
+ return _loader == NULL;
+}
+
+// ------------------------------------------------------------------
+// ciInstanceKlass::is_in_package
+//
+// Is this klass in the given package?
+bool ciInstanceKlass::is_in_package(const char* packagename, int len) {
+ // To avoid class loader mischief, this test always rejects application classes.
+ if (!uses_default_loader())
+ return false;
+ GUARDED_VM_ENTRY(
+ return is_in_package_impl(packagename, len);
+ )
+}
+
+bool ciInstanceKlass::is_in_package_impl(const char* packagename, int len) {
+ ASSERT_IN_VM;
+
+ // If packagename contains trailing '/' exclude it from the
+ // prefix-test since we test for it explicitly.
+ if (packagename[len - 1] == '/')
+ len--;
+
+ if (!name()->starts_with(packagename, len))
+ return false;
+
+ // Test if the class name is something like "java/lang".
+ if ((len + 1) > name()->utf8_length())
+ return false;
+
+ // Test for trailing '/'
+ if ((char) name()->byte_at(len) != '/')
+ return false;
+
+ // Make sure it's not actually in a subpackage:
+ if (name()->index_of_at(len+1, "/", 1) >= 0)
+ return false;
+
+ return true;
}
// ------------------------------------------------------------------
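
The intended behavior of the new package test, spelled out on a few illustrative names (a sketch; the class names are examples only):

  //   "java/dyn/MethodHandle"   is_in_package("java/dyn")  -> true
  //   "java/dyn/util/Helper"    is_in_package("java/dyn")  -> false  (subpackage)
  //   "java/dynamic/Foo"        is_in_package("java/dyn")  -> false  (no '/' at position len)
  //   any class with a non-null loader                     -> false  (uses_default_loader() check)
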
--- a/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciInstanceKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -29,10 +29,11 @@
// be loaded.
class ciInstanceKlass : public ciKlass {
CI_PACKAGE_ACCESS
+ friend class ciBytecodeStream;
friend class ciEnv;
+ friend class ciExceptionHandler;
friend class ciMethod;
friend class ciField;
- friend class ciBytecodeStream;
private:
jobject _loader;
@@ -78,6 +79,8 @@
const char* type_string() { return "ciInstanceKlass"; }
+ bool is_in_package_impl(const char* packagename, int len);
+
void print_impl(outputStream* st);
ciConstantPoolCache* field_cache();
@@ -196,6 +199,12 @@
bool is_java_lang_Object();
+ // Is this klass in the given package?
+ bool is_in_package(const char* packagename) {
+ return is_in_package(packagename, (int) strlen(packagename));
+ }
+ bool is_in_package(const char* packagename, int len);
+
// What kind of ciObject is this?
bool is_instance_klass() { return true; }
bool is_java_klass() { return true; }
--- a/hotspot/src/share/vm/ci/ciKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/hotspot/src/share/vm/ci/ciKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -69,7 +69,7 @@
ciKlass(KlassHandle k_h);
// What is the name of this klass?
- ciSymbol* name() { return _name; }
+ ciSymbol* name() const { return _name; }
// What is its layout helper value?
jint layout_helper() { return _layout_helper; }
--- a/hotspot/src/share/vm/ci/ciMethod.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciMethod.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -687,7 +687,7 @@
// ------------------------------------------------------------------
// invokedynamic support
//
-bool ciMethod::is_method_handle_invoke() {
+bool ciMethod::is_method_handle_invoke() const {
check_is_loaded();
bool flag = ((flags().as_int() & JVM_MH_INVOKE_BITS) == JVM_MH_INVOKE_BITS);
#ifdef ASSERT
@@ -700,6 +700,12 @@
return flag;
}
+bool ciMethod::is_method_handle_adapter() const {
+ check_is_loaded();
+ VM_ENTRY_MARK;
+ return get_methodOop()->is_method_handle_adapter();
+}
+
ciInstance* ciMethod::method_handle_type() {
check_is_loaded();
VM_ENTRY_MARK;
--- a/hotspot/src/share/vm/ci/ciMethod.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciMethod.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,8 @@
CI_PACKAGE_ACCESS
friend class ciEnv;
friend class ciExceptionHandlerStream;
+ friend class ciBytecodeStream;
+ friend class ciMethodHandle;
private:
// General method information.
@@ -213,7 +215,10 @@
bool check_call(int refinfo_index, bool is_static) const;
void build_method_data(); // make sure it exists in the VM also
int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC
- bool is_method_handle_invoke();
+
+ // JSR 292 support
+ bool is_method_handle_invoke() const;
+ bool is_method_handle_adapter() const;
ciInstance* method_handle_type();
// What kind of ciObject is this?
@@ -251,4 +256,10 @@
// Print the name of this method in various incarnations.
void print_name(outputStream* st = tty);
void print_short_name(outputStream* st = tty);
+
+ methodOop get_method_handle_target() {
+ klassOop receiver_limit_oop = NULL;
+ int flags = 0;
+ return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
+ }
};
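
A hypothetical caller of the new JSR 292 queries (a sketch; the helper name and the branch comments describe intent only):

  static void classify_jsr292_method(ciMethod* m) {
    if (m->is_method_handle_invoke()) {
      // signature-polymorphic MethodHandle invoker
    } else if (m->is_method_handle_adapter()) {
      // VM-generated adapter; its underlying target method can be
      // recovered with m->get_method_handle_target()
    }
  }
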
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/ci/ciMethodHandle.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciMethodHandle.cpp.incl"
+
+// ciMethodHandle
+
+// ------------------------------------------------------------------
+// ciMethodHandle::get_adapter
+//
+// Return an adapter for this MethodHandle.
+ciMethod* ciMethodHandle::get_adapter(bool is_invokedynamic) const {
+ VM_ENTRY_MARK;
+
+ Handle h(get_oop());
+ methodHandle callee(_callee->get_methodOop());
+ MethodHandleCompiler mhc(h, callee, is_invokedynamic, THREAD);
+ methodHandle m = mhc.compile(CHECK_NULL);
+ return CURRENT_ENV->get_object(m())->as_method();
+}
+
+
+// ------------------------------------------------------------------
+// ciMethodHandle::print_impl
+//
+// Implementation of the print method.
+void ciMethodHandle::print_impl(outputStream* st) {
+ st->print(" type=");
+ get_oop()->print();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/ci/ciMethodHandle.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// ciMethodHandle
+//
+// The class represents a java.dyn.MethodHandle object.
+class ciMethodHandle : public ciInstance {
+private:
+ ciMethod* _callee;
+
+ // Return an adapter for this MethodHandle.
+ ciMethod* get_adapter(bool is_invokedynamic) const;
+
+protected:
+ void print_impl(outputStream* st);
+
+public:
+ ciMethodHandle(instanceHandle h_i) : ciInstance(h_i) {};
+
+ // What kind of ciObject is this?
+ bool is_method_handle() const { return true; }
+
+ ciMethod* callee() const { return _callee; }
+ void set_callee(ciMethod* m) { _callee = m; }
+
+ // Return an adapter for a MethodHandle call.
+ ciMethod* get_method_handle_adapter() const {
+ return get_adapter(false);
+ }
+
+ // Return an adapter for an invokedynamic call.
+ ciMethod* get_invokedynamic_adapter() const {
+ return get_adapter(true);
+ }
+};
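
A minimal usage sketch for the new class, using only members declared above together with ciObject::as_method_handle(); the helper name and the is_dynamic flag are placeholders:

  static ciMethod* adapter_for(ciObject* const_oop, ciMethod* callee, bool is_dynamic) {
    ciMethodHandle* mh = const_oop->as_method_handle();
    mh->set_callee(callee);
    return is_dynamic ? mh->get_invokedynamic_adapter()
                      : mh->get_method_handle_adapter();
  }
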
--- a/hotspot/src/share/vm/ci/ciObject.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciObject.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -131,9 +131,12 @@
// What kind of ciObject is this?
virtual bool is_null_object() const { return false; }
+ virtual bool is_call_site() const { return false; }
+ virtual bool is_cpcache() const { return false; }
virtual bool is_instance() { return false; }
virtual bool is_method() { return false; }
virtual bool is_method_data() { return false; }
+ virtual bool is_method_handle() const { return false; }
virtual bool is_array() { return false; }
virtual bool is_obj_array() { return false; }
virtual bool is_type_array() { return false; }
@@ -185,6 +188,14 @@
assert(is_null_object(), "bad cast");
return (ciNullObject*)this;
}
+ ciCallSite* as_call_site() {
+ assert(is_call_site(), "bad cast");
+ return (ciCallSite*) this;
+ }
+ ciCPCache* as_cpcache() {
+ assert(is_cpcache(), "bad cast");
+ return (ciCPCache*) this;
+ }
ciInstance* as_instance() {
assert(is_instance(), "bad cast");
return (ciInstance*)this;
@@ -197,6 +208,10 @@
assert(is_method_data(), "bad cast");
return (ciMethodData*)this;
}
+ ciMethodHandle* as_method_handle() {
+ assert(is_method_handle(), "bad cast");
+ return (ciMethodHandle*) this;
+ }
ciArray* as_array() {
assert(is_array(), "bad cast");
return (ciArray*)this;
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -144,39 +144,13 @@
ciEnv::_obj_array_klass_klass_instance =
get(Universe::objArrayKlassKlassObj())
->as_obj_array_klass_klass();
- ciEnv::_ArrayStoreException =
- get(SystemDictionary::ArrayStoreException_klass())
- ->as_instance_klass();
- ciEnv::_Class =
- get(SystemDictionary::class_klass())
- ->as_instance_klass();
- ciEnv::_ClassCastException =
- get(SystemDictionary::ClassCastException_klass())
- ->as_instance_klass();
- ciEnv::_Object =
- get(SystemDictionary::object_klass())
- ->as_instance_klass();
- ciEnv::_Throwable =
- get(SystemDictionary::throwable_klass())
- ->as_instance_klass();
- ciEnv::_Thread =
- get(SystemDictionary::thread_klass())
- ->as_instance_klass();
- ciEnv::_OutOfMemoryError =
- get(SystemDictionary::OutOfMemoryError_klass())
- ->as_instance_klass();
- ciEnv::_String =
- get(SystemDictionary::string_klass())
- ->as_instance_klass();
- ciEnv::_StringBuffer =
- get(SystemDictionary::stringBuffer_klass())
- ->as_instance_klass();
- ciEnv::_StringBuilder =
- get(SystemDictionary::StringBuilder_klass())
- ->as_instance_klass();
- ciEnv::_Integer =
- get(SystemDictionary::int_klass())
- ->as_instance_klass();
+
+#define WK_KLASS_DEFN(name, ignore_s, opt) \
+ if (SystemDictionary::name() != NULL) \
+ ciEnv::_##name = get(SystemDictionary::name())->as_instance_klass();
+
+ WK_KLASSES_DO(WK_KLASS_DEFN)
+#undef WK_KLASS_DEFN
for (int len = -1; len != _ci_objects->length(); ) {
len = _ci_objects->length();
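
For one concrete entry, the WK_KLASS_DEFN macro above expands to the same pattern the deleted lines spelled out by hand (sketch of the Object_klass expansion):

  if (SystemDictionary::Object_klass() != NULL)
    ciEnv::_Object_klass = get(SystemDictionary::Object_klass())->as_instance_klass();
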
@@ -333,13 +307,21 @@
return new (arena()) ciMethodData(h_md);
} else if (o->is_instance()) {
instanceHandle h_i(THREAD, (instanceOop)o);
- return new (arena()) ciInstance(h_i);
+ if (java_dyn_CallSite::is_instance(o))
+ return new (arena()) ciCallSite(h_i);
+ else if (java_dyn_MethodHandle::is_instance(o))
+ return new (arena()) ciMethodHandle(h_i);
+ else
+ return new (arena()) ciInstance(h_i);
} else if (o->is_objArray()) {
objArrayHandle h_oa(THREAD, (objArrayOop)o);
return new (arena()) ciObjArray(h_oa);
} else if (o->is_typeArray()) {
typeArrayHandle h_ta(THREAD, (typeArrayOop)o);
return new (arena()) ciTypeArray(h_ta);
+ } else if (o->is_constantPoolCache()) {
+ constantPoolCacheHandle h_cpc(THREAD, (constantPoolCacheOop) o);
+ return new (arena()) ciCPCache(h_cpc);
}
// The oop is of some type not supported by the compiler interface.
@@ -576,7 +558,7 @@
if (key->is_perm() && _non_perm_count == 0) {
return emptyBucket;
} else if (key->is_instance()) {
- if (key->klass() == SystemDictionary::class_klass()) {
+ if (key->klass() == SystemDictionary::Class_klass()) {
// class mirror instances are always perm
return emptyBucket;
}
--- a/hotspot/src/share/vm/ci/ciStreams.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciStreams.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -186,8 +186,9 @@
// If this bytecode is a new, newarray, multianewarray, instanceof,
// or checkcast, get the referenced klass.
ciKlass* ciBytecodeStream::get_klass(bool& will_link) {
- return CURRENT_ENV->get_klass_by_index(_holder, get_klass_index(),
- will_link);
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
+ return CURRENT_ENV->get_klass_by_index(cpool, get_klass_index(), will_link, _holder);
}
// ------------------------------------------------------------------
@@ -213,7 +214,9 @@
// If this bytecode is one of the ldc variants, get the referenced
// constant.
ciConstant ciBytecodeStream::get_constant() {
- return CURRENT_ENV->get_constant_by_index(_holder, get_constant_index());
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
+ return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder);
}
// ------------------------------------------------------------------
@@ -264,9 +267,11 @@
// There is no "will_link" result passed back. The user is responsible
// for checking linkability when retrieving the associated field.
ciInstanceKlass* ciBytecodeStream::get_declared_field_holder() {
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
int holder_index = get_field_holder_index();
bool ignore;
- return CURRENT_ENV->get_klass_by_index(_holder, holder_index, ignore)
+ return CURRENT_ENV->get_klass_by_index(cpool, holder_index, ignore, _holder)
->as_instance_klass();
}
@@ -277,9 +282,10 @@
// referenced by the current bytecode. Used for generating
// deoptimization information.
int ciBytecodeStream::get_field_holder_index() {
- VM_ENTRY_MARK;
- constantPoolOop cpool = _holder->get_instanceKlass()->constants();
- return cpool->klass_ref_index_at(get_field_index());
+ GUARDED_VM_ENTRY(
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ return cpool->klass_ref_index_at(get_field_index());
+ )
}
// ------------------------------------------------------------------
@@ -321,7 +327,9 @@
//
// If this is a method invocation bytecode, get the invoked method.
ciMethod* ciBytecodeStream::get_method(bool& will_link) {
- ciMethod* m = CURRENT_ENV->get_method_by_index(_holder, get_method_index(),cur_bc());
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
+ ciMethod* m = CURRENT_ENV->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
will_link = m->is_loaded();
return m;
}
@@ -338,11 +346,13 @@
// There is no "will_link" result passed back. The user is responsible
// for checking linkability when retrieving the associated method.
ciKlass* ciBytecodeStream::get_declared_method_holder() {
+ VM_ENTRY_MARK;
+ constantPoolHandle cpool(_method->get_methodOop()->constants());
bool ignore;
- // report as Dynamic for invokedynamic, which is syntactically classless
+ // report as InvokeDynamic for invokedynamic, which is syntactically classless
if (cur_bc() == Bytecodes::_invokedynamic)
- return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_Dynamic(), false);
- return CURRENT_ENV->get_klass_by_index(_holder, get_method_holder_index(), ignore);
+ return CURRENT_ENV->get_klass_by_name(_holder, ciSymbol::java_dyn_InvokeDynamic(), false);
+ return CURRENT_ENV->get_klass_by_index(cpool, get_method_holder_index(), ignore, _holder);
}
// ------------------------------------------------------------------
@@ -352,8 +362,7 @@
// referenced by the current bytecode. Used for generating
// deoptimization information.
int ciBytecodeStream::get_method_holder_index() {
- VM_ENTRY_MARK;
- constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ constantPoolOop cpool = _method->get_methodOop()->constants();
return cpool->klass_ref_index_at(get_method_index());
}
@@ -370,3 +379,31 @@
int name_and_type_index = cpool->name_and_type_ref_index_at(method_index);
return cpool->signature_ref_index_at(name_and_type_index);
}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_cpcache
+ciCPCache* ciBytecodeStream::get_cpcache() {
+ VM_ENTRY_MARK;
+ // Get the constant pool.
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ constantPoolCacheOop cpcache = cpool->cache();
+
+ return CURRENT_ENV->get_object(cpcache)->as_cpcache();
+}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_call_site
+ciCallSite* ciBytecodeStream::get_call_site() {
+ VM_ENTRY_MARK;
+ // Get the constant pool.
+ constantPoolOop cpool = _holder->get_instanceKlass()->constants();
+ constantPoolCacheOop cpcache = cpool->cache();
+
+ // Get the CallSite from the constant pool cache.
+ int method_index = get_method_index();
+ ConstantPoolCacheEntry* cpcache_entry = cpcache->secondary_entry_at(method_index);
+ oop call_site_oop = cpcache_entry->f1();
+
+ // Create a CallSite object and return it.
+ return CURRENT_ENV->get_object(call_site_oop)->as_call_site();
+}
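
A sketch of how a consumer at an invokedynamic bytecode might combine the new accessors with the existing stream API; iter is a placeholder ciBytecodeStream positioned at the call:

  bool will_link;
  ciMethod*   target    = iter.get_method(will_link);   // resolved via the new cpool-based path
  ciCPCache*  cpcache   = iter.get_cpcache();
  ciCallSite* call_site = iter.get_call_site();          // CallSite from the secondary cp cache entry
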
--- a/hotspot/src/share/vm/ci/ciStreams.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -232,6 +232,9 @@
int get_method_holder_index();
int get_method_signature_index();
+ ciCPCache* get_cpcache();
+ ciCallSite* get_call_site();
+
private:
void assert_index_size(int required_size) const {
#ifdef ASSERT
--- a/hotspot/src/share/vm/ci/ciSymbol.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciSymbol.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -60,6 +60,22 @@
}
// ------------------------------------------------------------------
+// ciSymbol::starts_with
+//
+// Tests if the symbol starts with the given prefix.
+bool ciSymbol::starts_with(const char* prefix, int len) const {
+ GUARDED_VM_ENTRY(return get_symbolOop()->starts_with(prefix, len);)
+}
+
+// ------------------------------------------------------------------
+// ciSymbol::index_of
+//
+// Determines where the symbol contains the given substring.
+int ciSymbol::index_of_at(int i, const char* str, int len) const {
+ GUARDED_VM_ENTRY(return get_symbolOop()->index_of_at(i, str, len);)
+}
+
+// ------------------------------------------------------------------
// ciSymbol::utf8_length
int ciSymbol::utf8_length() {
GUARDED_VM_ENTRY(return get_symbolOop()->utf8_length();)
--- a/hotspot/src/share/vm/ci/ciSymbol.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciSymbol.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2001 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
// machine.
class ciSymbol : public ciObject {
CI_PACKAGE_ACCESS
+ // These friends all make direct use of get_symbolOop:
friend class ciEnv;
friend class ciInstanceKlass;
friend class ciSignature;
@@ -38,13 +39,13 @@
ciSymbol(symbolOop s) : ciObject(s) {}
ciSymbol(symbolHandle s); // for use with vmSymbolHandles
- symbolOop get_symbolOop() { return (symbolOop)get_oop(); }
+ symbolOop get_symbolOop() const { return (symbolOop)get_oop(); }
const char* type_string() { return "ciSymbol"; }
void print_impl(outputStream* st);
- int byte_at(int i);
+ // This is public in symbolOop but private here, because the base can move:
jbyte* base();
// Make a ciSymbol from a C string (implementation).
@@ -55,6 +56,15 @@
const char* as_utf8();
int utf8_length();
+ // Return the i-th utf8 byte, where i < utf8_length
+ int byte_at(int i);
+
+ // Tests if the symbol starts with the given prefix.
+ bool starts_with(const char* prefix, int len) const;
+
+ // Determines where the symbol contains the given substring.
+ int index_of_at(int i, const char* str, int len) const;
+
// What kind of ciObject is this?
bool is_symbol() { return true; }
--- a/hotspot/src/share/vm/ci/ciType.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciType.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -111,7 +111,7 @@
// short, etc.
// Note: Bare T_ADDRESS means a raw pointer type, not a return_address.
assert((uint)t < T_CONFLICT+1, "range check");
- if (t == T_OBJECT) return ciEnv::_Object; // java/lang/Object
+ if (t == T_OBJECT) return ciEnv::_Object_klass; // java/lang/Object
assert(_basic_types[t] != NULL, "domain check");
return _basic_types[t];
}
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -635,8 +635,15 @@
ciMethod* method = str->get_method(will_link);
if (!will_link) {
// We weren't able to find the method.
- ciKlass* unloaded_holder = method->holder();
- trap(str, unloaded_holder, str->get_method_holder_index());
+ if (str->cur_bc() == Bytecodes::_invokedynamic) {
+ trap(str, NULL,
+ Deoptimization::make_trap_request
+ (Deoptimization::Reason_uninitialized,
+ Deoptimization::Action_reinterpret));
+ } else {
+ ciKlass* unloaded_holder = method->holder();
+ trap(str, unloaded_holder, str->get_method_holder_index());
+ }
} else {
ciSignature* signature = method->signature();
ciSignatureStream sigstr(signature);
@@ -1292,8 +1299,8 @@
case Bytecodes::_invokeinterface: do_invoke(str, true); break;
case Bytecodes::_invokespecial: do_invoke(str, true); break;
case Bytecodes::_invokestatic: do_invoke(str, false); break;
-
case Bytecodes::_invokevirtual: do_invoke(str, true); break;
+ case Bytecodes::_invokedynamic: do_invoke(str, false); break;
case Bytecodes::_istore: store_local_int(str->get_index()); break;
case Bytecodes::_istore_0: store_local_int(0); break;
--- a/hotspot/src/share/vm/ci/ciUtilities.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/ci/ciUtilities.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -79,7 +79,7 @@
THREAD); \
if (HAS_PENDING_EXCEPTION) { \
if (PENDING_EXCEPTION->klass() == \
- SystemDictionary::threaddeath_klass()) { \
+ SystemDictionary::ThreadDeath_klass()) { \
/* Kill the compilation. */ \
fatal("unhandled ci exception"); \
return (result); \
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -430,7 +430,7 @@
case JVM_CONSTANT_UnresolvedClass :
// Patching a class means pre-resolving it.
// The name in the constant pool is ignored.
- if (patch->klass() == SystemDictionary::class_klass()) { // %%% java_lang_Class::is_instance
+ if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance
guarantee_property(!java_lang_Class::is_primitive(patch()),
"Illegal class patch at %d in class file %s",
index, CHECK);
@@ -643,7 +643,7 @@
guarantee_property(value_type.is_int(), "Inconsistent constant value type in class file %s", CHECK);
break;
case T_OBJECT:
- guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;", 18)
+ guarantee_property((cp->symbol_at(signature_index)->equals("Ljava/lang/String;")
&& (value_type.is_string() || value_type.is_unresolved_string())),
"Bad string initial value in class file %s", CHECK);
break;
@@ -1718,9 +1718,7 @@
m->set_exception_table(exception_handlers());
// Copy byte codes
- if (code_length > 0) {
- memcpy(m->code_base(), code_start, code_length);
- }
+ m->set_code(code_start);
// Copy line number table
if (linenumber_table != NULL) {
@@ -3471,8 +3469,8 @@
#endif
// Check if this klass supports the java.lang.Cloneable interface
- if (SystemDictionary::cloneable_klass_loaded()) {
- if (k->is_subtype_of(SystemDictionary::cloneable_klass())) {
+ if (SystemDictionary::Cloneable_klass_loaded()) {
+ if (k->is_subtype_of(SystemDictionary::Cloneable_klass())) {
k->set_is_cloneable();
}
}
@@ -4178,7 +4176,7 @@
// Check if ch is Java identifier start or is Java identifier part
// 4672820: call java.lang.Character methods directly without generating separate tables.
EXCEPTION_MARK;
- instanceKlassHandle klass (THREAD, SystemDictionary::char_klass());
+ instanceKlassHandle klass (THREAD, SystemDictionary::Character_klass());
// return value
JavaValue result(T_BOOLEAN);
--- a/hotspot/src/share/vm/classfile/classLoader.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/classLoader.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -819,7 +819,7 @@
_package_hash_table->copy_pkgnames(packages);
}
// Allocate objArray and fill with java.lang.String
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
nof_entries, CHECK_0);
objArrayHandle result(THREAD, r);
for (int i = 0; i < nof_entries; i++) {
--- a/hotspot/src/share/vm/classfile/javaAssertions.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/javaAssertions.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -95,14 +95,14 @@
int len;
typeArrayOop t;
len = OptionList::count(_packages);
- objArrayOop pn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+ objArrayOop pn = oopFactory::new_objArray(SystemDictionary::String_klass(), len, CHECK_NULL);
objArrayHandle pkgNames (THREAD, pn);
t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
typeArrayHandle pkgEnabled(THREAD, t);
fillJavaArrays(_packages, len, pkgNames, pkgEnabled, CHECK_NULL);
len = OptionList::count(_classes);
- objArrayOop cn = oopFactory::new_objArray(SystemDictionary::string_klass(), len, CHECK_NULL);
+ objArrayOop cn = oopFactory::new_objArray(SystemDictionary::String_klass(), len, CHECK_NULL);
objArrayHandle classNames (THREAD, cn);
t = oopFactory::new_typeArray(T_BOOLEAN, len, CHECK_NULL);
typeArrayHandle classEnabled(THREAD, t);
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -68,9 +68,9 @@
// and the char array it points to end up in the same cache line.
oop obj;
if (tenured) {
- obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_permanent_instance(CHECK_NH);
+ obj = instanceKlass::cast(SystemDictionary::String_klass())->allocate_permanent_instance(CHECK_NH);
} else {
- obj = instanceKlass::cast(SystemDictionary::string_klass())->allocate_instance(CHECK_NH);
+ obj = instanceKlass::cast(SystemDictionary::String_klass())->allocate_instance(CHECK_NH);
}
// Create the char array. The String object must be handlized here
@@ -293,7 +293,7 @@
bool java_lang_String::equals(oop java_string, jchar* chars, int len) {
assert(SharedSkipVerify ||
- java_string->klass() == SystemDictionary::string_klass(),
+ java_string->klass() == SystemDictionary::String_klass(),
"must be java_string");
typeArrayOop value = java_lang_String::value(java_string);
int offset = java_lang_String::offset(java_string);
@@ -311,7 +311,7 @@
void java_lang_String::print(Handle java_string, outputStream* st) {
oop obj = java_string();
- assert(obj->klass() == SystemDictionary::string_klass(), "must be java_string");
+ assert(obj->klass() == SystemDictionary::String_klass(), "must be java_string");
typeArrayOop value = java_lang_String::value(obj);
int offset = java_lang_String::offset(obj);
int length = java_lang_String::length(obj);
@@ -339,9 +339,9 @@
// class is put into the system dictionary.
int computed_modifiers = k->compute_modifier_flags(CHECK_0);
k->set_modifier_flags(computed_modifiers);
- if (SystemDictionary::class_klass_loaded()) {
+ if (SystemDictionary::Class_klass_loaded()) {
// Allocate mirror (java.lang.Class instance)
- Handle mirror = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+ Handle mirror = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
// Setup indirections
mirror->obj_field_put(klass_offset, k());
k->set_java_mirror(mirror());
@@ -378,7 +378,7 @@
oop java_lang_Class::create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS) {
// This should be improved by adding a field at the Java level or by
// introducing a new VM klass (see comment in ClassFileParser)
- oop java_class = instanceKlass::cast(SystemDictionary::class_klass())->allocate_permanent_instance(CHECK_0);
+ oop java_class = instanceKlass::cast(SystemDictionary::Class_klass())->allocate_permanent_instance(CHECK_0);
if (type != T_VOID) {
klassOop aklass = Universe::typeArrayKlassObj(type);
assert(aklass != NULL, "correct bootstrap");
@@ -502,7 +502,7 @@
oop java_lang_Class::primitive_mirror(BasicType t) {
oop mirror = Universe::java_mirror(t);
- assert(mirror != NULL && mirror->is_a(SystemDictionary::class_klass()), "must be a Class");
+ assert(mirror != NULL && mirror->is_a(SystemDictionary::Class_klass()), "must be a Class");
assert(java_lang_Class::is_primitive(mirror), "must be primitive");
return mirror;
}
@@ -515,14 +515,14 @@
assert(!offsets_computed, "offsets should be initialized only once");
offsets_computed = true;
- klassOop k = SystemDictionary::class_klass();
+ klassOop k = SystemDictionary::Class_klass();
// The classRedefinedCount field is only present starting in 1.5,
// so don't go fatal.
compute_optional_offset(classRedefinedCount_offset,
k, vmSymbols::classRedefinedCount_name(), vmSymbols::int_signature());
// The field indicating parallelCapable (parallelLockMap) is only present starting in 7,
- klassOop k1 = SystemDictionary::classloader_klass();
+ klassOop k1 = SystemDictionary::ClassLoader_klass();
compute_optional_offset(parallelCapable_offset,
k1, vmSymbols::parallelCapable_name(), vmSymbols::concurrenthashmap_signature());
}
@@ -588,7 +588,7 @@
void java_lang_Thread::compute_offsets() {
assert(_group_offset == 0, "offsets should be initialized only once");
- klassOop k = SystemDictionary::thread_klass();
+ klassOop k = SystemDictionary::Thread_klass();
compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::char_array_signature());
compute_offset(_group_offset, k, vmSymbols::group_name(), vmSymbols::threadgroup_signature());
compute_offset(_contextClassLoader_offset, k, vmSymbols::contextClassLoader_name(), vmSymbols::classloader_signature());
@@ -847,7 +847,7 @@
void java_lang_ThreadGroup::compute_offsets() {
assert(_parent_offset == 0, "offsets should be initialized only once");
- klassOop k = SystemDictionary::threadGroup_klass();
+ klassOop k = SystemDictionary::ThreadGroup_klass();
compute_offset(_parent_offset, k, vmSymbols::parent_name(), vmSymbols::threadgroup_signature());
compute_offset(_name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
@@ -1344,7 +1344,7 @@
// No-op if stack trace is disabled
if (!StackTraceInThrowable) return;
- assert(throwable->is_a(SystemDictionary::throwable_klass()), "sanity check");
+ assert(throwable->is_a(SystemDictionary::Throwable_klass()), "sanity check");
oop backtrace = java_lang_Throwable::backtrace(throwable());
assert(backtrace != NULL, "backtrace not preallocated");
@@ -1449,7 +1449,7 @@
assert(JDK_Version::is_gte_jdk14x_version(), "should only be called in >= 1.4");
// Allocate java.lang.StackTraceElement instance
- klassOop k = SystemDictionary::stackTraceElement_klass();
+ klassOop k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
instanceKlassHandle ik (THREAD, k);
if (ik->should_be_initialized()) {
@@ -1487,7 +1487,7 @@
void java_lang_reflect_AccessibleObject::compute_offsets() {
- klassOop k = SystemDictionary::reflect_accessible_object_klass();
+ klassOop k = SystemDictionary::reflect_AccessibleObject_klass();
compute_offset(override_offset, k, vmSymbols::override_name(), vmSymbols::bool_signature());
}
@@ -1502,7 +1502,7 @@
}
void java_lang_reflect_Method::compute_offsets() {
- klassOop k = SystemDictionary::reflect_method_klass();
+ klassOop k = SystemDictionary::reflect_Method_klass();
compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
compute_offset(returnType_offset, k, vmSymbols::returnType_name(), vmSymbols::class_signature());
@@ -1523,7 +1523,7 @@
Handle java_lang_reflect_Method::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
- klassOop klass = SystemDictionary::reflect_method_klass();
+ klassOop klass = SystemDictionary::reflect_Method_klass();
// This class is eagerly initialized during VM initialization, since we keep a refence
// to one of the methods
assert(instanceKlass::cast(klass)->is_initialized(), "must be initialized");
@@ -1665,7 +1665,7 @@
}
void java_lang_reflect_Constructor::compute_offsets() {
- klassOop k = SystemDictionary::reflect_constructor_klass();
+ klassOop k = SystemDictionary::reflect_Constructor_klass();
compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
compute_offset(parameterTypes_offset, k, vmSymbols::parameterTypes_name(), vmSymbols::class_array_signature());
compute_offset(exceptionTypes_offset, k, vmSymbols::exceptionTypes_name(), vmSymbols::class_array_signature());
@@ -1789,7 +1789,7 @@
}
void java_lang_reflect_Field::compute_offsets() {
- klassOop k = SystemDictionary::reflect_field_klass();
+ klassOop k = SystemDictionary::reflect_Field_klass();
compute_offset(clazz_offset, k, vmSymbols::clazz_name(), vmSymbols::class_signature());
compute_offset(name_offset, k, vmSymbols::name_name(), vmSymbols::string_signature());
compute_offset(type_offset, k, vmSymbols::type_name(), vmSymbols::class_signature());
@@ -1896,7 +1896,7 @@
void sun_reflect_ConstantPool::compute_offsets() {
- klassOop k = SystemDictionary::reflect_constant_pool_klass();
+ klassOop k = SystemDictionary::reflect_ConstantPool_klass();
// This null test can be removed post beta
if (k != NULL) {
compute_offset(_cp_oop_offset, k, vmSymbols::constantPoolOop_name(), vmSymbols::object_signature());
@@ -1906,7 +1906,7 @@
Handle sun_reflect_ConstantPool::create(TRAPS) {
assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
- klassOop k = SystemDictionary::reflect_constant_pool_klass();
+ klassOop k = SystemDictionary::reflect_ConstantPool_klass();
instanceKlassHandle klass (THREAD, k);
// Ensure it is initialized
klass->initialize(CHECK_NH);
@@ -1926,7 +1926,7 @@
}
void sun_reflect_UnsafeStaticFieldAccessorImpl::compute_offsets() {
- klassOop k = SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass();
+ klassOop k = SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass();
// This null test can be removed post beta
if (k != NULL) {
compute_offset(_base_offset, k,
@@ -2072,7 +2072,7 @@
// Support for java_lang_ref_Reference
oop java_lang_ref_Reference::pending_list_lock() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
char *addr = (((char *)ik->start_of_static_fields()) + static_lock_offset);
if (UseCompressedOops) {
return oopDesc::load_decode_heap_oop((narrowOop *)addr);
@@ -2082,7 +2082,7 @@
}
HeapWord *java_lang_ref_Reference::pending_list_addr() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::Reference_klass());
char *addr = (((char *)ik->start_of_static_fields()) + static_pending_offset);
// XXX This might not be HeapWord aligned, almost rather be char *.
return (HeapWord*)addr;
@@ -2105,17 +2105,17 @@
}
jlong java_lang_ref_SoftReference::clock() {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
int offset = ik->offset_of_static_fields() + static_clock_offset;
- return SystemDictionary::soft_reference_klass()->long_field(offset);
+ return SystemDictionary::SoftReference_klass()->long_field(offset);
}
void java_lang_ref_SoftReference::set_clock(jlong value) {
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::soft_reference_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::SoftReference_klass());
int offset = ik->offset_of_static_fields() + static_clock_offset;
- SystemDictionary::soft_reference_klass()->long_field_put(offset, value);
+ SystemDictionary::SoftReference_klass()->long_field_put(offset, value);
}
@@ -2403,6 +2403,10 @@
return ptypes(mt)->obj_at(idx);
}
+int java_dyn_MethodType::ptype_count(oop mt) {
+ return ptypes(mt)->length();
+}
+
// Support for java_dyn_MethodTypeForm
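
A small sketch of the intended use of the new accessor alongside the existing ptype(); mt is assumed to be a java.dyn.MethodType oop:

  int count = java_dyn_MethodType::ptype_count(mt);
  for (int i = 0; i < count; i++) {
    oop ptype_mirror = java_dyn_MethodType::ptype(mt, i);  // java.lang.Class mirror of parameter i
  }
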
@@ -2534,7 +2538,7 @@
// the generated bytecodes for reflection, and if so, "magically"
// delegate to its parent to prevent class loading from occurring
// in places where applications using reflection didn't expect it.
- klassOop delegating_cl_class = SystemDictionary::reflect_delegating_classloader_klass();
+ klassOop delegating_cl_class = SystemDictionary::reflect_DelegatingClassLoader_klass();
// This might be null in non-1.4 JDKs
if (delegating_cl_class != NULL && loader->is_a(delegating_cl_class)) {
return parent(loader);
@@ -2549,7 +2553,7 @@
void java_lang_System::compute_offsets() {
assert(offset_of_static_fields == 0, "offsets should be initialized only once");
- instanceKlass* ik = instanceKlass::cast(SystemDictionary::system_klass());
+ instanceKlass* ik = instanceKlass::cast(SystemDictionary::System_klass());
offset_of_static_fields = ik->offset_of_static_fields();
}
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -111,7 +111,7 @@
// Testers
static bool is_instance(oop obj) {
- return obj != NULL && obj->klass() == SystemDictionary::string_klass();
+ return obj != NULL && obj->klass() == SystemDictionary::String_klass();
}
// Debugging
@@ -161,7 +161,7 @@
static void print_signature(oop java_class, outputStream *st);
// Testing
static bool is_instance(oop obj) {
- return obj != NULL && obj->klass() == SystemDictionary::class_klass();
+ return obj != NULL && obj->klass() == SystemDictionary::Class_klass();
}
static bool is_primitive(oop java_class);
static BasicType primitive_type(oop java_class);
@@ -1027,6 +1027,7 @@
static oop form(oop mt);
static oop ptype(oop mt, int index);
+ static int ptype_count(oop mt);
static symbolOop as_signature(oop mt, bool intern_if_not_found, TRAPS);
static void print_signature(oop mt, outputStream* st);
@@ -1083,6 +1084,14 @@
static oop vmmethod(oop site);
static void set_vmmethod(oop site, oop ref);
+ // Testers
+ static bool is_subclass(klassOop klass) {
+ return Klass::cast(klass)->is_subclass_of(SystemDictionary::CallSite_klass());
+ }
+ static bool is_instance(oop obj) {
+ return obj != NULL && is_subclass(obj->klass());
+ }
+
// Accessors for code generation:
static int target_offset_in_bytes() { return _target_offset; }
static int type_offset_in_bytes() { return _type_offset; }
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -60,10 +60,10 @@
}
void SystemDictionary::compute_java_system_loader(TRAPS) {
- KlassHandle system_klass(THREAD, WK_KLASS(classloader_klass));
+ KlassHandle system_klass(THREAD, WK_KLASS(ClassLoader_klass));
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
- KlassHandle(THREAD, WK_KLASS(classloader_klass)),
+ KlassHandle(THREAD, WK_KLASS(ClassLoader_klass)),
vmSymbolHandles::getSystemClassLoader_name(),
vmSymbolHandles::void_classloader_signature(),
CHECK);
@@ -128,7 +128,7 @@
// in which case we have to check whether the pending exception is a ClassNotFoundException,
// and if so convert it to a NoClassDefFoundError
// And chain the original ClassNotFoundException
- if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass())) {
+ if (throw_error && PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass())) {
ResourceMark rm(THREAD);
assert(klass_h() == NULL, "Should not have result with exception pending");
Handle e(THREAD, PENDING_EXCEPTION);
@@ -359,7 +359,7 @@
assert(class_loader() != NULL, "should not have non-null protection domain for null classloader");
- KlassHandle system_loader(THREAD, SystemDictionary::classloader_klass());
+ KlassHandle system_loader(THREAD, SystemDictionary::ClassLoader_klass());
JavaCalls::call_special(&result,
class_loader,
system_loader,
@@ -743,7 +743,7 @@
// Bootstrap goes through here to allow for an extra guarantee check
if (UnsyncloadClass || (class_loader.is_null())) {
if (k.is_null() && HAS_PENDING_EXCEPTION
- && PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+ && PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
MutexLocker mu(SystemDictionary_lock, THREAD);
klassOop check = find_class(d_index, d_hash, name, class_loader);
if (check != NULL) {
@@ -1367,7 +1367,7 @@
JavaValue result(T_OBJECT);
- KlassHandle spec_klass (THREAD, SystemDictionary::classloader_klass());
+ KlassHandle spec_klass (THREAD, SystemDictionary::ClassLoader_klass());
// Call public unsynchronized loadClass(String) directly for all class loaders
// for parallelCapable class loaders. JDK >=7, loadClass(String, boolean) will
@@ -1944,13 +1944,13 @@
void SystemDictionary::initialize_preloaded_classes(TRAPS) {
- assert(WK_KLASS(object_klass) == NULL, "preloaded classes should only be initialized once");
+ assert(WK_KLASS(Object_klass) == NULL, "preloaded classes should only be initialized once");
// Preload commonly used klasses
WKID scan = FIRST_WKID;
// first do Object, String, Class
- initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(class_klass), scan, CHECK);
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Class_klass), scan, CHECK);
- debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(class_klass)));
+ debug_only(instanceKlass::verify_class_klass_nonstatic_oop_maps(WK_KLASS(Class_klass)));
// Fixup mirrors for classes loaded before java.lang.Class.
// These calls iterate over the objects currently in the perm gen
@@ -1961,17 +1961,17 @@
Universe::fixup_mirrors(CHECK);
// do a bunch more:
- initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(reference_klass), scan, CHECK);
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(Reference_klass), scan, CHECK);
// Preload ref klasses and set reference types
- instanceKlass::cast(WK_KLASS(reference_klass))->set_reference_type(REF_OTHER);
- instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(reference_klass));
+ instanceKlass::cast(WK_KLASS(Reference_klass))->set_reference_type(REF_OTHER);
+ instanceRefKlass::update_nonstatic_oop_maps(WK_KLASS(Reference_klass));
- initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(phantom_reference_klass), scan, CHECK);
- instanceKlass::cast(WK_KLASS(soft_reference_klass))->set_reference_type(REF_SOFT);
- instanceKlass::cast(WK_KLASS(weak_reference_klass))->set_reference_type(REF_WEAK);
- instanceKlass::cast(WK_KLASS(final_reference_klass))->set_reference_type(REF_FINAL);
- instanceKlass::cast(WK_KLASS(phantom_reference_klass))->set_reference_type(REF_PHANTOM);
+ initialize_wk_klasses_through(WK_KLASS_ENUM_NAME(PhantomReference_klass), scan, CHECK);
+ instanceKlass::cast(WK_KLASS(SoftReference_klass))->set_reference_type(REF_SOFT);
+ instanceKlass::cast(WK_KLASS(WeakReference_klass))->set_reference_type(REF_WEAK);
+ instanceKlass::cast(WK_KLASS(FinalReference_klass))->set_reference_type(REF_FINAL);
+ instanceKlass::cast(WK_KLASS(PhantomReference_klass))->set_reference_type(REF_PHANTOM);
WKID meth_group_start = WK_KLASS_ENUM_NAME(MethodHandle_klass);
WKID meth_group_end = WK_KLASS_ENUM_NAME(WrongMethodTypeException_klass);
@@ -1984,7 +1984,7 @@
scan = WKID(meth_group_end+1);
}
WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass);
- WKID indy_group_end = WK_KLASS_ENUM_NAME(Dynamic_klass);
+ WKID indy_group_end = WK_KLASS_ENUM_NAME(InvokeDynamic_klass);
initialize_wk_klasses_until(indy_group_start, scan, CHECK);
if (EnableInvokeDynamic) {
initialize_wk_klasses_through(indy_group_end, scan, CHECK);
@@ -1996,14 +1996,14 @@
initialize_wk_klasses_until(WKID_LIMIT, scan, CHECK);
- _box_klasses[T_BOOLEAN] = WK_KLASS(boolean_klass);
- _box_klasses[T_CHAR] = WK_KLASS(char_klass);
- _box_klasses[T_FLOAT] = WK_KLASS(float_klass);
- _box_klasses[T_DOUBLE] = WK_KLASS(double_klass);
- _box_klasses[T_BYTE] = WK_KLASS(byte_klass);
- _box_klasses[T_SHORT] = WK_KLASS(short_klass);
- _box_klasses[T_INT] = WK_KLASS(int_klass);
- _box_klasses[T_LONG] = WK_KLASS(long_klass);
+ _box_klasses[T_BOOLEAN] = WK_KLASS(Boolean_klass);
+ _box_klasses[T_CHAR] = WK_KLASS(Character_klass);
+ _box_klasses[T_FLOAT] = WK_KLASS(Float_klass);
+ _box_klasses[T_DOUBLE] = WK_KLASS(Double_klass);
+ _box_klasses[T_BYTE] = WK_KLASS(Byte_klass);
+ _box_klasses[T_SHORT] = WK_KLASS(Short_klass);
+ _box_klasses[T_INT] = WK_KLASS(Integer_klass);
+ _box_klasses[T_LONG] = WK_KLASS(Long_klass);
//_box_klasses[T_OBJECT] = WK_KLASS(object_klass);
//_box_klasses[T_ARRAY] = WK_KLASS(object_klass);
@@ -2014,11 +2014,11 @@
#endif // KERNEL
{ // Compute whether we should use loadClass or loadClassInternal when loading classes.
- methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
+ methodOop method = instanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::loadClassInternal_name(), vmSymbols::string_class_signature());
_has_loadClassInternal = (method != NULL);
}
{ // Compute whether we should use checkPackageAccess or NOT
- methodOop method = instanceKlass::cast(classloader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
+ methodOop method = instanceKlass::cast(ClassLoader_klass())->find_method(vmSymbols::checkPackageAccess_name(), vmSymbols::class_protectiondomain_signature());
_has_checkPackageAccess = (method != NULL);
}
}
@@ -2340,6 +2340,8 @@
SymbolPropertyEntry* spe = invoke_method_table()->find_entry(index, hash, signature);
if (spe == NULL || spe->property_oop() == NULL) {
// Must create lots of stuff here, but outside of the SystemDictionary lock.
+ if (THREAD->is_Compiler_thread())
+ return NULL; // do not attempt from within compiler
Handle mt = compute_method_handle_type(signature(),
class_loader, protection_domain,
CHECK_NULL);
@@ -2372,7 +2374,7 @@
TRAPS) {
Handle empty;
int npts = ArgumentCount(signature()).size();
- objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::class_klass(), npts, CHECK_(empty));
+ objArrayHandle pts = oopFactory::new_objArray(SystemDictionary::Class_klass(), npts, CHECK_(empty));
int arg = 0;
Handle rt; // the return type from the signature
for (SignatureStream ss(signature()); !ss.is_done(); ss.next()) {
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -82,55 +82,55 @@
#define WK_KLASSES_DO(template) \
/* well-known classes */ \
- template(object_klass, java_lang_Object, Pre) \
- template(string_klass, java_lang_String, Pre) \
- template(class_klass, java_lang_Class, Pre) \
- template(cloneable_klass, java_lang_Cloneable, Pre) \
- template(classloader_klass, java_lang_ClassLoader, Pre) \
- template(serializable_klass, java_io_Serializable, Pre) \
- template(system_klass, java_lang_System, Pre) \
- template(throwable_klass, java_lang_Throwable, Pre) \
- template(error_klass, java_lang_Error, Pre) \
- template(threaddeath_klass, java_lang_ThreadDeath, Pre) \
- template(exception_klass, java_lang_Exception, Pre) \
- template(runtime_exception_klass, java_lang_RuntimeException, Pre) \
- template(protectionDomain_klass, java_security_ProtectionDomain, Pre) \
+ template(Object_klass, java_lang_Object, Pre) \
+ template(String_klass, java_lang_String, Pre) \
+ template(Class_klass, java_lang_Class, Pre) \
+ template(Cloneable_klass, java_lang_Cloneable, Pre) \
+ template(ClassLoader_klass, java_lang_ClassLoader, Pre) \
+ template(Serializable_klass, java_io_Serializable, Pre) \
+ template(System_klass, java_lang_System, Pre) \
+ template(Throwable_klass, java_lang_Throwable, Pre) \
+ template(Error_klass, java_lang_Error, Pre) \
+ template(ThreadDeath_klass, java_lang_ThreadDeath, Pre) \
+ template(Exception_klass, java_lang_Exception, Pre) \
+ template(RuntimeException_klass, java_lang_RuntimeException, Pre) \
+ template(ProtectionDomain_klass, java_security_ProtectionDomain, Pre) \
template(AccessControlContext_klass, java_security_AccessControlContext, Pre) \
- template(classNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \
- template(noClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre) \
- template(linkageError_klass, java_lang_LinkageError, Pre) \
+ template(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre) \
+ template(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre) \
+ template(LinkageError_klass, java_lang_LinkageError, Pre) \
template(ClassCastException_klass, java_lang_ClassCastException, Pre) \
template(ArrayStoreException_klass, java_lang_ArrayStoreException, Pre) \
- template(virtualMachineError_klass, java_lang_VirtualMachineError, Pre) \
+ template(VirtualMachineError_klass, java_lang_VirtualMachineError, Pre) \
template(OutOfMemoryError_klass, java_lang_OutOfMemoryError, Pre) \
template(StackOverflowError_klass, java_lang_StackOverflowError, Pre) \
template(IllegalMonitorStateException_klass, java_lang_IllegalMonitorStateException, Pre) \
- template(reference_klass, java_lang_ref_Reference, Pre) \
+ template(Reference_klass, java_lang_ref_Reference, Pre) \
\
/* Preload ref klasses and set reference types */ \
- template(soft_reference_klass, java_lang_ref_SoftReference, Pre) \
- template(weak_reference_klass, java_lang_ref_WeakReference, Pre) \
- template(final_reference_klass, java_lang_ref_FinalReference, Pre) \
- template(phantom_reference_klass, java_lang_ref_PhantomReference, Pre) \
- template(finalizer_klass, java_lang_ref_Finalizer, Pre) \
+ template(SoftReference_klass, java_lang_ref_SoftReference, Pre) \
+ template(WeakReference_klass, java_lang_ref_WeakReference, Pre) \
+ template(FinalReference_klass, java_lang_ref_FinalReference, Pre) \
+ template(PhantomReference_klass, java_lang_ref_PhantomReference, Pre) \
+ template(Finalizer_klass, java_lang_ref_Finalizer, Pre) \
\
- template(thread_klass, java_lang_Thread, Pre) \
- template(threadGroup_klass, java_lang_ThreadGroup, Pre) \
- template(properties_klass, java_util_Properties, Pre) \
- template(reflect_accessible_object_klass, java_lang_reflect_AccessibleObject, Pre) \
- template(reflect_field_klass, java_lang_reflect_Field, Pre) \
- template(reflect_method_klass, java_lang_reflect_Method, Pre) \
- template(reflect_constructor_klass, java_lang_reflect_Constructor, Pre) \
+ template(Thread_klass, java_lang_Thread, Pre) \
+ template(ThreadGroup_klass, java_lang_ThreadGroup, Pre) \
+ template(Properties_klass, java_util_Properties, Pre) \
+ template(reflect_AccessibleObject_klass, java_lang_reflect_AccessibleObject, Pre) \
+ template(reflect_Field_klass, java_lang_reflect_Field, Pre) \
+ template(reflect_Method_klass, java_lang_reflect_Method, Pre) \
+ template(reflect_Constructor_klass, java_lang_reflect_Constructor, Pre) \
\
/* NOTE: needed too early in bootstrapping process to have checks based on JDK version */ \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
- template(reflect_magic_klass, sun_reflect_MagicAccessorImpl, Opt) \
- template(reflect_method_accessor_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
- template(reflect_constructor_accessor_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
- template(reflect_delegating_classloader_klass, sun_reflect_DelegatingClassLoader, Opt) \
- template(reflect_constant_pool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15) \
- template(reflect_unsafe_static_field_accessor_impl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
+ template(reflect_MagicAccessorImpl_klass, sun_reflect_MagicAccessorImpl, Opt) \
+ template(reflect_MethodAccessorImpl_klass, sun_reflect_MethodAccessorImpl, Opt_Only_JDK14NewRef) \
+ template(reflect_ConstructorAccessorImpl_klass, sun_reflect_ConstructorAccessorImpl, Opt_Only_JDK14NewRef) \
+ template(reflect_DelegatingClassLoader_klass, sun_reflect_DelegatingClassLoader, Opt) \
+ template(reflect_ConstantPool_klass, sun_reflect_ConstantPool, Opt_Only_JDK15) \
+ template(reflect_UnsafeStaticFieldAccessorImpl_klass, sun_reflect_UnsafeStaticFieldAccessorImpl, Opt_Only_JDK15) \
\
/* support for dynamic typing; it's OK if these are NULL in earlier JDKs */ \
template(MethodHandle_klass, java_dyn_MethodHandle, Opt) \
@@ -144,16 +144,14 @@
template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
template(Linkage_klass, java_dyn_Linkage, Opt) \
template(CallSite_klass, java_dyn_CallSite, Opt) \
- template(Dynamic_klass, java_dyn_Dynamic, Opt) \
- /* Note: MethodHandle must be first, and Dynamic last in group */ \
+ template(InvokeDynamic_klass, java_dyn_InvokeDynamic, Opt) \
+ /* Note: MethodHandle must be first, and InvokeDynamic last in group */ \
\
- template(vector_klass, java_util_Vector, Pre) \
- template(hashtable_klass, java_util_Hashtable, Pre) \
- template(stringBuffer_klass, java_lang_StringBuffer, Pre) \
+ template(StringBuffer_klass, java_lang_StringBuffer, Pre) \
template(StringBuilder_klass, java_lang_StringBuilder, Pre) \
\
/* It's NULL in non-1.4 JDKs. */ \
- template(stackTraceElement_klass, java_lang_StackTraceElement, Opt) \
+ template(StackTraceElement_klass, java_lang_StackTraceElement, Opt) \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
/* It's okay if this turns out to be NULL in non-1.4 JDKs. */ \
template(java_nio_Buffer_klass, java_nio_Buffer, Opt) \
@@ -164,14 +162,14 @@
template(sun_jkernel_DownloadManager_klass, sun_jkernel_DownloadManager, Opt_Kernel) \
\
/* Preload boxing klasses */ \
- template(boolean_klass, java_lang_Boolean, Pre) \
- template(char_klass, java_lang_Character, Pre) \
- template(float_klass, java_lang_Float, Pre) \
- template(double_klass, java_lang_Double, Pre) \
- template(byte_klass, java_lang_Byte, Pre) \
- template(short_klass, java_lang_Short, Pre) \
- template(int_klass, java_lang_Integer, Pre) \
- template(long_klass, java_lang_Long, Pre) \
+ template(Boolean_klass, java_lang_Boolean, Pre) \
+ template(Character_klass, java_lang_Character, Pre) \
+ template(Float_klass, java_lang_Float, Pre) \
+ template(Double_klass, java_lang_Double, Pre) \
+ template(Byte_klass, java_lang_Byte, Pre) \
+ template(Short_klass, java_lang_Short, Pre) \
+ template(Integer_klass, java_lang_Integer, Pre) \
+ template(Long_klass, java_lang_Long, Pre) \
/*end*/
@@ -438,8 +436,8 @@
// Tells whether ClassLoader.checkPackageAccess is present
static bool has_checkPackageAccess() { return _has_checkPackageAccess; }
- static bool class_klass_loaded() { return WK_KLASS(class_klass) != NULL; }
- static bool cloneable_klass_loaded() { return WK_KLASS(cloneable_klass) != NULL; }
+ static bool Class_klass_loaded() { return WK_KLASS(Class_klass) != NULL; }
+ static bool Cloneable_klass_loaded() { return WK_KLASS(Cloneable_klass) != NULL; }
// Returns default system loader
static oop java_system_loader();
--- a/hotspot/src/share/vm/classfile/verifier.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/verifier.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -143,7 +143,7 @@
bool Verifier::is_eligible_for_verification(instanceKlassHandle klass, bool should_verify_class) {
symbolOop name = klass->name();
- klassOop refl_magic_klass = SystemDictionary::reflect_magic_klass();
+ klassOop refl_magic_klass = SystemDictionary::reflect_MagicAccessorImpl_klass();
return (should_verify_for(klass->class_loader(), should_verify_class) &&
// return if the class is a bootstrapping class
--- a/hotspot/src/share/vm/classfile/vmSymbols.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,6 +70,7 @@
void vmSymbols::initialize(TRAPS) {
assert((int)SID_LIMIT <= (1<<log2_SID_LIMIT), "must fit in this bitfield");
assert((int)SID_LIMIT*5 > (1<<log2_SID_LIMIT), "make the bitfield smaller, please");
+ assert(vmIntrinsics::FLAG_LIMIT <= (1 << vmIntrinsics::log2_FLAG_LIMIT), "must fit in this bitfield");
if (!UseSharedSpaces) {
const char* string = &vm_symbol_bodies[0];
@@ -271,6 +272,58 @@
return sid;
}
+static vmIntrinsics::ID wrapper_intrinsic(BasicType type, bool unboxing) {
+#define TYPE2(type, unboxing) ((int)(type)*2 + ((unboxing) ? 1 : 0))
+ switch (TYPE2(type, unboxing)) {
+#define BASIC_TYPE_CASE(type, box, unbox) \
+ case TYPE2(type, false): return vmIntrinsics::box; \
+ case TYPE2(type, true): return vmIntrinsics::unbox
+ BASIC_TYPE_CASE(T_BOOLEAN, _Boolean_valueOf, _booleanValue);
+ BASIC_TYPE_CASE(T_BYTE, _Byte_valueOf, _byteValue);
+ BASIC_TYPE_CASE(T_CHAR, _Character_valueOf, _charValue);
+ BASIC_TYPE_CASE(T_SHORT, _Short_valueOf, _shortValue);
+ BASIC_TYPE_CASE(T_INT, _Integer_valueOf, _intValue);
+ BASIC_TYPE_CASE(T_LONG, _Long_valueOf, _longValue);
+ BASIC_TYPE_CASE(T_FLOAT, _Float_valueOf, _floatValue);
+ BASIC_TYPE_CASE(T_DOUBLE, _Double_valueOf, _doubleValue);
+#undef BASIC_TYPE_CASE
+ }
+#undef TYPE2
+ return vmIntrinsics::_none;
+}
+
+vmIntrinsics::ID vmIntrinsics::for_boxing(BasicType type) {
+ return wrapper_intrinsic(type, false);
+}
+vmIntrinsics::ID vmIntrinsics::for_unboxing(BasicType type) {
+ return wrapper_intrinsic(type, true);
+}
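
Editor's sketch (not part of the changeset): the TYPE2 macro above folds a (BasicType, boxing-or-unboxing) pair into one integer so a single switch can dispatch on both dimensions, and for_boxing/for_unboxing are thin wrappers over that switch. A minimal standalone illustration of the same trick, using made-up enum names and plain C++ rather than HotSpot types:

#include <cstdio>

// Stand-ins for BasicType and vmIntrinsics::ID; only the int case is wired up.
enum SketchType { S_BOOLEAN, S_INT, S_LONG };
enum SketchId   { ID_none, ID_Integer_valueOf, ID_intValue };

// Fold (type, direction) into one switch key, as the TYPE2 macro above does.
#define KEY2(type, unboxing) ((int)(type) * 2 + ((unboxing) ? 1 : 0))

static SketchId wrapper_id(SketchType type, bool unboxing) {
  switch (KEY2(type, unboxing)) {
    case KEY2(S_INT, false): return ID_Integer_valueOf;  // boxing direction
    case KEY2(S_INT, true):  return ID_intValue;         // unboxing direction
  }
  return ID_none;  // no intrinsic registered for this (type, direction) pair
}

int main() {
  std::printf("box=%d unbox=%d\n", wrapper_id(S_INT, false), wrapper_id(S_INT, true));
  return 0;
}
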
+
+vmIntrinsics::ID vmIntrinsics::for_raw_conversion(BasicType src, BasicType dest) {
+#define SRC_DEST(s,d) (((int)(s) << 4) + (int)(d))
+ switch (SRC_DEST(src, dest)) {
+ case SRC_DEST(T_INT, T_FLOAT): return vmIntrinsics::_intBitsToFloat;
+ case SRC_DEST(T_FLOAT, T_INT): return vmIntrinsics::_floatToRawIntBits;
+
+ case SRC_DEST(T_LONG, T_DOUBLE): return vmIntrinsics::_longBitsToDouble;
+ case SRC_DEST(T_DOUBLE, T_LONG): return vmIntrinsics::_doubleToRawLongBits;
+ }
+#undef SRC_DEST
+
+ return vmIntrinsics::_none;
+}
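
Editor's sketch (not part of the changeset): the raw-conversion intrinsics map Float.intBitsToFloat, Float.floatToRawIntBits and their double counterparts, which reinterpret a bit pattern without numeric conversion. In portable C++ the equivalent operation is a memcpy between same-sized types; a small self-contained example:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the bits of a 32-bit int as a float, the analogue of the
// _intBitsToFloat intrinsic.
static float int_bits_to_float(int32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof f);   // defined behaviour, unlike a pointer cast
  return f;
}

// The reverse direction, the analogue of _floatToRawIntBits.
static int32_t float_to_raw_int_bits(float f) {
  int32_t bits;
  std::memcpy(&bits, &f, sizeof bits);
  return bits;
}

int main() {
  int32_t bits = 0x3f800000;                       // IEEE-754 encoding of 1.0f
  std::printf("%f\n", int_bits_to_float(bits));    // prints 1.000000
  std::printf("0x%x\n", (unsigned)float_to_raw_int_bits(1.0f));
  return 0;
}
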
+
+methodOop vmIntrinsics::method_for(vmIntrinsics::ID id) {
+ if (id == _none) return NULL;
+ symbolOop cname = vmSymbols::symbol_at(class_for(id));
+ symbolOop mname = vmSymbols::symbol_at(name_for(id));
+ symbolOop msig = vmSymbols::symbol_at(signature_for(id));
+ if (cname == NULL || mname == NULL || msig == NULL) return NULL;
+ klassOop k = SystemDictionary::find_well_known_klass(cname);
+ if (k == NULL) return NULL;
+ return instanceKlass::cast(k)->find_method(mname, msig);
+}
+
#define VM_INTRINSIC_INITIALIZE(id, klass, name, sig, flags) #id "\0"
static const char* vm_intrinsic_name_bodies =
@@ -330,15 +383,15 @@
}
// These are for forming case labels:
-#define ID3(x, y, z) (( jint)(z) + \
- ((jint)(y) << vmSymbols::log2_SID_LIMIT) + \
- ((jint)(x) << (2*vmSymbols::log2_SID_LIMIT)) )
+#define ID3(x, y, z) (( jlong)(z) + \
+ ((jlong)(y) << vmSymbols::log2_SID_LIMIT) + \
+ ((jlong)(x) << (2*vmSymbols::log2_SID_LIMIT)) )
#define SID_ENUM(n) vmSymbols::VM_SYMBOL_ENUM_NAME(n)
-vmIntrinsics::ID vmIntrinsics::find_id(vmSymbols::SID holder,
- vmSymbols::SID name,
- vmSymbols::SID sig,
- jshort flags) {
+vmIntrinsics::ID vmIntrinsics::find_id_impl(vmSymbols::SID holder,
+ vmSymbols::SID name,
+ vmSymbols::SID sig,
+ jshort flags) {
assert((int)vmSymbols::SID_LIMIT <= (1<<vmSymbols::log2_SID_LIMIT), "must fit");
// Let the C compiler build the decision tree.
@@ -383,62 +436,50 @@
}
-// These are for friendly printouts of intrinsics:
+// These are to get information about intrinsics.
+
+#define ID4(x, y, z, f) ((ID3(x, y, z) << vmIntrinsics::log2_FLAG_LIMIT) | (jlong) (f))
+
+static const jlong intrinsic_info_array[vmIntrinsics::ID_LIMIT+1] = {
+#define VM_INTRINSIC_INFO(ignore_id, klass, name, sig, fcode) \
+ ID4(SID_ENUM(klass), SID_ENUM(name), SID_ENUM(sig), vmIntrinsics::fcode),
+
+ 0, VM_INTRINSICS_DO(VM_INTRINSIC_INFO,
+ VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE)
+ 0
+#undef VM_INTRINSIC_INFO
+};
+
+inline jlong intrinsic_info(vmIntrinsics::ID id) {
+ return intrinsic_info_array[vmIntrinsics::ID_from((int)id)];
+}
vmSymbols::SID vmIntrinsics::class_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
- case id: return SID_ENUM(klass);
-
- switch (id) {
- VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
- VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
- }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
- return vmSymbols::NO_SID;
+ jlong info = intrinsic_info(id);
+ int shift = 2*vmSymbols::log2_SID_LIMIT + log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+ assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1021, "");
+ return vmSymbols::SID( (info >> shift) & mask );
}
vmSymbols::SID vmIntrinsics::name_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
- case id: return SID_ENUM(name);
-
- switch (id) {
- VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
- VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
- }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
- return vmSymbols::NO_SID;
+ jlong info = intrinsic_info(id);
+ int shift = vmSymbols::log2_SID_LIMIT + log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+ assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1022, "");
+ return vmSymbols::SID( (info >> shift) & mask );
}
vmSymbols::SID vmIntrinsics::signature_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
- case id: return SID_ENUM(sig);
-
- switch (id) {
- VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
- VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
- }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
- return vmSymbols::NO_SID;
+ jlong info = intrinsic_info(id);
+ int shift = log2_FLAG_LIMIT, mask = right_n_bits(vmSymbols::log2_SID_LIMIT);
+ assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 1023, "");
+ return vmSymbols::SID( (info >> shift) & mask );
}
vmIntrinsics::Flags vmIntrinsics::flags_for(vmIntrinsics::ID id) {
-#ifndef PRODUCT
-#define VM_INTRINSIC_CASE(id, klass, name, sig, fcode) \
- case id: return fcode;
-
- switch (id) {
- VM_INTRINSICS_DO(VM_INTRINSIC_CASE,
- VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_SYMBOL_IGNORE, VM_ALIAS_IGNORE);
- }
-#undef VM_INTRINSIC_CASE
-#endif //PRODUCT
- return F_none;
+ jlong info = intrinsic_info(id);
+ int shift = 0, mask = right_n_bits(log2_FLAG_LIMIT);
+ assert(((ID4(1021,1022,1023,15) >> shift) & mask) == 15, "");
+ return Flags( (info >> shift) & mask );
}
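
Editor's sketch (not part of the changeset): intrinsic_info_array replaces the old non-product switch statements with one 64-bit word per intrinsic, built by ID4 from three symbol IDs plus the 4-bit flag field, and class_for/name_for/signature_for/flags_for recover each field with a shift and a mask. The packing scheme in isolation, with hypothetical field widths standing in for log2_SID_LIMIT and log2_FLAG_LIMIT:

#include <cassert>
#include <cstdint>

// Hypothetical widths: 11 bits per symbol id, 4 bits of flags, so three ids
// plus the flags fit comfortably in 64 bits.
const int kSidBits  = 11;
const int kFlagBits = 4;

static uint64_t pack4(uint64_t klass, uint64_t name, uint64_t sig, uint64_t flags) {
  return (((klass << kSidBits | name) << kSidBits | sig) << kFlagBits) | flags;
}

static unsigned field(uint64_t info, int shift, int bits) {
  return (unsigned)((info >> shift) & ((1u << bits) - 1));
}

int main() {
  uint64_t info = pack4(1021, 1022, 1023, 15);
  // Mirror the self-checking asserts in class_for/name_for/signature_for/flags_for.
  assert(field(info, 2 * kSidBits + kFlagBits, kSidBits)  == 1021);  // klass
  assert(field(info,     kSidBits + kFlagBits, kSidBits)  == 1022);  // name
  assert(field(info,                kFlagBits, kSidBits)  == 1023);  // signature
  assert(field(info,                        0, kFlagBits) == 15);    // flags
  return 0;
}
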
--- a/hotspot/src/share/vm/classfile/vmSymbols.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/classfile/vmSymbols.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -219,7 +219,7 @@
template(base_name, "base") \
\
/* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */ \
- template(java_dyn_Dynamic, "java/dyn/Dynamic") \
+ template(java_dyn_InvokeDynamic, "java/dyn/InvokeDynamic") \
template(java_dyn_Linkage, "java/dyn/Linkage") \
template(java_dyn_CallSite, "java/dyn/CallSite") \
template(java_dyn_MethodHandle, "java/dyn/MethodHandle") \
@@ -347,9 +347,14 @@
\
/* common signatures names */ \
template(void_method_signature, "()V") \
+ template(void_boolean_signature, "()Z") \
+ template(void_byte_signature, "()B") \
+ template(void_char_signature, "()C") \
+ template(void_short_signature, "()S") \
template(void_int_signature, "()I") \
template(void_long_signature, "()J") \
- template(void_boolean_signature, "()Z") \
+ template(void_float_signature, "()F") \
+ template(void_double_signature, "()D") \
template(int_void_signature, "(I)V") \
template(int_int_signature, "(I)I") \
template(int_bool_signature, "(I)Z") \
@@ -854,6 +859,46 @@
\
do_intrinsic(_invoke, java_lang_reflect_Method, invoke_name, object_array_object_object_signature, F_R) \
/* (symbols invoke_name and invoke_signature defined above) */ \
+ do_intrinsic(_checkSpreadArgument, sun_dyn_MethodHandleImpl, checkSpreadArgument_name, checkSpreadArgument_signature, F_S) \
+ do_name( checkSpreadArgument_name, "checkSpreadArgument") \
+ do_name( checkSpreadArgument_signature, "(Ljava/lang/Object;I)V") \
+ \
+ /* unboxing methods: */ \
+ do_intrinsic(_booleanValue, java_lang_Boolean, booleanValue_name, void_boolean_signature, F_R) \
+ do_name( booleanValue_name, "booleanValue") \
+ do_intrinsic(_byteValue, java_lang_Byte, byteValue_name, void_byte_signature, F_R) \
+ do_name( byteValue_name, "byteValue") \
+ do_intrinsic(_charValue, java_lang_Character, charValue_name, void_char_signature, F_R) \
+ do_name( charValue_name, "charValue") \
+ do_intrinsic(_shortValue, java_lang_Short, shortValue_name, void_short_signature, F_R) \
+ do_name( shortValue_name, "shortValue") \
+ do_intrinsic(_intValue, java_lang_Integer, intValue_name, void_int_signature, F_R) \
+ do_name( intValue_name, "intValue") \
+ do_intrinsic(_longValue, java_lang_Long, longValue_name, void_long_signature, F_R) \
+ do_name( longValue_name, "longValue") \
+ do_intrinsic(_floatValue, java_lang_Float, floatValue_name, void_float_signature, F_R) \
+ do_name( floatValue_name, "floatValue") \
+ do_intrinsic(_doubleValue, java_lang_Double, doubleValue_name, void_double_signature, F_R) \
+ do_name( doubleValue_name, "doubleValue") \
+ \
+ /* boxing methods: */ \
+ do_name( valueOf_name, "valueOf") \
+ do_intrinsic(_Boolean_valueOf, java_lang_Boolean, valueOf_name, Boolean_valueOf_signature, F_S) \
+ do_name( Boolean_valueOf_signature, "(Z)Ljava/lang/Boolean;") \
+ do_intrinsic(_Byte_valueOf, java_lang_Byte, valueOf_name, Byte_valueOf_signature, F_S) \
+ do_name( Byte_valueOf_signature, "(B)Ljava/lang/Byte;") \
+ do_intrinsic(_Character_valueOf, java_lang_Character, valueOf_name, Character_valueOf_signature, F_S) \
+ do_name( Character_valueOf_signature, "(C)Ljava/lang/Character;") \
+ do_intrinsic(_Short_valueOf, java_lang_Short, valueOf_name, Short_valueOf_signature, F_S) \
+ do_name( Short_valueOf_signature, "(S)Ljava/lang/Short;") \
+ do_intrinsic(_Integer_valueOf, java_lang_Integer, valueOf_name, Integer_valueOf_signature, F_S) \
+ do_name( Integer_valueOf_signature, "(I)Ljava/lang/Integer;") \
+ do_intrinsic(_Long_valueOf, java_lang_Long, valueOf_name, Long_valueOf_signature, F_S) \
+ do_name( Long_valueOf_signature, "(J)Ljava/lang/Long;") \
+ do_intrinsic(_Float_valueOf, java_lang_Float, valueOf_name, Float_valueOf_signature, F_S) \
+ do_name( Float_valueOf_signature, "(F)Ljava/lang/Float;") \
+ do_intrinsic(_Double_valueOf, java_lang_Double, valueOf_name, Double_valueOf_signature, F_S) \
+ do_name( Double_valueOf_signature, "(D)Ljava/lang/Double;") \
\
/*end*/
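
Editor's sketch (not part of the changeset): the do_intrinsic/do_name entries above form an X-macro table, a single list macro that other files expand several times with different per-entry macros (VM_INTRINSIC_INFO, VM_INTRINSIC_INITIALIZE, and so on) to generate the ID enum, the name strings and the packed info array in lock step. A stripped-down standalone version of the pattern:

#include <cstdio>

// One master list; each entry is (id, name-string).
#define SKETCH_INTRINSICS_DO(template_)    \
  template_(_booleanValue, "booleanValue") \
  template_(_intValue,     "intValue")     \
  template_(_Long_valueOf, "valueOf")

// Expansion 1: the ID enum.
#define DECLARE_ID(id, name) id,
enum SketchID { _none, SKETCH_INTRINSICS_DO(DECLARE_ID) ID_LIMIT };
#undef DECLARE_ID

// Expansion 2: a parallel name table, indexed by the same IDs.
#define DECLARE_NAME(id, name) name,
static const char* const sketch_names[] = { "_none", SKETCH_INTRINSICS_DO(DECLARE_NAME) };
#undef DECLARE_NAME

int main() {
  for (int id = _none; id < ID_LIMIT; id++)
    std::printf("%d -> %s\n", id, sketch_names[id]);
  return 0;
}
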
@@ -984,7 +1029,12 @@
F_Y, // !static ?native synchronized
F_RN, // !static native !synchronized
F_SN, // static native !synchronized
- F_RNY // !static native synchronized
+ F_RNY, // !static native synchronized
+
+ FLAG_LIMIT
+ };
+ enum {
+ log2_FLAG_LIMIT = 4 // checked by an assert at start-up
};
public:
@@ -996,15 +1046,32 @@
static const char* name_at(ID id);
+private:
+ static ID find_id_impl(vmSymbols::SID holder,
+ vmSymbols::SID name,
+ vmSymbols::SID sig,
+ jshort flags);
+
+public:
// Given a method's class, name, signature, and access flags, report its ID.
static ID find_id(vmSymbols::SID holder,
vmSymbols::SID name,
vmSymbols::SID sig,
- jshort flags);
+ jshort flags) {
+ ID id = find_id_impl(holder, name, sig, flags);
+#ifdef ASSERT
+ // The following asserts do not hold for ID _none.
+ if (id == _none) return id;
+#endif
+ assert( class_for(id) == holder, "correct id");
+ assert( name_for(id) == name, "correct id");
+ assert(signature_for(id) == sig, "correct id");
+ return id;
+ }
static void verify_method(ID actual_id, methodOop m) PRODUCT_RETURN;
- // No need for these in the product:
+ // Find out the symbols behind an intrinsic:
static vmSymbols::SID class_for(ID id);
static vmSymbols::SID name_for(ID id);
static vmSymbols::SID signature_for(ID id);
@@ -1014,4 +1081,11 @@
// Access to intrinsic methods:
static methodOop method_for(ID id);
+
+ // Wrapper object methods:
+ static ID for_boxing(BasicType type);
+ static ID for_unboxing(BasicType type);
+
+ // Raw conversion:
+ static ID for_raw_conversion(BasicType src, BasicType dest);
};
--- a/hotspot/src/share/vm/code/codeBlob.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/code/codeBlob.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,6 +102,9 @@
virtual bool is_compiled_by_c2() const { return false; }
virtual bool is_compiled_by_c1() const { return false; }
+ // Casting
+ nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }
+
// Boundaries
address header_begin() const { return (address) this; }
address header_end() const { return ((address) this) + _header_size; };
@@ -201,7 +204,8 @@
virtual void print_value_on(outputStream* st) const PRODUCT_RETURN;
// Print the comment associated with offset on stream, if there is one
- void print_block_comment(outputStream* stream, intptr_t offset) {
+ virtual void print_block_comment(outputStream* stream, address block_begin) {
+ intptr_t offset = (intptr_t)(block_begin - instructions_begin());
_comments.print_block_comment(stream, offset);
}
--- a/hotspot/src/share/vm/code/debugInfoRec.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/code/debugInfoRec.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -281,6 +281,7 @@
ciMethod* method,
int bci,
bool reexecute,
+ bool is_method_handle_invoke,
DebugToken* locals,
DebugToken* expressions,
DebugToken* monitors) {
@@ -292,8 +293,9 @@
int stream_offset = stream()->position();
last_pd->set_scope_decode_offset(stream_offset);
- // Record reexecute bit into pcDesc
+ // Record flags into pcDesc.
last_pd->set_should_reexecute(reexecute);
+ last_pd->set_is_method_handle_invoke(is_method_handle_invoke);
// serialize sender stream offset
stream()->write_int(sender_stream_offset);
--- a/hotspot/src/share/vm/code/debugInfoRec.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/code/debugInfoRec.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -88,6 +88,7 @@
ciMethod* method,
int bci,
bool reexecute,
+ bool is_method_handle_invoke = false,
DebugToken* locals = NULL,
DebugToken* expressions = NULL,
DebugToken* monitors = NULL);
--- a/hotspot/src/share/vm/code/nmethod.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/code/nmethod.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -56,13 +56,13 @@
#endif
bool nmethod::is_compiled_by_c1() const {
+ if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
if (is_native_method()) return false;
- assert(compiler() != NULL, "must be");
return compiler()->is_c1();
}
bool nmethod::is_compiled_by_c2() const {
+ if (compiler() == NULL || method() == NULL) return false; // can happen during debug printing
if (is_native_method()) return false;
- assert(compiler() != NULL, "must be");
return compiler()->is_c2();
}
@@ -1170,7 +1170,7 @@
}
// Common functionality for both make_not_entrant and make_zombie
-bool nmethod::make_not_entrant_or_zombie(int state) {
+bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
// If the method is already zombie there is nothing to do
@@ -1724,9 +1724,9 @@
if (!method()->is_native()) {
SimpleScopeDesc ssd(this, fr.pc());
Bytecode_invoke* call = Bytecode_invoke_at(ssd.method(), ssd.bci());
- bool is_static = call->is_invokestatic();
+ bool has_receiver = call->has_receiver();
symbolOop signature = call->signature();
- fr.oops_compiled_arguments_do(signature, is_static, reg_map, f);
+ fr.oops_compiled_arguments_do(signature, has_receiver, reg_map, f);
}
}
@@ -1763,6 +1763,14 @@
"must end with a sentinel");
#endif //ASSERT
+ // Search for MethodHandle invokes and tag the nmethod.
+ for (int i = 0; i < count; i++) {
+ if (pcs[i].is_method_handle_invoke()) {
+ set_has_method_handle_invokes(true);
+ break;
+ }
+ }
+
int size = count * sizeof(PcDesc);
assert(scopes_pcs_size() >= size, "oob");
memcpy(scopes_pcs_begin(), pcs, size);
@@ -2030,6 +2038,18 @@
// -----------------------------------------------------------------------------
+// MethodHandle
+
+bool nmethod::is_method_handle_return(address return_pc) {
+ if (!has_method_handle_invokes()) return false;
+ PcDesc* pd = pc_desc_at(return_pc);
+ if (pd == NULL)
+ return false;
+ return pd->is_method_handle_invoke();
+}
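
Editor's sketch (not part of the changeset): the scheme here is to scan the PcDesc table once when the descriptors are installed, cache the answer in a single has_method_handle_invokes flag, and let the per-return-address query in is_method_handle_return bail out early when the nmethod contains no MethodHandle invokes at all. Stripped of HotSpot types, the shape of that fast path is roughly:

#include <cstdio>
#include <vector>

struct SketchPcDesc {
  int  pc_offset;
  bool is_method_handle_invoke;
};

struct SketchMethod {
  std::vector<SketchPcDesc> pcs;
  bool has_method_handle_invokes = false;

  // Done once when the descriptors are installed (the copy_scopes_pcs analogue).
  void install(const std::vector<SketchPcDesc>& descs) {
    pcs = descs;
    for (size_t i = 0; i < pcs.size(); i++) {
      if (pcs[i].is_method_handle_invoke) { has_method_handle_invokes = true; break; }
    }
  }

  // Queried per return address; the cached flag keeps the common case cheap.
  bool is_method_handle_return(int pc_offset) const {
    if (!has_method_handle_invokes) return false;   // fast path: nothing to find
    for (size_t i = 0; i < pcs.size(); i++) {
      if (pcs[i].pc_offset == pc_offset) return pcs[i].is_method_handle_invoke;
    }
    return false;                                    // unknown pc: treat as "no"
  }
};

int main() {
  SketchMethod m;
  m.install({ {4, false}, {12, true}, {20, false} });
  std::printf("%d %d\n", m.is_method_handle_return(12), m.is_method_handle_return(4));
  return 0;
}
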
+
+
+// -----------------------------------------------------------------------------
// Verification
class VerifyOopsClosure: public OopClosure {
@@ -2379,6 +2399,107 @@
return NULL;
}
+void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) {
+ if (block_begin == entry_point()) stream->print_cr("[Entry Point]");
+ if (block_begin == verified_entry_point()) stream->print_cr("[Verified Entry Point]");
+ if (block_begin == exception_begin()) stream->print_cr("[Exception Handler]");
+ if (block_begin == stub_begin()) stream->print_cr("[Stub Code]");
+ if (block_begin == consts_begin()) stream->print_cr("[Constants]");
+ if (block_begin == entry_point()) {
+ methodHandle m = method();
+ if (m.not_null()) {
+ stream->print(" # ");
+ m->print_value_on(stream);
+ stream->cr();
+ }
+ if (m.not_null() && !is_osr_method()) {
+ ResourceMark rm;
+ int sizeargs = m->size_of_parameters();
+ BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, sizeargs);
+ VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, sizeargs);
+ {
+ int sig_index = 0;
+ if (!m->is_static())
+ sig_bt[sig_index++] = T_OBJECT; // 'this'
+ for (SignatureStream ss(m->signature()); !ss.at_return_type(); ss.next()) {
+ BasicType t = ss.type();
+ sig_bt[sig_index++] = t;
+ if (type2size[t] == 2) {
+ sig_bt[sig_index++] = T_VOID;
+ } else {
+ assert(type2size[t] == 1, "size is 1 or 2");
+ }
+ }
+ assert(sig_index == sizeargs, "");
+ }
+ const char* spname = "sp"; // make arch-specific?
+ intptr_t out_preserve = SharedRuntime::java_calling_convention(sig_bt, regs, sizeargs, false);
+ int stack_slot_offset = this->frame_size() * wordSize;
+ int tab1 = 14, tab2 = 24;
+ int sig_index = 0;
+ int arg_index = (m->is_static() ? 0 : -1);
+ bool did_old_sp = false;
+ for (SignatureStream ss(m->signature()); !ss.at_return_type(); ) {
+ bool at_this = (arg_index == -1);
+ bool at_old_sp = false;
+ BasicType t = (at_this ? T_OBJECT : ss.type());
+ assert(t == sig_bt[sig_index], "sigs in sync");
+ if (at_this)
+ stream->print(" # this: ");
+ else
+ stream->print(" # parm%d: ", arg_index);
+ stream->move_to(tab1);
+ VMReg fst = regs[sig_index].first();
+ VMReg snd = regs[sig_index].second();
+ if (fst->is_reg()) {
+ stream->print("%s", fst->name());
+ if (snd->is_valid()) {
+ stream->print(":%s", snd->name());
+ }
+ } else if (fst->is_stack()) {
+ int offset = fst->reg2stack() * VMRegImpl::stack_slot_size + stack_slot_offset;
+ if (offset == stack_slot_offset) at_old_sp = true;
+ stream->print("[%s+0x%x]", spname, offset);
+ } else {
+ stream->print("reg%d:%d??", (int)(intptr_t)fst, (int)(intptr_t)snd);
+ }
+ stream->print(" ");
+ stream->move_to(tab2);
+ stream->print("= ");
+ if (at_this) {
+ m->method_holder()->print_value_on(stream);
+ } else {
+ bool did_name = false;
+ if (!at_this && ss.is_object()) {
+ symbolOop name = ss.as_symbol_or_null();
+ if (name != NULL) {
+ name->print_value_on(stream);
+ did_name = true;
+ }
+ }
+ if (!did_name)
+ stream->print("%s", type2name(t));
+ }
+ if (at_old_sp) {
+ stream->print(" (%s of caller)", spname);
+ did_old_sp = true;
+ }
+ stream->cr();
+ sig_index += type2size[t];
+ arg_index += 1;
+ if (!at_this) ss.next();
+ }
+ if (!did_old_sp) {
+ stream->print(" # ");
+ stream->move_to(tab1);
+ stream->print("[%s+0x%x]", spname, stack_slot_offset);
+ stream->print(" (%s of caller)", spname);
+ stream->cr();
+ }
+ }
+ }
+}
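
Editor's sketch (not part of the changeset): print_nmethod_labels walks the method signature and steps its signature index by type2size, two slots for long and double, one for everything else, while deciding whether each argument lives in a register or at an [sp+offset] stack slot. A standalone walker over a JVM method descriptor (the descriptor string here is hypothetical) shows the same slot bookkeeping:

#include <cstdio>
#include <string>

// Return the JVM slot count of the parameter starting at descriptor[i],
// advancing i past it. Longs and doubles occupy two slots; everything else,
// including references and arrays, occupies one.
static int next_param_slots(const std::string& d, size_t& i) {
  bool is_array = false;
  while (d[i] == '[') { is_array = true; i++; }   // array dimensions
  char c = d[i++];
  if (c == 'L') i = d.find(';', i) + 1;           // skip class name up to ';'
  if (is_array) return 1;                         // arrays are references: one slot
  return (c == 'J' || c == 'D') ? 2 : 1;
}

int main() {
  // Hypothetical descriptor: (int, long, String, double) returning void.
  std::string desc = "(IJLjava/lang/String;D)V";
  size_t i = 1;                                   // skip '('
  int arg = 0, slot = 0;
  while (desc[i] != ')') {
    size_t start = i;
    int slots = next_param_slots(desc, i);
    std::printf("parm%d at slot %d (%d slot%s) [%s]\n",
                arg++, slot, slots, slots == 2 ? "s" : "",
                desc.substr(start, i - start).c_str());
    slot += slots;
  }
  return 0;
}
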
+
void nmethod::print_code_comment_on(outputStream* st, int column, u_char* begin, u_char* end) {
// First, find an oopmap in (begin, end].
// We use the odd half-closed interval so that oop maps and scope descs
--- a/hotspot/src/share/vm/code/nmethod.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/code/nmethod.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -81,18 +81,19 @@
struct nmFlags {
friend class VMStructs;
- unsigned int version:8; // version number (0 = first version)
- unsigned int level:4; // optimization level
- unsigned int age:4; // age (in # of sweep steps)
+ unsigned int version:8; // version number (0 = first version)
+ unsigned int level:4; // optimization level
+ unsigned int age:4; // age (in # of sweep steps)
- unsigned int state:2; // {alive, zombie, unloaded)
+ unsigned int state:2; // {alive, zombie, unloaded)
- unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
- unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
- unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
- unsigned int markedForReclamation:1; // Used by NMethodSweeper
+ unsigned int isUncommonRecompiled:1; // recompiled because of uncommon trap?
+ unsigned int isToBeRecompiled:1; // to be recompiled as soon as it matures
+ unsigned int hasFlushedDependencies:1; // Used for maintenance of dependencies
+ unsigned int markedForReclamation:1; // Used by NMethodSweeper
- unsigned int has_unsafe_access:1; // May fault due to unsafe access.
+ unsigned int has_unsafe_access:1; // May fault due to unsafe access.
+ unsigned int has_method_handle_invokes:1; // Has this method MethodHandle invokes?
void clear();
};
@@ -254,7 +255,7 @@
const char* reloc_string_for(u_char* begin, u_char* end);
// Returns true if this thread changed the state of the nmethod or
// false if another thread performed the transition.
- bool make_not_entrant_or_zombie(int state);
+ bool make_not_entrant_or_zombie(unsigned int state);
void inc_decompile_count();
// used to check that writes to nmFlags are done consistently.
@@ -409,6 +410,9 @@
bool has_unsafe_access() const { return flags.has_unsafe_access; }
void set_has_unsafe_access(bool z) { flags.has_unsafe_access = z; }
+ bool has_method_handle_invokes() const { return flags.has_method_handle_invokes; }
+ void set_has_method_handle_invokes(bool z) { flags.has_method_handle_invokes = z; }
+
int level() const { return flags.level; }
void set_level(int newLevel) { check_safepoint(); flags.level = newLevel; }
@@ -541,6 +545,9 @@
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
+ // MethodHandle
+ bool is_method_handle_return(address return_pc);
+
// jvmti support:
void post_compiled_method_load_event();
@@ -569,6 +576,13 @@
void log_new_nmethod() const;
void log_state_change() const;
+ // Prints block-level comments, including nmethod specific block labels:
+ virtual void print_block_comment(outputStream* stream, address block_begin) {
+ print_nmethod_labels(stream, block_begin);
+ CodeBlob::print_block_comment(stream, block_begin);
+ }
+ void print_nmethod_labels(outputStream* stream, address block_begin);
+
// Prints a comment for one native instruction (reloc info, pc desc)
void print_code_comment_on(outputStream* st, int column, address begin, address end);
static void print_statistics() PRODUCT_RETURN;
--- a/hotspot/src/share/vm/code/pcDesc.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/code/pcDesc.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -38,6 +38,7 @@
int word;
struct {
unsigned int reexecute: 1;
+ unsigned int is_method_handle_invoke: 1;
} bits;
bool operator ==(const PcDescFlags& other) { return word == other.word; }
} _flags;
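
Editor's sketch (not part of the changeset): PcDescFlags keeps individual bits in a bitfield struct but overlays them with a plain int so equality, including the new is_method_handle_invoke bit, can be compared word-wise. The idiom on its own, relying (as the HotSpot union does) on the usual compiler treatment of union type punning:

#include <cstdio>

// A flags word overlaid with named bits, compared word-wise.
union SketchFlags {
  int word;
  struct {
    unsigned int reexecute               : 1;
    unsigned int is_method_handle_invoke : 1;
  } bits;
};

int main() {
  SketchFlags a, b;
  a.word = 0;                        // zero everything, including unused bits
  b.word = 0;
  a.bits.is_method_handle_invoke = 1;
  std::printf("equal before: %d\n", a.word == b.word);   // 0
  b.bits.is_method_handle_invoke = 1;
  std::printf("equal after:  %d\n", a.word == b.word);   // 1
  return 0;
}
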
@@ -72,6 +73,9 @@
_flags == pd->_flags;
}
+ bool is_method_handle_invoke() const { return _flags.bits.is_method_handle_invoke; }
+ void set_is_method_handle_invoke(bool z) { _flags.bits.is_method_handle_invoke = z; }
+
// Returns the real pc
address real_pc(const nmethod* code) const;
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1820,9 +1820,11 @@
CompileBroker::_t_standard_compilation.seconds(),
CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count);
tty->print_cr(" On stack replacement : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count);
- compiler(CompLevel_fast_compile)->print_timers();
- if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier)) {
- compiler(CompLevel_highest_tier)->print_timers();
+
+ if (compiler(CompLevel_fast_compile)) {
+ compiler(CompLevel_fast_compile)->print_timers();
+ if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier))
+ compiler(CompLevel_highest_tier)->print_timers();
}
tty->cr();
--- a/hotspot/src/share/vm/compiler/compilerOracle.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/compiler/compilerOracle.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -392,18 +392,18 @@
};
static MethodMatcher::Mode check_mode(char name[], const char*& error_msg) {
- if (strcmp(name, "*") == 0) return MethodMatcher::Any;
-
int match = MethodMatcher::Exact;
- if (name[0] == '*') {
+ while (name[0] == '*') {
match |= MethodMatcher::Suffix;
strcpy(name, name + 1);
}
+ if (strcmp(name, "*") == 0) return MethodMatcher::Any;
+
size_t len = strlen(name);
- if (len > 0 && name[len - 1] == '*') {
+ while (len > 0 && name[len - 1] == '*') {
match |= MethodMatcher::Prefix;
- name[len - 1] = '\0';
+ name[--len] = '\0';
}
if (strstr(name, "*") != NULL) {
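
Editor's sketch (not part of the changeset): the reworked check_mode strips any number of leading and trailing '*' characters, OR-ing in suffix and prefix match bits as it goes, and only treats a pattern that reduces to a bare "*" as match-anything. A standalone matcher in the same spirit, with hypothetical mode names and std::string in place of the in-place char buffer:

#include <cstdio>
#include <string>

enum Mode { Exact = 0, Prefix = 1, Suffix = 2, Substring = Prefix | Suffix, Any = 4 };

// Strip leading/trailing '*' from the pattern, accumulating the match mode.
static int parse_mode(std::string& pattern) {
  int mode = Exact;
  while (!pattern.empty() && pattern[0] == '*') { mode |= Suffix; pattern.erase(0, 1); }
  if (pattern == "*" || pattern.empty()) return Any;
  while (!pattern.empty() && pattern[pattern.size() - 1] == '*') {
    mode |= Prefix; pattern.erase(pattern.size() - 1);
  }
  return mode;
}

static bool matches(const std::string& name, std::string pattern) {
  int mode = parse_mode(pattern);
  if (mode == Any)    return true;
  if (mode == Exact)  return name == pattern;
  if (mode == Prefix) return name.compare(0, pattern.size(), pattern) == 0;
  if (mode == Suffix) return name.size() >= pattern.size() &&
                             name.compare(name.size() - pattern.size(),
                                          pattern.size(), pattern) == 0;
  return name.find(pattern) != std::string::npos;   // Substring
}

int main() {
  std::printf("%d %d %d %d\n",
              matches("indexOf", "index*"),    // prefix match    -> 1
              matches("indexOf", "*Of"),       // suffix match    -> 1
              matches("indexOf", "*dex*"),     // substring match -> 1
              matches("indexOf", "charAt"));   // exact mismatch  -> 0
  return 0;
}
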
@@ -610,6 +610,14 @@
CompilerOracle::parse_from_string(CompileCommand, CompilerOracle::parse_from_line);
CompilerOracle::parse_from_string(CompileOnly, CompilerOracle::parse_compile_only);
CompilerOracle::parse_from_file();
+ if (lists[PrintCommand] != NULL) {
+ if (PrintAssembly) {
+ warning("CompileCommand and/or .hotspot_compiler file contains 'print' commands, but PrintAssembly is also enabled");
+ } else if (FLAG_IS_DEFAULT(DebugNonSafepoints)) {
+ warning("printing of assembly code is enabled; turning on DebugNonSafepoints to gain additional output");
+ DebugNonSafepoints = true;
+ }
+ }
}
--- a/hotspot/src/share/vm/compiler/disassembler.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/compiler/disassembler.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -151,8 +151,10 @@
outputStream* st = output();
if (_print_bytes && pc > pc0)
print_insn_bytes(pc0, pc);
- if (_nm != NULL)
+ if (_nm != NULL) {
_nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc);
+ // this calls reloc_string_for which calls oop::print_value_on
+ }
// Output pc bucket ticks if we have any
if (total_ticks() != 0) {
@@ -273,8 +275,15 @@
oop obj;
if (_nm != NULL
&& (obj = _nm->embeddedOop_at(cur_insn())) != NULL
- && (address) obj == adr) {
+ && (address) obj == adr
+ && Universe::heap()->is_in(obj)
+ && Universe::heap()->is_in(obj->klass())) {
+ julong c = st->count();
obj->print_value_on(st);
+ if (st->count() == c) {
+ // No output. (Can happen in product builds.)
+ st->print("(a %s)", Klass::cast(obj->klass())->external_name());
+ }
return;
}
}
@@ -286,17 +295,9 @@
void decode_env::print_insn_labels() {
address p = cur_insn();
outputStream* st = output();
- nmethod* nm = _nm;
- if (nm != NULL) {
- if (p == nm->entry_point()) st->print_cr("[Entry Point]");
- if (p == nm->verified_entry_point()) st->print_cr("[Verified Entry Point]");
- if (p == nm->exception_begin()) st->print_cr("[Exception Handler]");
- if (p == nm->stub_begin()) st->print_cr("[Stub Code]");
- if (p == nm->consts_begin()) st->print_cr("[Constants]");
- }
CodeBlob* cb = _code;
if (cb != NULL) {
- cb->print_block_comment(st, (intptr_t)(p - cb->instructions_begin()));
+ cb->print_block_comment(st, p);
}
if (_print_pc) {
st->print(" " INTPTR_FORMAT ": ", (intptr_t) p);
--- a/hotspot/src/share/vm/compiler/methodLiveness.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/compiler/methodLiveness.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -782,6 +782,7 @@
case Bytecodes::_invokespecial:
case Bytecodes::_invokestatic:
case Bytecodes::_invokeinterface:
+ case Bytecodes::_invokedynamic:
case Bytecodes::_newarray:
case Bytecodes::_anewarray:
case Bytecodes::_checkcast:
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -62,12 +62,13 @@
tl->link_head(tc);
tl->link_tail(tc);
tl->set_count(1);
- tl->init_statistics();
+ tl->init_statistics(true /* split_birth */);
tl->setParent(NULL);
tl->setLeft(NULL);
tl->setRight(NULL);
return tl;
}
+
TreeList* TreeList::as_TreeList(HeapWord* addr, size_t size) {
TreeChunk* tc = (TreeChunk*) addr;
assert(size >= sizeof(TreeChunk), "Chunk is too small for a TreeChunk");
@@ -267,6 +268,31 @@
return retTC;
}
+// Returns the block with the largest heap address amongst
+// those in the list for this size; potentially slow and expensive,
+// use with caution!
+TreeChunk* TreeList::largest_address() {
+ guarantee(head() != NULL, "The head of the list cannot be NULL");
+ FreeChunk* fc = head()->next();
+ TreeChunk* retTC;
+ if (fc == NULL) {
+ retTC = head_as_TreeChunk();
+ } else {
+ // walk down the list and return the one with the highest
+ // heap address among chunks of this size.
+ FreeChunk* last = fc;
+ while (fc->next() != NULL) {
+ if ((HeapWord*)last < (HeapWord*)fc) {
+ last = fc;
+ }
+ fc = fc->next();
+ }
+ retTC = TreeChunk::as_TreeChunk(last);
+ }
+ assert(retTC->list() == this, "Wrong type of chunk.");
+ return retTC;
+}
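
Editor's sketch (not part of the changeset): largest_address walks the chunks of one size and keeps the one with the highest heap address. A simplified standalone scan over a singly linked list (not a transcription of the TreeList code) looks like this:

#include <cstddef>
#include <cstdio>

struct SketchNode { SketchNode* next; };

// Return the node with the highest address in a singly linked list.
static SketchNode* largest_address(SketchNode* head) {
  SketchNode* best = head;
  for (SketchNode* c = head; c != NULL; c = c->next) {
    if (c > best) best = c;   // same-size chunks may sit at higher heap addresses
  }
  return best;
}

int main() {
  SketchNode nodes[3];
  nodes[0].next = &nodes[2];   // list order deliberately differs from address order
  nodes[2].next = &nodes[1];
  nodes[1].next = NULL;
  std::printf("largest is node #%ld\n",
              (long)(largest_address(&nodes[0]) - nodes));   // prints 2
  return 0;
}
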
+
BinaryTreeDictionary::BinaryTreeDictionary(MemRegion mr, bool splay):
_splay(splay)
{
@@ -379,7 +405,7 @@
break;
}
// The evm code reset the hint of the candidate as
- // at an interrim point. Why? Seems like this leaves
+ // at an interim point. Why? Seems like this leaves
// the hint pointing to a list that didn't work.
// curTL->set_hint(hintTL->size());
}
@@ -436,7 +462,7 @@
TreeList *curTL = root();
if (curTL != NULL) {
while(curTL->right() != NULL) curTL = curTL->right();
- return curTL->first_available();
+ return curTL->largest_address();
} else {
return NULL;
}
@@ -664,7 +690,7 @@
}
}
TreeChunk* tc = TreeChunk::as_TreeChunk(fc);
- // This chunk is being returned to the binary try. It's embedded
+ // This chunk is being returned to the binary tree. Its embedded
// TreeList should be unused at this point.
tc->initialize();
if (curTL != NULL) { // exact match
@@ -807,6 +833,8 @@
}
bool BinaryTreeDictionary::coalDictOverPopulated(size_t size) {
+ if (FLSAlwaysCoalesceLarge) return true;
+
TreeList* list_of_size = findList(size);
// None of requested size implies overpopulated.
return list_of_size == NULL || list_of_size->coalDesired() <= 0 ||
@@ -854,17 +882,20 @@
double _percentage;
float _inter_sweep_current;
float _inter_sweep_estimate;
+ float _intra_sweep_estimate;
public:
BeginSweepClosure(double p, float inter_sweep_current,
- float inter_sweep_estimate) :
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) :
_percentage(p),
_inter_sweep_current(inter_sweep_current),
- _inter_sweep_estimate(inter_sweep_estimate) { }
+ _inter_sweep_estimate(inter_sweep_estimate),
+ _intra_sweep_estimate(intra_sweep_estimate) { }
void do_list(FreeList* fl) {
double coalSurplusPercent = _percentage;
- fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate);
+ fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
fl->set_coalDesired((ssize_t)((double)fl->desired() * coalSurplusPercent));
fl->set_beforeSweep(fl->count());
fl->set_bfrSurp(fl->surplus());
@@ -939,9 +970,10 @@
}
void BinaryTreeDictionary::beginSweepDictCensus(double coalSurplusPercent,
- float inter_sweep_current, float inter_sweep_estimate) {
+ float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
- inter_sweep_estimate);
+ inter_sweep_estimate,
+ intra_sweep_estimate);
bsc.do_tree(root());
}
@@ -1077,13 +1109,13 @@
// Print census information - counts, births, deaths, etc.
// for each list in the tree. Also print some summary
// information.
-class printTreeCensusClosure : public AscendTreeCensusClosure {
+class PrintTreeCensusClosure : public AscendTreeCensusClosure {
int _print_line;
size_t _totalFree;
FreeList _total;
public:
- printTreeCensusClosure() {
+ PrintTreeCensusClosure() {
_print_line = 0;
_totalFree = 0;
}
@@ -1113,7 +1145,7 @@
gclog_or_tty->print("\nBinaryTree\n");
FreeList::print_labels_on(gclog_or_tty, "size");
- printTreeCensusClosure ptc;
+ PrintTreeCensusClosure ptc;
ptc.do_tree(root());
FreeList* total = ptc.total();
@@ -1130,6 +1162,38 @@
/(total->desired() != 0 ? (double)total->desired() : 1.0));
}
+class PrintFreeListsClosure : public AscendTreeCensusClosure {
+ outputStream* _st;
+ int _print_line;
+
+ public:
+ PrintFreeListsClosure(outputStream* st) {
+ _st = st;
+ _print_line = 0;
+ }
+ void do_list(FreeList* fl) {
+ if (++_print_line >= 40) {
+ FreeList::print_labels_on(_st, "size");
+ _print_line = 0;
+ }
+ fl->print_on(_st);
+ size_t sz = fl->size();
+ for (FreeChunk* fc = fl->head(); fc != NULL;
+ fc = fc->next()) {
+ _st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
+ fc, (HeapWord*)fc + sz,
+ fc->cantCoalesce() ? "\t CC" : "");
+ }
+ }
+};
+
+void BinaryTreeDictionary::print_free_lists(outputStream* st) const {
+
+ FreeList::print_labels_on(st, "size");
+ PrintFreeListsClosure pflc(st);
+ pflc.do_tree(root());
+}
+
// Verify the following tree invariants:
// . _root has no parent
// . parent and child point to each other
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/binaryTreeDictionary.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -42,9 +42,6 @@
friend class AscendTreeCensusClosure;
friend class DescendTreeCensusClosure;
friend class DescendTreeSearchClosure;
- TreeList* _parent;
- TreeList* _left;
- TreeList* _right;
protected:
TreeList* parent() const { return _parent; }
@@ -82,6 +79,11 @@
// to a TreeChunk.
TreeChunk* first_available();
+ // Returns the block with the largest heap address amongst
+ // those in the list for this size; potentially slow and expensive,
+ // use with caution!
+ TreeChunk* largest_address();
+
// removeChunkReplaceIfNeeded() removes the given "tc" from the TreeList.
// If "tc" is the first chunk in the list, it is also the
// TreeList that is the node in the tree. removeChunkReplaceIfNeeded()
@@ -254,8 +256,9 @@
// Methods called at the beginning of a sweep to prepare the
// statistics for the sweep.
void beginSweepDictCensus(double coalSurplusPercent,
- float sweep_current,
- float sweep_estimate);
+ float inter_sweep_current,
+ float inter_sweep_estimate,
+ float intra_sweep_estimate);
// Methods called after the end of a sweep to modify the
// statistics for the sweep.
void endSweepDictCensus(double splitSurplusPercent);
@@ -269,6 +272,7 @@
// Print the statistics for all the lists in the tree. Also may
// print out summaries.
void printDictCensus(void) const;
+ void print_free_lists(outputStream* st) const;
// For debugging. Returns the sum of the _returnedBytes for
// all lists in the tree.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -32,7 +32,9 @@
// threads. The second argument is in support of an extra locking
// check for CFL spaces' free list locks.
#ifndef PRODUCT
-void CMSLockVerifier::assert_locked(const Mutex* lock, const Mutex* p_lock) {
+void CMSLockVerifier::assert_locked(const Mutex* lock,
+ const Mutex* p_lock1,
+ const Mutex* p_lock2) {
if (!Universe::is_fully_initialized()) {
return;
}
@@ -40,7 +42,7 @@
Thread* myThread = Thread::current();
if (lock == NULL) { // a "lock-free" structure, e.g. MUT, protected by CMS token
- assert(p_lock == NULL, "Unexpected state");
+ assert(p_lock1 == NULL && p_lock2 == NULL, "Unexpected caller error");
if (myThread->is_ConcurrentGC_thread()) {
// This test might have to change in the future, if there can be
// multiple peer CMS threads. But for now, if we're testing the CMS
@@ -60,36 +62,39 @@
return;
}
- if (ParallelGCThreads == 0) {
+ if (myThread->is_VM_thread()
+ || myThread->is_ConcurrentGC_thread()
+ || myThread->is_Java_thread()) {
+ // Make sure that we are holding the associated lock.
assert_lock_strong(lock);
+ // The checking of p_lock is a spl case for CFLS' free list
+ // locks: we make sure that none of the parallel GC work gang
+ // threads are holding "sub-locks" of freeListLock(). We check only
+ // the parDictionaryAllocLock because the others are too numerous.
+ // This spl case code is somewhat ugly and any improvements
+ // are welcome.
+ assert(p_lock1 == NULL || !p_lock1->is_locked() || p_lock1->owned_by_self(),
+ "Possible race between this and parallel GC threads");
+ assert(p_lock2 == NULL || !p_lock2->is_locked() || p_lock2->owned_by_self(),
+ "Possible race between this and parallel GC threads");
+ } else if (myThread->is_GC_task_thread()) {
+ // Make sure that the VM or CMS thread holds lock on our behalf
+ // XXX If there were a concept of a gang_master for a (set of)
+ // gang_workers, we could have used the identity of that thread
+ // for checking ownership here; for now we just disjunct.
+ assert(lock->owner() == VMThread::vm_thread() ||
+ lock->owner() == ConcurrentMarkSweepThread::cmst(),
+ "Should be locked by VM thread or CMS thread on my behalf");
+ if (p_lock1 != NULL) {
+ assert_lock_strong(p_lock1);
+ }
+ if (p_lock2 != NULL) {
+ assert_lock_strong(p_lock2);
+ }
} else {
- if (myThread->is_VM_thread()
- || myThread->is_ConcurrentGC_thread()
- || myThread->is_Java_thread()) {
- // Make sure that we are holding the associated lock.
- assert_lock_strong(lock);
- // The checking of p_lock is a spl case for CFLS' free list
- // locks: we make sure that none of the parallel GC work gang
- // threads are holding "sub-locks" of freeListLock(). We check only
- // the parDictionaryAllocLock because the others are too numerous.
- // This spl case code is somewhat ugly and any improvements
- // are welcome XXX FIX ME!!
- if (p_lock != NULL) {
- assert(!p_lock->is_locked() || p_lock->owned_by_self(),
- "Possible race between this and parallel GC threads");
- }
- } else if (myThread->is_GC_task_thread()) {
- // Make sure that the VM or CMS thread holds lock on our behalf
- // XXX If there were a concept of a gang_master for a (set of)
- // gang_workers, we could have used the identity of that thread
- // for checking ownership here; for now we just disjunct.
- assert(lock->owner() == VMThread::vm_thread() ||
- lock->owner() == ConcurrentMarkSweepThread::cmst(),
- "Should be locked by VM thread or CMS thread on my behalf");
- } else {
- // Make sure we didn't miss some obscure corner case
- ShouldNotReachHere();
- }
+ // Make sure we didn't miss some other thread type calling into here;
+ // perhaps as a result of future VM evolution.
+ ShouldNotReachHere();
}
}
#endif
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -29,8 +29,11 @@
// the parallel threads.
class CMSLockVerifier: AllStatic {
public:
- static void assert_locked(const Mutex* lock, const Mutex* p_lock)
+ static void assert_locked(const Mutex* lock, const Mutex* p_lock1, const Mutex* p_lock2)
PRODUCT_RETURN;
+ static void assert_locked(const Mutex* lock, const Mutex* p_lock) {
+ assert_locked(lock, p_lock, NULL);
+ }
static void assert_locked(const Mutex* lock) {
assert_locked(lock, NULL);
}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -62,18 +62,15 @@
// implementation, namely, the simple binary tree (splaying
// temporarily disabled).
switch (dictionaryChoice) {
- case FreeBlockDictionary::dictionaryBinaryTree:
- _dictionary = new BinaryTreeDictionary(mr);
- break;
case FreeBlockDictionary::dictionarySplayTree:
case FreeBlockDictionary::dictionarySkipList:
default:
warning("dictionaryChoice: selected option not understood; using"
" default BinaryTreeDictionary implementation instead.");
+ case FreeBlockDictionary::dictionaryBinaryTree:
_dictionary = new BinaryTreeDictionary(mr);
break;
}
- splitBirth(mr.word_size());
assert(_dictionary != NULL, "CMS dictionary initialization");
// The indexed free lists are initially all empty and are lazily
// filled in on demand. Initialize the array elements to NULL.
@@ -388,6 +385,105 @@
return res;
}
+void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
+const {
+ reportIndexedFreeListStatistics();
+ gclog_or_tty->print_cr("Layout of Indexed Freelists");
+ gclog_or_tty->print_cr("---------------------------");
+ FreeList::print_labels_on(st, "size");
+ for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
+ _indexedFreeList[i].print_on(gclog_or_tty);
+ for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
+ fc = fc->next()) {
+ gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ") %s",
+ fc, (HeapWord*)fc + i,
+ fc->cantCoalesce() ? "\t CC" : "");
+ }
+ }
+}
+
+void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
+const {
+ _promoInfo.print_on(st);
+}
+
+void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
+const {
+ _dictionary->reportStatistics();
+ st->print_cr("Layout of Freelists in Tree");
+ st->print_cr("---------------------------");
+ _dictionary->print_free_lists(st);
+}
+
+class BlkPrintingClosure: public BlkClosure {
+ const CMSCollector* _collector;
+ const CompactibleFreeListSpace* _sp;
+ const CMSBitMap* _live_bit_map;
+ const bool _post_remark;
+ outputStream* _st;
+public:
+ BlkPrintingClosure(const CMSCollector* collector,
+ const CompactibleFreeListSpace* sp,
+ const CMSBitMap* live_bit_map,
+ outputStream* st):
+ _collector(collector),
+ _sp(sp),
+ _live_bit_map(live_bit_map),
+ _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
+ _st(st) { }
+ size_t do_blk(HeapWord* addr);
+};
+
+size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
+ size_t sz = _sp->block_size_no_stall(addr, _collector);
+ assert(sz != 0, "Should always be able to compute a size");
+ if (_sp->block_is_obj(addr)) {
+ const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
+ _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
+ addr,
+ dead ? "dead" : "live",
+ sz,
+ (!dead && CMSPrintObjectsInDump) ? ":" : ".");
+ if (CMSPrintObjectsInDump && !dead) {
+ oop(addr)->print_on(_st);
+ _st->print_cr("--------------------------------------");
+ }
+ } else { // free block
+ _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
+ addr, sz, CMSPrintChunksInDump ? ":" : ".");
+ if (CMSPrintChunksInDump) {
+ ((FreeChunk*)addr)->print_on(_st);
+ _st->print_cr("--------------------------------------");
+ }
+ }
+ return sz;
+}
+
+void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
+ outputStream* st) {
+ st->print_cr("\n=========================");
+ st->print_cr("Block layout in CMS Heap:");
+ st->print_cr("=========================");
+ BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
+ blk_iterate(&bpcl);
+
+ st->print_cr("\n=======================================");
+ st->print_cr("Order & Layout of Promotion Info Blocks");
+ st->print_cr("=======================================");
+ print_promo_info_blocks(st);
+
+ st->print_cr("\n===========================");
+ st->print_cr("Order of Indexed Free Lists");
+ st->print_cr("=========================");
+ print_indexed_free_lists(st);
+
+ st->print_cr("\n=================================");
+ st->print_cr("Order of Free Lists in Dictionary");
+ st->print_cr("=================================");
+ print_dictionary_free_lists(st);
+}
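
Editor's sketch (not part of the changeset): dump_at_safepoint_with_locks drives a BlkClosure whose do_blk reports the size of the block it just examined, and the space uses that size to step to the next block, so a single pass prints every object and free chunk in address order. The protocol reduced to a standalone walk over a synthetic block map:

#include <cstdio>
#include <vector>

// A block is either a (live or dead) object or a free chunk; do_blk returns
// its size in words so the caller knows where the next block starts.
struct SketchBlock { size_t size; bool is_obj; bool is_live; };

struct SketchPrintingClosure {
  size_t do_blk(const SketchBlock& b, size_t addr) {
    if (b.is_obj) {
      std::printf("%zu: %s object of size %zu\n", addr, b.is_live ? "live" : "dead", b.size);
    } else {
      std::printf("%zu: free block of size %zu\n", addr, b.size);
    }
    return b.size;
  }
};

int main() {
  // Synthetic layout of a space: object, free chunk, object.
  std::vector<SketchBlock> space = { {8, true, true}, {4, false, false}, {16, true, false} };
  SketchPrintingClosure cl;
  size_t addr = 0;
  for (size_t i = 0; i < space.size(); i++) {
    addr += cl.do_blk(space[i], addr);   // advance by the size do_blk reported
  }
  return 0;
}
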
+
+
void CompactibleFreeListSpace::reportFreeListStatistics() const {
assert_lock_strong(&_freelistLock);
assert(PrintFLSStatistics != 0, "Reporting error");
@@ -449,37 +545,37 @@
if (prevEnd != NULL) {
// Resize the underlying block offset table.
_bt.resize(pointer_delta(value, bottom()));
- if (value <= prevEnd) {
- assert(value >= unallocated_block(), "New end is below unallocated block");
- } else {
- // Now, take this new chunk and add it to the free blocks.
- // Note that the BOT has not yet been updated for this block.
- size_t newFcSize = pointer_delta(value, prevEnd);
- // XXX This is REALLY UGLY and should be fixed up. XXX
- if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
- // Mark the boundary of the new block in BOT
- _bt.mark_block(prevEnd, value);
- // put it all in the linAB
- if (ParallelGCThreads == 0) {
- _smallLinearAllocBlock._ptr = prevEnd;
- _smallLinearAllocBlock._word_size = newFcSize;
- repairLinearAllocBlock(&_smallLinearAllocBlock);
- } else { // ParallelGCThreads > 0
- MutexLockerEx x(parDictionaryAllocLock(),
- Mutex::_no_safepoint_check_flag);
- _smallLinearAllocBlock._ptr = prevEnd;
- _smallLinearAllocBlock._word_size = newFcSize;
- repairLinearAllocBlock(&_smallLinearAllocBlock);
+ if (value <= prevEnd) {
+ assert(value >= unallocated_block(), "New end is below unallocated block");
+ } else {
+ // Now, take this new chunk and add it to the free blocks.
+ // Note that the BOT has not yet been updated for this block.
+ size_t newFcSize = pointer_delta(value, prevEnd);
+ // XXX This is REALLY UGLY and should be fixed up. XXX
+ if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
+ // Mark the boundary of the new block in BOT
+ _bt.mark_block(prevEnd, value);
+ // put it all in the linAB
+ if (ParallelGCThreads == 0) {
+ _smallLinearAllocBlock._ptr = prevEnd;
+ _smallLinearAllocBlock._word_size = newFcSize;
+ repairLinearAllocBlock(&_smallLinearAllocBlock);
+ } else { // ParallelGCThreads > 0
+ MutexLockerEx x(parDictionaryAllocLock(),
+ Mutex::_no_safepoint_check_flag);
+ _smallLinearAllocBlock._ptr = prevEnd;
+ _smallLinearAllocBlock._word_size = newFcSize;
+ repairLinearAllocBlock(&_smallLinearAllocBlock);
+ }
+ // Births of chunks put into a LinAB are not recorded. Births
+ // of chunks as they are allocated out of a LinAB are.
+ } else {
+ // Add the block to the free lists, if possible coalescing it
+ // with the last free block, and update the BOT and census data.
+ addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
}
- // Births of chunks put into a LinAB are not recorded. Births
- // of chunks as they are allocated out of a LinAB are.
- } else {
- // Add the block to the free lists, if possible coalescing it
- // with the last free block, and update the BOT and census data.
- addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
}
}
- }
}
class FreeListSpace_DCTOC : public Filtering_DCTOC {
@@ -732,7 +828,7 @@
void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
UpwardsObjectClosure* cl) {
- assert_locked();
+ assert_locked(freelistLock());
NOT_PRODUCT(verify_objects_initialized());
Space::object_iterate_mem(mr, cl);
}
@@ -1212,12 +1308,15 @@
void CompactibleFreeListSpace::assert_locked() const {
CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
}
+
+void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
+ CMSLockVerifier::assert_locked(lock);
+}
#endif
FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
// In the parallel case, the main thread holds the free list lock
// on behalf the parallel threads.
- assert_locked();
FreeChunk* fc;
{
// If GC is parallel, this might be called by several threads.
@@ -1298,17 +1397,18 @@
res = blk->_ptr;
_bt.allocated(res, blk->_word_size);
} else if (size + MinChunkSize <= blk->_refillSize) {
+ size_t sz = blk->_word_size;
// Update _unallocated_block if the size is such that chunk would be
// returned to the indexed free list. All other chunks in the indexed
// free lists are allocated from the dictionary so that _unallocated_block
// has already been adjusted for them. Do it here so that the cost
// is incurred for all chunks added back to the indexed free lists.
- if (blk->_word_size < SmallForDictionary) {
- _bt.allocated(blk->_ptr, blk->_word_size);
+ if (sz < SmallForDictionary) {
+ _bt.allocated(blk->_ptr, sz);
}
// Return the chunk that isn't big enough, and then refill below.
- addChunkToFreeLists(blk->_ptr, blk->_word_size);
- _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
+ addChunkToFreeLists(blk->_ptr, sz);
+ splitBirth(sz);
// Don't keep statistics on adding back chunk from a LinAB.
} else {
// A refilled block would not satisfy the request.
@@ -1376,11 +1476,13 @@
res = getChunkFromIndexedFreeListHelper(size);
}
_bt.verify_not_unallocated((HeapWord*) res, size);
+ assert(res == NULL || res->size() == size, "Incorrect block size");
return res;
}
FreeChunk*
-CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
+CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
+ bool replenish) {
assert_locked();
FreeChunk* fc = NULL;
if (size < SmallForDictionary) {
@@ -1398,54 +1500,66 @@
// and replenishing indexed lists from the small linAB.
//
FreeChunk* newFc = NULL;
- size_t replenish_size = CMSIndexedFreeListReplenish * size;
+ const size_t replenish_size = CMSIndexedFreeListReplenish * size;
if (replenish_size < SmallForDictionary) {
// Do not replenish from an underpopulated size.
if (_indexedFreeList[replenish_size].surplus() > 0 &&
_indexedFreeList[replenish_size].head() != NULL) {
- newFc =
- _indexedFreeList[replenish_size].getChunkAtHead();
- } else {
+ newFc = _indexedFreeList[replenish_size].getChunkAtHead();
+ } else if (bestFitFirst()) {
newFc = bestFitSmall(replenish_size);
}
}
- if (newFc != NULL) {
- splitDeath(replenish_size);
- } else if (replenish_size > size) {
+ if (newFc == NULL && replenish_size > size) {
assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
- newFc =
- getChunkFromIndexedFreeListHelper(replenish_size);
+ newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
}
+ // Note: The stats update re split-death of block obtained above
+ // will be recorded below precisely when we know we are going to
+ // be actually splitting it into more than one pieces below.
if (newFc != NULL) {
- assert(newFc->size() == replenish_size, "Got wrong size");
- size_t i;
- FreeChunk *curFc, *nextFc;
- // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
- // The last chunk is not added to the lists but is returned as the
- // free chunk.
- for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
- i = 0;
- i < (CMSIndexedFreeListReplenish - 1);
- curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
- i++) {
+ if (replenish || CMSReplenishIntermediate) {
+ // Replenish this list and return one block to caller.
+ size_t i;
+ FreeChunk *curFc, *nextFc;
+ size_t num_blk = newFc->size() / size;
+ assert(num_blk >= 1, "Smaller than requested?");
+ assert(newFc->size() % size == 0, "Should be integral multiple of request");
+ if (num_blk > 1) {
+ // we are sure we will be splitting the block just obtained
+ // into multiple pieces; record the split-death of the original
+ splitDeath(replenish_size);
+ }
+ // carve up and link blocks 0, ..., num_blk - 2
+ // The last chunk is not added to the lists but is returned as the
+ // free chunk.
+ for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
+ i = 0;
+ i < (num_blk - 1);
+ curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
+ i++) {
+ curFc->setSize(size);
+ // Don't record this as a return in order to try and
+ // determine the "returns" from a GC.
+ _bt.verify_not_unallocated((HeapWord*) fc, size);
+ _indexedFreeList[size].returnChunkAtTail(curFc, false);
+ _bt.mark_block((HeapWord*)curFc, size);
+ splitBirth(size);
+ // Don't record the initial population of the indexed list
+ // as a split birth.
+ }
+
+ // check that the arithmetic was OK above
+ assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
+ "inconsistency in carving newFc");
curFc->setSize(size);
- // Don't record this as a return in order to try and
- // determine the "returns" from a GC.
- _bt.verify_not_unallocated((HeapWord*) fc, size);
- _indexedFreeList[size].returnChunkAtTail(curFc, false);
_bt.mark_block((HeapWord*)curFc, size);
splitBirth(size);
- // Don't record the initial population of the indexed list
- // as a split birth.
+ fc = curFc;
+ } else {
+ // Return entire block to caller
+ fc = newFc;
}
-
- // check that the arithmetic was OK above
- assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
- "inconsistency in carving newFc");
- curFc->setSize(size);
- _bt.mark_block((HeapWord*)curFc, size);
- splitBirth(size);
- return curFc;
}
}
} else {
@@ -1453,7 +1567,7 @@
// replenish the indexed free list.
fc = getChunkFromDictionaryExact(size);
}
- assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
+ // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
return fc;
}
@@ -1512,6 +1626,11 @@
// adjust _unallocated_block downward, as necessary
_bt.freed((HeapWord*)chunk, size);
_dictionary->returnChunk(chunk);
+#ifndef PRODUCT
+ if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
+ TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
+ }
+#endif // PRODUCT
}
void
@@ -1525,6 +1644,11 @@
} else {
_indexedFreeList[size].returnChunkAtHead(fc);
}
+#ifndef PRODUCT
+ if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
+ _indexedFreeList[size].verify_stats();
+ }
+#endif // PRODUCT
}
// Add chunk to end of last block -- if it's the largest
@@ -1537,7 +1661,6 @@
HeapWord* chunk, size_t size) {
// check that the chunk does lie in this space!
assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
- assert_locked();
// One of the parallel gc task threads may be here
// whilst others are allocating.
Mutex* lock = NULL;
@@ -1991,24 +2114,26 @@
return frag;
}
-#define CoalSurplusPercent 1.05
-#define SplitSurplusPercent 1.10
-
void CompactibleFreeListSpace::beginSweepFLCensus(
float inter_sweep_current,
- float inter_sweep_estimate) {
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) {
assert_locked();
size_t i;
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList* fl = &_indexedFreeList[i];
- fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
- fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
+ if (PrintFLSStatistics > 1) {
+ gclog_or_tty->print("size[%d] : ", i);
+ }
+ fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
+ fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
fl->set_beforeSweep(fl->count());
fl->set_bfrSurp(fl->surplus());
}
- _dictionary->beginSweepDictCensus(CoalSurplusPercent,
+ _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
inter_sweep_current,
- inter_sweep_estimate);
+ inter_sweep_estimate,
+ intra_sweep_estimate);
}
void CompactibleFreeListSpace::setFLSurplus() {
@@ -2017,7 +2142,7 @@
for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
FreeList *fl = &_indexedFreeList[i];
fl->set_surplus(fl->count() -
- (ssize_t)((double)fl->desired() * SplitSurplusPercent));
+ (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
}
}
@@ -2048,6 +2173,11 @@
}
void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
+ if (PrintFLSStatistics > 0) {
+ HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
+ gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
+ largestAddr);
+ }
setFLSurplus();
setFLHints();
if (PrintGC && PrintFLSCensus > 0) {
@@ -2055,7 +2185,7 @@
}
clearFLCensus();
assert_locked();
- _dictionary->endSweepDictCensus(SplitSurplusPercent);
+ _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
}
bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
@@ -2312,13 +2442,18 @@
}
void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
- FreeChunk* fc = _indexedFreeList[size].head();
+ FreeChunk* fc = _indexedFreeList[size].head();
+ FreeChunk* tail = _indexedFreeList[size].tail();
+ size_t num = _indexedFreeList[size].count();
+ size_t n = 0;
guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
- for (; fc != NULL; fc = fc->next()) {
+ for (; fc != NULL; fc = fc->next(), n++) {
guarantee(fc->size() == size, "Size inconsistency");
guarantee(fc->isFree(), "!free?");
guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
+ guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
}
+ guarantee(n == num, "Incorrect count");
}
#ifndef PRODUCT
@@ -2516,11 +2651,41 @@
_tracking = true;
}
-void PromotionInfo::stopTrackingPromotions() {
+#define CMSPrintPromoBlockInfo 1
+
+void PromotionInfo::stopTrackingPromotions(uint worker_id) {
assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
"spooling inconsistency?");
_firstIndex = _nextIndex = 1;
_tracking = false;
+ if (CMSPrintPromoBlockInfo > 1) {
+ print_statistics(worker_id);
+ }
+}
+
+void PromotionInfo::print_statistics(uint worker_id) const {
+ assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
+ "Else will undercount");
+ assert(CMSPrintPromoBlockInfo > 0, "Else unnecessary call");
+ // Count the number of blocks and slots in the free pool
+ size_t slots = 0;
+ size_t blocks = 0;
+ for (SpoolBlock* cur_spool = _spareSpool;
+ cur_spool != NULL;
+ cur_spool = cur_spool->nextSpoolBlock) {
+ // the first entry is just a self-pointer; indices 1 through
+ // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
+ guarantee((void*)cur_spool->displacedHdr == (void*)&cur_spool->displacedHdr,
+ "first entry of displacedHdr should be self-referential");
+ slots += cur_spool->bufferSize - 1;
+ blocks++;
+ }
+ if (_spoolHead != NULL) {
+ slots += _spoolHead->bufferSize - 1;
+ blocks++;
+ }
+ gclog_or_tty->print_cr(" [worker %d] promo_blocks = %d, promo_slots = %d ",
+ worker_id, blocks, slots);
}
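A minimal sketch of the free-pool accounting that print_statistics() performs, using made-up spool-block buffer sizes; it only illustrates the bufferSize - 1 usable slots per block (slot 0 holds the self-referential displacedHdr entry), not the real SpoolBlock layout.

#include <cstdio>

int main() {
  // Three spare spool blocks with assumed buffer sizes.
  const unsigned buffer_sizes[] = { 256, 256, 64 };
  unsigned blocks = 0, slots = 0;
  for (unsigned bs : buffer_sizes) {
    slots += bs - 1;   // slot 0 is the self-referential header entry
    blocks++;
  }
  std::printf("promo_blocks = %u, promo_slots = %u\n", blocks, slots); // 3, 573
  return 0;
}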
// When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
@@ -2584,15 +2749,84 @@
guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
}
+void PromotionInfo::print_on(outputStream* st) const {
+ SpoolBlock* curSpool = NULL;
+ size_t i = 0;
+ st->print_cr("start & end indices: [" SIZE_FORMAT ", " SIZE_FORMAT ")",
+ _firstIndex, _nextIndex);
+ for (curSpool = _spoolHead; curSpool != _spoolTail && curSpool != NULL;
+ curSpool = curSpool->nextSpoolBlock) {
+ curSpool->print_on(st);
+ st->print_cr(" active ");
+ i++;
+ }
+ for (curSpool = _spoolTail; curSpool != NULL;
+ curSpool = curSpool->nextSpoolBlock) {
+ curSpool->print_on(st);
+ st->print_cr(" inactive ");
+ i++;
+ }
+ for (curSpool = _spareSpool; curSpool != NULL;
+ curSpool = curSpool->nextSpoolBlock) {
+ curSpool->print_on(st);
+ st->print_cr(" free ");
+ i++;
+ }
+ st->print_cr(SIZE_FORMAT " header spooling blocks", i);
+}
+
+void SpoolBlock::print_on(outputStream* st) const {
+ st->print("[" PTR_FORMAT "," PTR_FORMAT "), " SIZE_FORMAT " HeapWords -> " PTR_FORMAT,
+ this, (HeapWord*)displacedHdr + bufferSize,
+ bufferSize, nextSpoolBlock);
+}
+
+///////////////////////////////////////////////////////////////////////////
+// CFLS_LAB
+///////////////////////////////////////////////////////////////////////////
+
+#define VECTOR_257(x) \
+ /* 1 2 3 4 5 6 7 8 9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
+ { x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, \
+ x }
+
+// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
+// OldPLABSize, whose static default is different; if overridden at the
+// command-line, this will get reinitialized via a call to
+// modify_initialization() below.
+AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[] =
+ VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
+size_t CFLS_LAB::_global_num_blocks[] = VECTOR_257(0);
+int CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
_cfls(cfls)
{
- _blocks_to_claim = CMSParPromoteBlocksToClaim;
+ assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
for (size_t i = CompactibleFreeListSpace::IndexSetStart;
i < CompactibleFreeListSpace::IndexSetSize;
i += CompactibleFreeListSpace::IndexSetStride) {
_indexedFreeList[i].set_size(i);
+ _num_blocks[i] = 0;
+ }
+}
+
+static bool _CFLS_LAB_modified = false;
+
+void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
+ assert(!_CFLS_LAB_modified, "Call only once");
+ _CFLS_LAB_modified = true;
+ for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+ i < CompactibleFreeListSpace::IndexSetSize;
+ i += CompactibleFreeListSpace::IndexSetStride) {
+ _blocks_to_claim[i].modify(n, wt, true /* force */);
}
}
@@ -2607,11 +2841,9 @@
if (res == NULL) return NULL;
} else {
FreeList* fl = &_indexedFreeList[word_sz];
- bool filled = false; //TRAP
if (fl->count() == 0) {
- bool filled = true; //TRAP
// Attempt to refill this local free list.
- _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
+ get_from_global_pool(word_sz, fl);
// If it didn't work, give up.
if (fl->count() == 0) return NULL;
}
@@ -2626,80 +2858,190 @@
return (HeapWord*)res;
}
-void CFLS_LAB::retire() {
- for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+// Get a chunk of blocks of the right size and update related
+// book-keeping stats
+void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
+ // Get the #blocks we want to claim
+ size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
+ assert(n_blks > 0, "Error");
+ assert(ResizePLAB || n_blks == OldPLABSize, "Error");
+ // In some cases, when the application has a phase change,
+ // there may be a sudden and sharp shift in the object survival
+ // profile, and updating the counts at the end of a scavenge
+ // may not be quick enough, giving rise to large scavenge pauses
+ // during these phase changes. It is beneficial to detect such
+ // changes on-the-fly during a scavenge and avoid such a phase-change
+ // pothole. The following code is a heuristic attempt to do that.
+ // It is protected by a product flag until we have gained
+ // enough experience with this heuristic and fine-tuned its behaviour.
+ // WARNING: This might increase fragmentation if we overreact to
+ // small spikes, so some kind of historical smoothing based on
+ // previous experience with the greater reactivity might be useful.
+ // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
+ // default.
+ if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
+ size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
+ n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
+ n_blks = MIN2(n_blks, CMSOldPLABMax);
+ }
+ assert(n_blks > 0, "Error");
+ _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
+ // Update stats table entry for this block size
+ _num_blocks[word_sz] += fl->count();
+}
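The heuristic above scales the claim up when a block size has already been handed out far more often than the smoothed estimate predicted. A standalone sketch of that arithmetic, with illustrative constants standing in for CMSOldPLABToleranceFactor, CMSOldPLABNumRefills, CMSOldPLABReactivityFactor and CMSOldPLABMax (the values are assumptions, not the product defaults):

#include <algorithm>
#include <cstddef>
#include <cstdio>

static const size_t kToleranceFactor  = 4;    // stand-in for CMSOldPLABToleranceFactor
static const size_t kNumRefills       = 4;    // stand-in for CMSOldPLABNumRefills
static const size_t kReactivityFactor = 4;    // stand-in for CMSOldPLABReactivityFactor
static const size_t kPlabMax          = 1024; // stand-in for CMSOldPLABMax

// Boost the number of blocks to claim when this block size has already
// been consumed much faster than the smoothed average predicted.
size_t boosted_claim(size_t n_blks, size_t blocks_so_far) {
  size_t multiple = blocks_so_far / (kToleranceFactor * kNumRefills * n_blks);
  n_blks += kReactivityFactor * multiple * n_blks;
  return std::min(n_blks, kPlabMax);
}

int main() {
  // 512 blocks already taken against a claim of 8: multiple = 4, so the
  // next refill claims 8 + 4*4*8 = 136 blocks (still under the cap).
  std::printf("%zu\n", boosted_claim(8, 512));
  return 0;
}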
+
+void CFLS_LAB::compute_desired_plab_size() {
+ for (size_t i = CompactibleFreeListSpace::IndexSetStart;
i < CompactibleFreeListSpace::IndexSetSize;
i += CompactibleFreeListSpace::IndexSetStride) {
- if (_indexedFreeList[i].count() > 0) {
- MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
- Mutex::_no_safepoint_check_flag);
- _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
- // Reset this list.
- _indexedFreeList[i] = FreeList();
- _indexedFreeList[i].set_size(i);
+ assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
+ "Counter inconsistency");
+ if (_global_num_workers[i] > 0) {
+ // Need to smooth wrt historical average
+ if (ResizeOldPLAB) {
+ _blocks_to_claim[i].sample(
+ MAX2((size_t)CMSOldPLABMin,
+ MIN2((size_t)CMSOldPLABMax,
+ _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
+ }
+ // Reset counters for next round
+ _global_num_workers[i] = 0;
+ _global_num_blocks[i] = 0;
+ if (PrintOldPLAB) {
+ gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
+ }
}
}
}
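A rough sketch of the end-of-scavenge smoothing step above, with a plain exponential average standing in for AdaptiveWeightedAverage and assumed values for OldPLABWeight, CMSOldPLABMin/Max and CMSOldPLABNumRefills:

#include <algorithm>
#include <cstddef>
#include <cstdio>

static const double kWeight     = 0.5;   // stand-in for OldPLABWeight
static const size_t kPlabMin    = 16;    // stand-in for CMSOldPLABMin
static const size_t kPlabMax    = 1024;  // stand-in for CMSOldPLABMax
static const size_t kNumRefills = 4;     // stand-in for CMSOldPLABNumRefills

// One end-of-scavenge update of the desired claim for a single block size:
// clamp blocks/(workers*refills) and fold it into a weighted average.
double updated_claim(double old_avg, size_t blocks, size_t workers) {
  size_t sample = blocks / (workers * kNumRefills);
  sample = std::max(kPlabMin, std::min(kPlabMax, sample));
  return (1.0 - kWeight) * old_avg + kWeight * (double)sample;
}

int main() {
  // 4 workers together consumed 4096 blocks of this size: sample = 256,
  // pulling an old average of 64 up to 160.
  std::printf("%.1f\n", updated_claim(64.0, 4096, 4));
  return 0;
}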
-void
-CompactibleFreeListSpace::
-par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
+void CFLS_LAB::retire(int tid) {
+ // We run this single threaded with the world stopped;
+ // so no need for locks and such.
+#define CFLS_LAB_PARALLEL_ACCESS 0
+ NOT_PRODUCT(Thread* t = Thread::current();)
+ assert(Thread::current()->is_VM_thread(), "Error");
+ assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
+ "Will access to uninitialized slot below");
+#if CFLS_LAB_PARALLEL_ACCESS
+ for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
+ i > 0;
+ i -= CompactibleFreeListSpace::IndexSetStride) {
+#else // CFLS_LAB_PARALLEL_ACCESS
+ for (size_t i = CompactibleFreeListSpace::IndexSetStart;
+ i < CompactibleFreeListSpace::IndexSetSize;
+ i += CompactibleFreeListSpace::IndexSetStride) {
+#endif // !CFLS_LAB_PARALLEL_ACCESS
+ assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
+ "Can't retire more than what we obtained");
+ if (_num_blocks[i] > 0) {
+ size_t num_retire = _indexedFreeList[i].count();
+ assert(_num_blocks[i] > num_retire, "Should have used at least one");
+ {
+#if CFLS_LAB_PARALLEL_ACCESS
+ MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
+ Mutex::_no_safepoint_check_flag);
+#endif // CFLS_LAB_PARALLEL_ACCESS
+ // Update globals stats for num_blocks used
+ _global_num_blocks[i] += (_num_blocks[i] - num_retire);
+ _global_num_workers[i]++;
+ assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
+ if (num_retire > 0) {
+ _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
+ // Reset this list.
+ _indexedFreeList[i] = FreeList();
+ _indexedFreeList[i].set_size(i);
+ }
+ }
+ if (PrintOldPLAB) {
+ gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
+ tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
+ }
+ // Reset stats for next round
+ _num_blocks[i] = 0;
+ }
+ }
+}
+
+void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
assert(fl->count() == 0, "Precondition.");
assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
"Precondition");
- // We'll try all multiples of word_sz in the indexed set (starting with
- // word_sz itself), then try getting a big chunk and splitting it.
- int k = 1;
- size_t cur_sz = k * word_sz;
- bool found = false;
- while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
- FreeList* gfl = &_indexedFreeList[cur_sz];
- FreeList fl_for_cur_sz; // Empty.
- fl_for_cur_sz.set_size(cur_sz);
- {
- MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
- Mutex::_no_safepoint_check_flag);
- if (gfl->count() != 0) {
- size_t nn = MAX2(n/k, (size_t)1);
- gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
- found = true;
+ // We'll try all multiples of word_sz in the indexed set, starting with
+ // word_sz itself; if CMSSplitIndexedFreeListBlocks, we also try larger
+ // multiples, and failing that we get a big chunk from the dictionary and split it.
+ {
+ bool found;
+ int k;
+ size_t cur_sz;
+ for (k = 1, cur_sz = k * word_sz, found = false;
+ (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
+ (CMSSplitIndexedFreeListBlocks || k <= 1);
+ k++, cur_sz = k * word_sz) {
+ FreeList* gfl = &_indexedFreeList[cur_sz];
+ FreeList fl_for_cur_sz; // Empty.
+ fl_for_cur_sz.set_size(cur_sz);
+ {
+ MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
+ Mutex::_no_safepoint_check_flag);
+ if (gfl->count() != 0) {
+ // nn is the number of chunks of size cur_sz that
+ // we'd need to split k-ways each, in order to create
+ // "n" chunks of size word_sz each.
+ const size_t nn = MAX2(n/k, (size_t)1);
+ gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
+ found = true;
+ if (k > 1) {
+ // Update split death stats for the cur_sz-size blocks list:
+ // we increment the split death count by the number of blocks
+ // we just took from the cur_sz-size blocks list and which
+ // we will be splitting below.
+ ssize_t deaths = _indexedFreeList[cur_sz].splitDeaths() +
+ fl_for_cur_sz.count();
+ _indexedFreeList[cur_sz].set_splitDeaths(deaths);
+ }
+ }
+ }
+ // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
+ if (found) {
+ if (k == 1) {
+ fl->prepend(&fl_for_cur_sz);
+ } else {
+ // Divide each block on fl_for_cur_sz up k ways.
+ FreeChunk* fc;
+ while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
+ // Must do this in reverse order, so that anybody attempting to
+ // access the main chunk sees it as a single free block until we
+ // change it.
+ size_t fc_size = fc->size();
+ for (int i = k-1; i >= 0; i--) {
+ FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
+ ffc->setSize(word_sz);
+ ffc->linkNext(NULL);
+ ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
+ // Above must occur before BOT is updated below.
+ // splitting from the right, fc_size == (k - i + 1) * wordsize
+ _bt.mark_block((HeapWord*)ffc, word_sz);
+ fc_size -= word_sz;
+ _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
+ _bt.verify_single_block((HeapWord*)fc, fc_size);
+ _bt.verify_single_block((HeapWord*)ffc, ffc->size());
+ // Push this on "fl".
+ fl->returnChunkAtHead(ffc);
+ }
+ // TRAP
+ assert(fl->tail()->next() == NULL, "List invariant.");
+ }
+ }
+ // Update birth stats for this block size.
+ size_t num = fl->count();
+ MutexLockerEx x(_indexedFreeListParLocks[word_sz],
+ Mutex::_no_safepoint_check_flag);
+ ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
+ _indexedFreeList[word_sz].set_splitBirths(births);
+ return;
}
}
- // Now transfer fl_for_cur_sz to fl. Common case, we hope, is k = 1.
- if (found) {
- if (k == 1) {
- fl->prepend(&fl_for_cur_sz);
- } else {
- // Divide each block on fl_for_cur_sz up k ways.
- FreeChunk* fc;
- while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
- // Must do this in reverse order, so that anybody attempting to
- // access the main chunk sees it as a single free block until we
- // change it.
- size_t fc_size = fc->size();
- for (int i = k-1; i >= 0; i--) {
- FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
- ffc->setSize(word_sz);
- ffc->linkNext(NULL);
- ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
- // Above must occur before BOT is updated below.
- // splitting from the right, fc_size == (k - i + 1) * wordsize
- _bt.mark_block((HeapWord*)ffc, word_sz);
- fc_size -= word_sz;
- _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
- _bt.verify_single_block((HeapWord*)fc, fc_size);
- _bt.verify_single_block((HeapWord*)ffc, ffc->size());
- // Push this on "fl".
- fl->returnChunkAtHead(ffc);
- }
- // TRAP
- assert(fl->tail()->next() == NULL, "List invariant.");
- }
- }
- return;
- }
- k++; cur_sz = k * word_sz;
}
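When a chunk of size cur_sz = k * word_sz is taken from a larger indexed list, it is split k ways from the right so the original chunk remains a single valid free block until the last piece is carved. A minimal sketch of that carving over plain word offsets (no real heap, BOT or free-list types involved):

#include <cstddef>
#include <cstdio>
#include <vector>

// Carve one chunk of k*word_sz words into k blocks of word_sz each,
// walking from the right as the loop above does. Addresses are simulated
// as word offsets.
std::vector<size_t> carve_from_right(size_t chunk_start, size_t k, size_t word_sz) {
  std::vector<size_t> blocks;
  for (size_t i = k; i-- > 0; ) {
    blocks.push_back(chunk_start + i * word_sz); // rightmost block first
  }
  return blocks;
}

int main() {
  // A 24-word chunk at offset 100 split 3 ways into 8-word blocks is
  // carved at offsets 116, 108, 100 (right to left).
  for (size_t off : carve_from_right(100, 3, 8)) {
    std::printf("%zu\n", off);
  }
  return 0;
}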
// Otherwise, we'll split a block from the dictionary.
FreeChunk* fc = NULL;
@@ -2723,17 +3065,31 @@
}
}
if (fc == NULL) return;
+ assert((ssize_t)n >= 1, "Control point invariant");
// Otherwise, split up that block.
- size_t nn = fc->size() / word_sz;
+ const size_t nn = fc->size() / word_sz;
n = MIN2(nn, n);
+ assert((ssize_t)n >= 1, "Control point invariant");
rem = fc->size() - n * word_sz;
// If there is a remainder, and it's too small, allocate one fewer.
if (rem > 0 && rem < MinChunkSize) {
n--; rem += word_sz;
}
+ // Note that at this point we may have n == 0.
+ assert((ssize_t)n >= 0, "Control point invariant");
+
+ // If n is 0, the chunk fc that was found is not large
+ // enough to leave a viable remainder. We are unable to
+ // allocate even one block. Return fc to the
+ // dictionary and return, leaving "fl" empty.
+ if (n == 0) {
+ returnChunkToDictionary(fc);
+ return;
+ }
+
// First return the remainder, if any.
// Note that we hold the lock until we decide if we're going to give
- // back the remainder to the dictionary, since a contending allocator
+ // back the remainder to the dictionary, since a concurrent allocation
// may otherwise see the heap as empty. (We're willing to take that
// hit if the block is a small block.)
if (rem > 0) {
@@ -2743,18 +3099,16 @@
rem_fc->linkNext(NULL);
rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
// Above must occur before BOT is updated below.
+ assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
_bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
if (rem >= IndexSetSize) {
returnChunkToDictionary(rem_fc);
- dictionary()->dictCensusUpdate(fc->size(),
- true /*split*/,
- true /*birth*/);
+ dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
rem_fc = NULL;
}
// Otherwise, return it to the small list below.
}
}
- //
if (rem_fc != NULL) {
MutexLockerEx x(_indexedFreeListParLocks[rem],
Mutex::_no_safepoint_check_flag);
@@ -2762,7 +3116,7 @@
_indexedFreeList[rem].returnChunkAtHead(rem_fc);
smallSplitBirth(rem);
}
-
+ assert((ssize_t)n > 0 && fc != NULL, "Consistency");
// Now do the splitting up.
// Must do this in reverse order, so that anybody attempting to
// access the main chunk sees it as a single free block until we
@@ -2792,13 +3146,15 @@
_bt.verify_single_block((HeapWord*)fc, fc->size());
fl->returnChunkAtHead(fc);
+ assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
{
+ // Update the stats for this block size.
MutexLockerEx x(_indexedFreeListParLocks[word_sz],
Mutex::_no_safepoint_check_flag);
- ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
- _indexedFreeList[word_sz].set_splitBirths(new_births);
- ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
- _indexedFreeList[word_sz].set_surplus(new_surplus);
+ const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
+ _indexedFreeList[word_sz].set_splitBirths(births);
+ // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
+ // _indexedFreeList[word_sz].set_surplus(new_surplus);
}
// TRAP
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -25,8 +25,6 @@
// Classes in support of keeping track of promotions into a non-Contiguous
// space, in this case a CompactibleFreeListSpace.
-#define CFLS_LAB_REFILL_STATS 0
-
// Forward declarations
class CompactibleFreeListSpace;
class BlkClosure;
@@ -89,6 +87,9 @@
displacedHdr = (markOop*)&displacedHdr;
nextSpoolBlock = NULL;
}
+
+ void print_on(outputStream* st) const;
+ void print() const { print_on(gclog_or_tty); }
};
class PromotionInfo VALUE_OBJ_CLASS_SPEC {
@@ -121,7 +122,7 @@
return _promoHead == NULL;
}
void startTrackingPromotions();
- void stopTrackingPromotions();
+ void stopTrackingPromotions(uint worker_id = 0);
bool tracking() const { return _tracking; }
void track(PromotedObject* trackOop); // keep track of a promoted oop
// The following variant must be used when trackOop is not fully
@@ -161,6 +162,9 @@
_nextIndex = 0;
}
+
+ void print_on(outputStream* st) const;
+ void print_statistics(uint worker_id) const;
};
class LinearAllocBlock VALUE_OBJ_CLASS_SPEC {
@@ -243,6 +247,7 @@
mutable Mutex _freelistLock;
// locking verifier convenience function
void assert_locked() const PRODUCT_RETURN;
+ void assert_locked(const Mutex* lock) const PRODUCT_RETURN;
// Linear allocation blocks
LinearAllocBlock _smallLinearAllocBlock;
@@ -281,13 +286,6 @@
// Locks protecting the exact lists during par promotion allocation.
Mutex* _indexedFreeListParLocks[IndexSetSize];
-#if CFLS_LAB_REFILL_STATS
- // Some statistics.
- jint _par_get_chunk_from_small;
- jint _par_get_chunk_from_large;
-#endif
-
-
// Attempt to obtain up to "n" blocks of the size "word_sz" (which is
// required to be smaller than "IndexSetSize".) If successful,
// adds them to "fl", which is required to be an empty free list.
@@ -320,7 +318,7 @@
// Helper function for getChunkFromIndexedFreeList.
// Replenish the indexed free list for this "size". Do not take from an
// underpopulated size.
- FreeChunk* getChunkFromIndexedFreeListHelper(size_t size);
+ FreeChunk* getChunkFromIndexedFreeListHelper(size_t size, bool replenish = true);
// Get a chunk from the indexed free list. If the indexed free list
// does not have a free chunk, try to replenish the indexed free list
@@ -430,10 +428,6 @@
void initialize_sequential_subtasks_for_marking(int n_threads,
HeapWord* low = NULL);
-#if CFLS_LAB_REFILL_STATS
- void print_par_alloc_stats();
-#endif
-
// Space enquiries
size_t used() const;
size_t free() const;
@@ -617,6 +611,12 @@
// Do some basic checks on the free lists.
void checkFreeListConsistency() const PRODUCT_RETURN;
+ // Printing support
+ void dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st);
+ void print_indexed_free_lists(outputStream* st) const;
+ void print_dictionary_free_lists(outputStream* st) const;
+ void print_promo_info_blocks(outputStream* st) const;
+
NOT_PRODUCT (
void initializeIndexedFreeListArrayReturnedBytes();
size_t sumIndexedFreeListArrayReturnedBytes();
@@ -638,8 +638,9 @@
// Statistics functions
// Initialize census for lists before the sweep.
- void beginSweepFLCensus(float sweep_current,
- float sweep_estimate);
+ void beginSweepFLCensus(float inter_sweep_current,
+ float inter_sweep_estimate,
+ float intra_sweep_estimate);
// Set the surplus for each of the free lists.
void setFLSurplus();
// Set the hint for each of the free lists.
@@ -730,16 +731,17 @@
FreeList _indexedFreeList[CompactibleFreeListSpace::IndexSetSize];
// Initialized from a command-line arg.
- size_t _blocks_to_claim;
-#if CFLS_LAB_REFILL_STATS
- // Some statistics.
- int _refills;
- int _blocksTaken;
- static int _tot_refills;
- static int _tot_blocksTaken;
- static int _next_threshold;
-#endif
+ // Allocation statistics in support of dynamic adjustment of
+ // #blocks to claim per get_from_global_pool() call below.
+ static AdaptiveWeightedAverage
+ _blocks_to_claim [CompactibleFreeListSpace::IndexSetSize];
+ static size_t _global_num_blocks [CompactibleFreeListSpace::IndexSetSize];
+ static int _global_num_workers[CompactibleFreeListSpace::IndexSetSize];
+ size_t _num_blocks [CompactibleFreeListSpace::IndexSetSize];
+
+ // Internal work method
+ void get_from_global_pool(size_t word_sz, FreeList* fl);
public:
CFLS_LAB(CompactibleFreeListSpace* cfls);
@@ -748,7 +750,12 @@
HeapWord* alloc(size_t word_sz);
// Return any unused portions of the buffer to the global pool.
- void retire();
+ void retire(int tid);
+
+ // Dynamic OldPLABSize sizing
+ static void compute_desired_plab_size();
+ // When the settings are modified from default static initialization
+ static void modify_initialization(size_t n, unsigned wt);
};
size_t PromotionInfo::refillSize() const {
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -253,7 +253,6 @@
}
}
-
void ConcurrentMarkSweepGeneration::ref_processor_init() {
assert(collector() != NULL, "no collector");
collector()->ref_processor_init();
@@ -341,6 +340,14 @@
_icms_duty_cycle = CMSIncrementalDutyCycle;
}
+double CMSStats::cms_free_adjustment_factor(size_t free) const {
+ // TBD: CR 6909490
+ return 1.0;
+}
+
+void CMSStats::adjust_cms_free_adjustment_factor(bool fail, size_t free) {
+}
+
// If promotion failure handling is on use
// the padded average size of the promotion for each
// young generation collection.
@@ -361,7 +368,11 @@
// Adjust by the safety factor.
double cms_free_dbl = (double)cms_free;
- cms_free_dbl = cms_free_dbl * (100.0 - CMSIncrementalSafetyFactor) / 100.0;
+ double cms_adjustment = (100.0 - CMSIncrementalSafetyFactor)/100.0;
+ // Apply a further correction factor which tries to adjust
+ // for recent occurrence of concurrent mode failures.
+ cms_adjustment = cms_adjustment * cms_free_adjustment_factor(cms_free);
+ cms_free_dbl = cms_free_dbl * cms_adjustment;
if (PrintGCDetails && Verbose) {
gclog_or_tty->print_cr("CMSStats::time_until_cms_gen_full: cms_free "
@@ -395,6 +406,8 @@
// late.
double work = cms_duration() + gc0_period();
double deadline = time_until_cms_gen_full();
+ // If a concurrent mode failure occurred recently, we want to be
+ // more conservative and halve our expected time_until_cms_gen_full()
if (work > deadline) {
if (Verbose && PrintGCDetails) {
gclog_or_tty->print(
@@ -556,7 +569,8 @@
_should_unload_classes(false),
_concurrent_cycles_since_last_unload(0),
_roots_scanning_options(0),
- _sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
+ _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
+ _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
{
if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
ExplicitGCInvokesConcurrent = true;
@@ -773,7 +787,7 @@
NOT_PRODUCT(_overflow_counter = CMSMarkStackOverflowInterval;)
_gc_counters = new CollectorCounters("CMS", 1);
_completed_initialization = true;
- _sweep_timer.start(); // start of time
+ _inter_sweep_timer.start(); // start of time
}
const char* ConcurrentMarkSweepGeneration::name() const {
@@ -900,6 +914,14 @@
return result;
}
+// At a promotion failure dump information on block layout in heap
+// (cms old generation).
+void ConcurrentMarkSweepGeneration::promotion_failure_occurred() {
+ if (CMSDumpAtPromotionFailure) {
+ cmsSpace()->dump_at_safepoint_with_locks(collector(), gclog_or_tty);
+ }
+}
+
CompactibleSpace*
ConcurrentMarkSweepGeneration::first_compaction_space() const {
return _cmsSpace;
@@ -1368,12 +1390,7 @@
ConcurrentMarkSweepGeneration::
par_promote_alloc_done(int thread_num) {
CMSParGCThreadState* ps = _par_gc_thread_states[thread_num];
- ps->lab.retire();
-#if CFLS_LAB_REFILL_STATS
- if (thread_num == 0) {
- _cmsSpace->print_par_alloc_stats();
- }
-#endif
+ ps->lab.retire(thread_num);
}
void
@@ -1974,11 +1991,14 @@
// We must adjust the allocation statistics being maintained
// in the free list space. We do so by reading and clearing
// the sweep timer and updating the block flux rate estimates below.
- assert(_sweep_timer.is_active(), "We should never see the timer inactive");
- _sweep_timer.stop();
- // Note that we do not use this sample to update the _sweep_estimate.
- _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
- _sweep_estimate.padded_average());
+ assert(!_intra_sweep_timer.is_active(), "_intra_sweep_timer should be inactive");
+ if (_inter_sweep_timer.is_active()) {
+ _inter_sweep_timer.stop();
+ // Note that we do not use this sample to update the _inter_sweep_estimate.
+ _cmsGen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+ _inter_sweep_estimate.padded_average(),
+ _intra_sweep_estimate.padded_average());
+ }
GenMarkSweep::invoke_at_safepoint(_cmsGen->level(),
ref_processor(), clear_all_soft_refs);
@@ -2015,10 +2035,10 @@
}
// Adjust the per-size allocation stats for the next epoch.
- _cmsGen->cmsSpace()->endSweepFLCensus(sweepCount() /* fake */);
- // Restart the "sweep timer" for next epoch.
- _sweep_timer.reset();
- _sweep_timer.start();
+ _cmsGen->cmsSpace()->endSweepFLCensus(sweep_count() /* fake */);
+ // Restart the "inter sweep timer" for the next epoch.
+ _inter_sweep_timer.reset();
+ _inter_sweep_timer.start();
// Sample collection pause time and reset for collection interval.
if (UseAdaptiveSizePolicy) {
@@ -2676,7 +2696,7 @@
// Also reset promotion tracking in par gc thread states.
if (ParallelGCThreads > 0) {
for (uint i = 0; i < ParallelGCThreads; i++) {
- _par_gc_thread_states[i]->promo.stopTrackingPromotions();
+ _par_gc_thread_states[i]->promo.stopTrackingPromotions(i);
}
}
}
@@ -2771,7 +2791,7 @@
bool do_bit(size_t offset) {
HeapWord* addr = _marks->offsetToHeapWord(offset);
if (!_marks->isMarked(addr)) {
- oop(addr)->print();
+ oop(addr)->print_on(gclog_or_tty);
gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr);
_failed = true;
}
@@ -2820,7 +2840,7 @@
// Clear any marks from a previous round
verification_mark_bm()->clear_all();
assert(verification_mark_stack()->isEmpty(), "markStack should be empty");
- assert(overflow_list_is_empty(), "overflow list should be empty");
+ verify_work_stacks_empty();
GenCollectedHeap* gch = GenCollectedHeap::heap();
gch->ensure_parsability(false); // fill TLABs, but no need to retire them
@@ -2893,8 +2913,8 @@
verification_mark_bm()->iterate(&vcl);
if (vcl.failed()) {
gclog_or_tty->print("Verification failed");
- Universe::heap()->print();
- fatal(" ... aborting");
+ Universe::heap()->print_on(gclog_or_tty);
+ fatal("CMS: failed marking verification after remark");
}
}
@@ -3314,7 +3334,7 @@
Universe::heap()->barrier_set()->resize_covered_region(mr);
// Hmmmm... why doesn't CFLS::set_end verify locking?
// This is quite ugly; FIX ME XXX
- _cmsSpace->assert_locked();
+ _cmsSpace->assert_locked(freelistLock());
_cmsSpace->set_end((HeapWord*)_virtual_space.high());
// update the space and generation capacity counters
@@ -5868,9 +5888,9 @@
check_correct_thread_executing();
verify_work_stacks_empty();
verify_overflow_empty();
- incrementSweepCount();
- _sweep_timer.stop();
- _sweep_estimate.sample(_sweep_timer.seconds());
+ increment_sweep_count();
+ _inter_sweep_timer.stop();
+ _inter_sweep_estimate.sample(_inter_sweep_timer.seconds());
size_policy()->avg_cms_free_at_sweep()->sample(_cmsGen->free());
// PermGen verification support: If perm gen sweeping is disabled in
@@ -5893,6 +5913,9 @@
}
}
+ assert(!_intra_sweep_timer.is_active(), "Should not be active");
+ _intra_sweep_timer.reset();
+ _intra_sweep_timer.start();
if (asynch) {
TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
CMSPhaseAccounting pa(this, "sweep", !PrintGCDetails);
@@ -5937,8 +5960,11 @@
verify_work_stacks_empty();
verify_overflow_empty();
- _sweep_timer.reset();
- _sweep_timer.start();
+ _intra_sweep_timer.stop();
+ _intra_sweep_estimate.sample(_intra_sweep_timer.seconds());
+
+ _inter_sweep_timer.reset();
+ _inter_sweep_timer.start();
update_time_of_last_gc(os::javaTimeMillis());
@@ -5981,11 +6007,11 @@
// FIX ME!!! Looks like this belongs in CFLSpace, with
// CMSGen merely delegating to it.
void ConcurrentMarkSweepGeneration::setNearLargestChunk() {
- double nearLargestPercent = 0.999;
+ double nearLargestPercent = FLSLargestBlockCoalesceProximity;
HeapWord* minAddr = _cmsSpace->bottom();
HeapWord* largestAddr =
(HeapWord*) _cmsSpace->dictionary()->findLargestDict();
- if (largestAddr == 0) {
+ if (largestAddr == NULL) {
// The dictionary appears to be empty. In this case
// try to coalesce at the end of the heap.
largestAddr = _cmsSpace->end();
@@ -5993,6 +6019,13 @@
size_t largestOffset = pointer_delta(largestAddr, minAddr);
size_t nearLargestOffset =
(size_t)((double)largestOffset * nearLargestPercent) - MinChunkSize;
+ if (PrintFLSStatistics != 0) {
+ gclog_or_tty->print_cr(
+ "CMS: Large Block: " PTR_FORMAT ";"
+ " Proximity: " PTR_FORMAT " -> " PTR_FORMAT,
+ largestAddr,
+ _cmsSpace->nearLargestChunk(), minAddr + nearLargestOffset);
+ }
_cmsSpace->set_nearLargestChunk(minAddr + nearLargestOffset);
}
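The proximity arithmetic above, restated on plain word offsets; 0.999 (the literal the old code used, now FLSLargestBlockCoalesceProximity) and a MinChunkSize of 4 words are assumed values for the example.

#include <cstddef>
#include <cstdio>

// Compute the offset (in words above bottom) beyond which the sweeper
// coalesces toward the largest block.
size_t near_largest_offset(size_t largest_offset,
                           double proximity = 0.999,
                           size_t min_chunk_words = 4) {
  return (size_t)((double)largest_offset * proximity) - min_chunk_words;
}

int main() {
  // Largest block found 1,000,000 words above bottom: start coalescing
  // just short of it, at offset 998,996.
  std::printf("%zu\n", near_largest_offset(1000000));
  return 0;
}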
@@ -6072,9 +6105,11 @@
assert_lock_strong(gen->freelistLock());
assert_lock_strong(bitMapLock());
- assert(!_sweep_timer.is_active(), "Was switched off in an outer context");
- gen->cmsSpace()->beginSweepFLCensus((float)(_sweep_timer.seconds()),
- _sweep_estimate.padded_average());
+ assert(!_inter_sweep_timer.is_active(), "Was switched off in an outer context");
+ assert(_intra_sweep_timer.is_active(), "Was switched on in an outer context");
+ gen->cmsSpace()->beginSweepFLCensus((float)(_inter_sweep_timer.seconds()),
+ _inter_sweep_estimate.padded_average(),
+ _intra_sweep_estimate.padded_average());
gen->setNearLargestChunk();
{
@@ -6087,7 +6122,7 @@
// end-of-sweep-census below will be off by a little bit.
}
gen->cmsSpace()->sweep_completed();
- gen->cmsSpace()->endSweepFLCensus(sweepCount());
+ gen->cmsSpace()->endSweepFLCensus(sweep_count());
if (should_unload_classes()) { // unloaded classes this cycle,
_concurrent_cycles_since_last_unload = 0; // ... reset count
} else { // did not unload classes,
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -355,6 +355,11 @@
unsigned int new_duty_cycle);
unsigned int icms_update_duty_cycle_impl();
+ // In support of adjusting of cms trigger ratios based on history
+ // of concurrent mode failure.
+ double cms_free_adjustment_factor(size_t free) const;
+ void adjust_cms_free_adjustment_factor(bool fail, size_t free);
+
public:
CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
unsigned int alpha = CMSExpAvgFactor);
@@ -570,8 +575,11 @@
// appropriately.
void check_gc_time_limit();
// XXX Move these to CMSStats ??? FIX ME !!!
- elapsedTimer _sweep_timer;
- AdaptivePaddedAverage _sweep_estimate;
+ elapsedTimer _inter_sweep_timer; // time between sweeps
+ elapsedTimer _intra_sweep_timer; // time _in_ sweeps
+ // padded decaying average estimates of the above
+ AdaptivePaddedAverage _inter_sweep_estimate;
+ AdaptivePaddedAverage _intra_sweep_estimate;
protected:
ConcurrentMarkSweepGeneration* _cmsGen; // old gen (CMS)
@@ -625,6 +633,7 @@
// . _collectorState <= Idling == post-sweep && pre-mark
// . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
// precleaning || abortablePrecleanb
+ public:
enum CollectorState {
Resizing = 0,
Resetting = 1,
@@ -636,6 +645,7 @@
FinalMarking = 7,
Sweeping = 8
};
+ protected:
static CollectorState _collectorState;
// State related to prologue/epilogue invocation for my generations
@@ -655,7 +665,7 @@
int _numYields;
size_t _numDirtyCards;
- uint _sweepCount;
+ size_t _sweep_count;
// number of full gc's since the last concurrent gc.
uint _full_gcs_since_conc_gc;
@@ -905,7 +915,7 @@
// Check that the currently executing thread is the expected
// one (foreground collector or background collector).
- void check_correct_thread_executing() PRODUCT_RETURN;
+ static void check_correct_thread_executing() PRODUCT_RETURN;
// XXXPERM void print_statistics() PRODUCT_RETURN;
bool is_cms_reachable(HeapWord* addr);
@@ -930,8 +940,8 @@
static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
- uint sweepCount() const { return _sweepCount; }
- void incrementSweepCount() { _sweepCount++; }
+ size_t sweep_count() const { return _sweep_count; }
+ void increment_sweep_count() { _sweep_count++; }
// Timers/stats for gc scheduling and incremental mode pacing.
CMSStats& stats() { return _stats; }
@@ -1165,6 +1175,11 @@
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
bool younger_handles_promotion_failure) const;
+ // Inform this (non-young) generation that a promotion failure was
+ // encountered during a collection of a younger generation that
+ // promotes into this generation.
+ virtual void promotion_failure_occurred();
+
bool should_collect(bool full, size_t size, bool tlab);
virtual bool should_concurrent_collect() const;
virtual bool is_too_full() const;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeBlockDictionary.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -55,7 +55,8 @@
virtual void dictCensusUpdate(size_t size, bool split, bool birth) = 0;
virtual bool coalDictOverPopulated(size_t size) = 0;
virtual void beginSweepDictCensus(double coalSurplusPercent,
- float sweep_current, float sweep_ewstimate) = 0;
+ float inter_sweep_current, float inter_sweep_estimate,
+ float intra_sweep_current) = 0;
virtual void endSweepDictCensus(double splitSurplusPercent) = 0;
virtual FreeChunk* findLargestDict() const = 0;
// verify that the given chunk is in the dictionary.
@@ -79,6 +80,7 @@
}
virtual void printDictCensus() const = 0;
+ virtual void print_free_lists(outputStream* st) const = 0;
virtual void verify() const = 0;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -67,3 +67,8 @@
}
}
#endif
+
+void FreeChunk::print_on(outputStream* st) {
+ st->print_cr("Next: " PTR_FORMAT " Prev: " PTR_FORMAT " %s",
+ next(), prev(), cantCoalesce() ? "[can't coalesce]" : "");
+}
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeChunk.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -129,6 +129,8 @@
void verifyList() const PRODUCT_RETURN;
void mangleAllocated(size_t size) PRODUCT_RETURN;
void mangleFreed(size_t size) PRODUCT_RETURN;
+
+ void print_on(outputStream* st);
};
// Alignment helpers etc.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -81,8 +81,8 @@
set_hint(hint);
}
-void FreeList::init_statistics() {
- _allocation_stats.initialize();
+void FreeList::init_statistics(bool split_birth) {
+ _allocation_stats.initialize(split_birth);
}
FreeChunk* FreeList::getChunkAtHead() {
@@ -292,14 +292,31 @@
}
#ifndef PRODUCT
+void FreeList::verify_stats() const {
+ // The +1 of the LH comparand is to allow some "looseness" in
+ // checking: we usually call this interface when adding a block
+ // and we'll subsequently update the stats; we cannot update the
+ // stats beforehand because in the case of the large-block BT
+ // dictionary for example, this might be the first block and
+ // in that case there would be no place that we could record
+ // the stats (which are kept in the block itself).
+ assert(_allocation_stats.prevSweep() + _allocation_stats.splitBirths() + 1 // Total Stock + 1
+ >= _allocation_stats.splitDeaths() + (ssize_t)count(), "Conservation Principle");
+}
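A minimal restatement of the inequality verify_stats() checks, with plain longs standing in for the ssize_t counters; the +1 slack models the block being added before its own stats are recorded, as the comment above explains.

#include <cassert>

// Everything currently on the list must be accounted for by stock carried
// over from the previous sweep plus split-births, less split-deaths.
bool stats_conserved(long prev_sweep, long split_births,
                     long split_deaths, long count) {
  return prev_sweep + split_births + 1 >= split_deaths + count;
}

int main() {
  // 10 blocks carried over, 5 born by splitting, 3 split away, 12 on list.
  assert(stats_conserved(10, 5, 3, 12));
  return 0;
}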
+
void FreeList::assert_proper_lock_protection_work() const {
-#ifdef ASSERT
- if (_protecting_lock != NULL &&
- SharedHeap::heap()->n_par_threads() > 0) {
- // Should become an assert.
- guarantee(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+ assert(_protecting_lock != NULL, "Don't call this directly");
+ assert(ParallelGCThreads > 0, "Don't call this directly");
+ Thread* thr = Thread::current();
+ if (thr->is_VM_thread() || thr->is_ConcurrentGC_thread()) {
+ // assert that we are holding the freelist lock
+ } else if (thr->is_GC_task_thread()) {
+ assert(_protecting_lock->owned_by_self(), "FreeList RACE DETECTED");
+ } else if (thr->is_Java_thread()) {
+ assert(!SafepointSynchronize::is_at_safepoint(), "Should not be executing");
+ } else {
+ ShouldNotReachHere(); // unaccounted thread type?
}
-#endif
}
#endif
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/freeList.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -35,18 +35,26 @@
// for that implementation.
class Mutex;
+class TreeList;
class FreeList VALUE_OBJ_CLASS_SPEC {
friend class CompactibleFreeListSpace;
friend class VMStructs;
- friend class printTreeCensusClosure;
- FreeChunk* _head; // List of free chunks
+ friend class PrintTreeCensusClosure;
+
+ protected:
+ TreeList* _parent;
+ TreeList* _left;
+ TreeList* _right;
+
+ private:
+ FreeChunk* _head; // Head of list of free chunks
FreeChunk* _tail; // Tail of list of free chunks
- size_t _size; // Size in Heap words of each chunks
+ size_t _size; // Size in Heap words of each chunk
ssize_t _count; // Number of entries in list
size_t _hint; // next larger size list with a positive surplus
- AllocationStats _allocation_stats; // statistics for smart allocation
+ AllocationStats _allocation_stats; // allocation-related statistics
#ifdef ASSERT
Mutex* _protecting_lock;
@@ -63,9 +71,12 @@
// Initialize the allocation statistics.
protected:
- void init_statistics();
+ void init_statistics(bool split_birth = false);
void set_count(ssize_t v) { _count = v;}
- void increment_count() { _count++; }
+ void increment_count() {
+ _count++;
+ }
+
void decrement_count() {
_count--;
assert(_count >= 0, "Count should not be negative");
@@ -167,11 +178,13 @@
_allocation_stats.set_desired(v);
}
void compute_desired(float inter_sweep_current,
- float inter_sweep_estimate) {
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) {
assert_proper_lock_protection();
_allocation_stats.compute_desired(_count,
inter_sweep_current,
- inter_sweep_estimate);
+ inter_sweep_estimate,
+ intra_sweep_estimate);
}
ssize_t coalDesired() const {
return _allocation_stats.coalDesired();
@@ -306,6 +319,9 @@
// found. Return NULL if "fc" is not found.
bool verifyChunkInFreeLists(FreeChunk* fc) const;
+ // Stats verification
+ void verify_stats() const PRODUCT_RETURN;
+
// Printing support
static void print_labels_on(outputStream* st, const char* c);
void print_on(outputStream* st, const char* c = NULL) const;
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_concurrentMarkSweep Fri Jan 15 14:25:44 2010 -0800
@@ -221,6 +221,7 @@
freeList.cpp globals.hpp
freeList.cpp mutex.hpp
freeList.cpp sharedHeap.hpp
+freeList.cpp vmThread.hpp
freeList.hpp allocationStats.hpp
--- a/hotspot/src/share/vm/gc_implementation/includeDB_gc_serial Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/includeDB_gc_serial Fri Jan 15 14:25:44 2010 -0800
@@ -71,6 +71,7 @@
gcUtil.hpp allocation.hpp
gcUtil.hpp debug.hpp
gcUtil.hpp globalDefinitions.hpp
+gcUtil.hpp ostream.hpp
gcUtil.hpp timer.hpp
generationCounters.cpp generationCounters.hpp
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -50,6 +50,7 @@
work_queue_set_, &term_),
_is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
_keep_alive_closure(&_scan_weak_ref_closure),
+ _promotion_failure_size(0),
_pushes(0), _pops(0), _steals(0), _steal_attempts(0), _term_attempts(0),
_strong_roots_time(0.0), _term_time(0.0)
{
@@ -249,6 +250,16 @@
}
}
+void ParScanThreadState::print_and_clear_promotion_failure_size() {
+ if (_promotion_failure_size != 0) {
+ if (PrintPromotionFailure) {
+ gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
+ _thread_num, _promotion_failure_size);
+ }
+ _promotion_failure_size = 0;
+ }
+}
+
class ParScanThreadStateSet: private ResourceArray {
public:
// Initializes states for the specified number of threads;
@@ -260,11 +271,11 @@
GrowableArray<oop>** overflow_stacks_,
size_t desired_plab_sz,
ParallelTaskTerminator& term);
- inline ParScanThreadState& thread_sate(int i);
+ inline ParScanThreadState& thread_state(int i);
int pushes() { return _pushes; }
int pops() { return _pops; }
int steals() { return _steals; }
- void reset();
+ void reset(bool promotion_failed);
void flush();
private:
ParallelTaskTerminator& _term;
@@ -295,22 +306,31 @@
}
}
-inline ParScanThreadState& ParScanThreadStateSet::thread_sate(int i)
+inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
{
assert(i >= 0 && i < length(), "sanity check!");
return ((ParScanThreadState*)_data)[i];
}
-void ParScanThreadStateSet::reset()
+void ParScanThreadStateSet::reset(bool promotion_failed)
{
_term.reset_for_reuse();
+ if (promotion_failed) {
+ for (int i = 0; i < length(); ++i) {
+ thread_state(i).print_and_clear_promotion_failure_size();
+ }
+ }
}
void ParScanThreadStateSet::flush()
{
+ // Work in this loop should be kept as lightweight as
+ // possible, since it might otherwise become a bottleneck
+ // to scaling. If heavier-weight work is ever added to this
+ // loop, consider parallelizing it across the worker threads.
for (int i = 0; i < length(); ++i) {
- ParScanThreadState& par_scan_state = thread_sate(i);
+ ParScanThreadState& par_scan_state = thread_state(i);
// Flush stats related to To-space PLAB activity and
// retire the last buffer.
@@ -362,6 +382,14 @@
}
}
}
+ if (UseConcMarkSweepGC && ParallelGCThreads > 0) {
+ // We need to call this even when ResizeOldPLAB is disabled
+ // so as to avoid breaking some asserts. While we may be able
+ // to avoid this by reorganizing the code a bit, I am loathe
+ // to do that unless we find cases where ergo leads to bad
+ // performance.
+ CFLS_LAB::compute_desired_plab_size();
+ }
}
ParScanClosure::ParScanClosure(ParNewGeneration* g,
@@ -475,7 +503,7 @@
Generation* old_gen = gch->next_gen(_gen);
- ParScanThreadState& par_scan_state = _state_set->thread_sate(i);
+ ParScanThreadState& par_scan_state = _state_set->thread_state(i);
par_scan_state.set_young_old_boundary(_young_old_boundary);
par_scan_state.start_strong_roots();
@@ -659,7 +687,7 @@
{
ResourceMark rm;
HandleMark hm;
- ParScanThreadState& par_scan_state = _state_set.thread_sate(i);
+ ParScanThreadState& par_scan_state = _state_set.thread_state(i);
par_scan_state.set_young_old_boundary(_young_old_boundary);
_task.work(i, par_scan_state.is_alive_closure(),
par_scan_state.keep_alive_closure(),
@@ -693,7 +721,7 @@
ParNewRefProcTaskProxy rp_task(task, _generation, *_generation.next_gen(),
_generation.reserved().end(), _state_set);
workers->run_task(&rp_task);
- _state_set.reset();
+ _state_set.reset(_generation.promotion_failed());
}
void ParNewRefProcTaskExecutor::execute(EnqueueTask& task)
@@ -813,7 +841,7 @@
GenCollectedHeap::StrongRootsScope srs(gch);
tsk.work(0);
}
- thread_state_set.reset();
+ thread_state_set.reset(promotion_failed());
if (PAR_STATS_ENABLED && ParallelGCVerbose) {
gclog_or_tty->print("Thread totals:\n"
@@ -882,6 +910,8 @@
swap_spaces(); // Make life simpler for CMS || rescan; see 6483690.
from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail();
+ // Inform the next generation that a promotion failure occurred.
+ _next_gen->promotion_failure_occurred();
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
@@ -1029,6 +1059,8 @@
new_obj = old;
preserve_mark_if_necessary(old, m);
+ // Log the size of the maiden promotion failure
+ par_scan_state->log_promotion_failure(sz);
}
old->forward_to(new_obj);
@@ -1150,6 +1182,8 @@
failed_to_promote = true;
preserve_mark_if_necessary(old, m);
+ // Log the size of the maiden promotion failure
+ par_scan_state->log_promotion_failure(sz);
}
} else {
// Is in to-space; do copying ourselves.
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -97,6 +97,9 @@
int _pushes, _pops, _steals, _steal_attempts, _term_attempts;
int _overflow_pushes, _overflow_refills, _overflow_refill_objs;
+ // Stats for promotion failure
+ size_t _promotion_failure_size;
+
// Timing numbers.
double _start;
double _start_strong_roots;
@@ -169,6 +172,15 @@
// Undo the most recent allocation ("obj", of "word_sz").
void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
+ // Promotion failure stats
+ size_t promotion_failure_size() { return _promotion_failure_size; }
+ void log_promotion_failure(size_t sz) {
+ if (_promotion_failure_size == 0) {
+ _promotion_failure_size = sz;
+ }
+ }
+ void print_and_clear_promotion_failure_size();
+
int pushes() { return _pushes; }
int pops() { return _pops; }
int steals() { return _steals; }
--- a/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/allocationStats.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -31,7 +31,7 @@
// beginning of this sweep:
// Count(end_last_sweep) - Count(start_this_sweep)
// + splitBirths(between) - splitDeaths(between)
- // The above number divided by the time since the start [END???] of the
+ // The above number divided by the time since the end of the
// previous sweep gives us a time rate of demand for blocks
// of this size. We compute a padded average of this rate as
// our current estimate for the time rate of demand for blocks
@@ -41,7 +41,7 @@
// estimates.
AdaptivePaddedAverage _demand_rate_estimate;
- ssize_t _desired; // Estimate computed as described above
+ ssize_t _desired; // Demand estimate computed as described above
ssize_t _coalDesired; // desired +/- small-percent for tuning coalescing
ssize_t _surplus; // count - (desired +/- small-percent),
@@ -53,9 +53,9 @@
ssize_t _coalDeaths; // loss from coalescing
ssize_t _splitBirths; // additional chunks from splitting
ssize_t _splitDeaths; // loss from splitting
- size_t _returnedBytes; // number of bytes returned to list.
+ size_t _returnedBytes; // number of bytes returned to list.
public:
- void initialize() {
+ void initialize(bool split_birth = false) {
AdaptivePaddedAverage* dummy =
new (&_demand_rate_estimate) AdaptivePaddedAverage(CMS_FLSWeight,
CMS_FLSPadding);
@@ -67,7 +67,7 @@
_beforeSweep = 0;
_coalBirths = 0;
_coalDeaths = 0;
- _splitBirths = 0;
+ _splitBirths = split_birth? 1 : 0;
_splitDeaths = 0;
_returnedBytes = 0;
}
@@ -75,10 +75,12 @@
AllocationStats() {
initialize();
}
+
// The rate estimate is in blocks per second.
void compute_desired(size_t count,
float inter_sweep_current,
- float inter_sweep_estimate) {
+ float inter_sweep_estimate,
+ float intra_sweep_estimate) {
// If the latest inter-sweep time is below our granularity
// of measurement, we may call in here with
// inter_sweep_current == 0. However, even for suitably small
@@ -88,12 +90,31 @@
// vulnerable to noisy glitches. In such cases, we
// ignore the current sample and use currently available
// historical estimates.
+ // XXX NEEDS TO BE FIXED
+ // assert(prevSweep() + splitBirths() >= splitDeaths() + (ssize_t)count, "Conservation Principle");
+ // ^^^^^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ // "Total Stock" "Not used at this block size"
if (inter_sweep_current > _threshold) {
- ssize_t demand = prevSweep() - count + splitBirths() - splitDeaths();
+ ssize_t demand = prevSweep() - (ssize_t)count + splitBirths() - splitDeaths();
+ // XXX NEEDS TO BE FIXED
+ // assert(demand >= 0, "Demand should be non-negative");
+ // Defensive: adjust for imprecision in event counting
+ if (demand < 0) {
+ demand = 0;
+ }
+ float old_rate = _demand_rate_estimate.padded_average();
float rate = ((float)demand)/inter_sweep_current;
_demand_rate_estimate.sample(rate);
- _desired = (ssize_t)(_demand_rate_estimate.padded_average()
- *inter_sweep_estimate);
+ float new_rate = _demand_rate_estimate.padded_average();
+ ssize_t old_desired = _desired;
+ _desired = (ssize_t)(new_rate * (inter_sweep_estimate
+ + (CMSExtrapolateSweep
+ ? intra_sweep_estimate
+ : 0.0)));
+ if (PrintFLSStatistics > 1) {
+ gclog_or_tty->print_cr("demand: %d, old_rate: %f, current_rate: %f, new_rate: %f, old_desired: %d, new_desired: %d",
+ demand, old_rate, rate, new_rate, old_desired, _desired);
+ }
}
}
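The reworked compute_desired() above derives demand from a conservation identity (prevSweep - count + splitBirths - splitDeaths), clamps it at zero, folds the resulting blocks-per-second rate into the padded average, and scales the smoothed rate over the inter-sweep estimate plus, when CMSExtrapolateSweep is set, the intra-sweep estimate. A self-contained sketch of that arithmetic, with the padded average simplified to a plain exponential average (all names illustrative, not the HotSpot API):

// Standalone sketch of the demand/desired computation above.
#include <cstdio>

struct DemandEstimator {
  float _rate_estimate;     // smoothed demand rate, blocks per second
  float _weight;            // smoothing weight in [0,1] for new samples
  long  _desired;           // estimated block demand for the next cycle
  long  _prev_sweep;        // block count at the end of the previous sweep
  long  _split_births;
  long  _split_deaths;

  void sample(float rate) {
    _rate_estimate = (1.0f - _weight) * _rate_estimate + _weight * rate;
  }

  void compute_desired(long count,
                       float inter_sweep_current,
                       float inter_sweep_estimate,
                       float intra_sweep_estimate,
                       bool  extrapolate_sweep) {
    // Conservation identity: what was there, minus what is left, plus births,
    // minus deaths, is what was consumed since the previous sweep.
    long demand = _prev_sweep - count + _split_births - _split_deaths;
    if (demand < 0) demand = 0;             // defensive clamp, as in the patch
    if (inter_sweep_current > 0.0f) {
      sample((float)demand / inter_sweep_current);
      // Project the smoothed rate over the next inter-sweep interval,
      // optionally extended by the expected intra-sweep time.
      float horizon = inter_sweep_estimate +
                      (extrapolate_sweep ? intra_sweep_estimate : 0.0f);
      _desired = (long)(_rate_estimate * horizon);
    }
  }
};

int main() {
  DemandEstimator e = {0.0f, 0.25f, 0, 100, 10, 5};
  e.compute_desired(60, 2.0f, 2.5f, 0.5f, true);
  // demand = 45, rate = 22.5, smoothed rate = 5.625, horizon = 3.0 -> 16
  printf("desired = %ld\n", e._desired);
  return 0;
}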
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -52,11 +52,35 @@
_last_sample = new_sample;
}
+void AdaptiveWeightedAverage::print() const {
+ print_on(tty);
+}
+
+void AdaptiveWeightedAverage::print_on(outputStream* st) const {
+ guarantee(false, "NYI");
+}
+
+void AdaptivePaddedAverage::print() const {
+ print_on(tty);
+}
+
+void AdaptivePaddedAverage::print_on(outputStream* st) const {
+ guarantee(false, "NYI");
+}
+
+void AdaptivePaddedNoZeroDevAverage::print() const {
+ print_on(tty);
+}
+
+void AdaptivePaddedNoZeroDevAverage::print_on(outputStream* st) const {
+ guarantee(false, "NYI");
+}
+
void AdaptivePaddedAverage::sample(float new_sample) {
- // Compute our parent classes sample information
+ // Compute new adaptive weighted average based on new sample.
AdaptiveWeightedAverage::sample(new_sample);
- // Now compute the deviation and the new padded sample
+ // Now update the deviation and the padded average.
float new_avg = average();
float new_dev = compute_adaptive_average(fabsd(new_sample - new_avg),
deviation());
--- a/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcUtil.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -54,8 +54,8 @@
public:
// Input weight must be between 0 and 100
- AdaptiveWeightedAverage(unsigned weight) :
- _average(0.0), _sample_count(0), _weight(weight), _last_sample(0.0) {
+ AdaptiveWeightedAverage(unsigned weight, float avg = 0.0) :
+ _average(avg), _sample_count(0), _weight(weight), _last_sample(0.0) {
}
void clear() {
@@ -64,6 +64,13 @@
_last_sample = 0;
}
+ // Useful for modifying static structures after startup.
+ void modify(size_t avg, unsigned wt, bool force = false) {
+ assert(force, "Are you sure you want to call this?");
+ _average = (float)avg;
+ _weight = wt;
+ }
+
// Accessors
float average() const { return _average; }
unsigned weight() const { return _weight; }
@@ -83,6 +90,10 @@
// Convert to float and back to avoid integer overflow.
return (size_t)exp_avg((float)avg, (float)sample, weight);
}
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const;
};
@@ -129,6 +140,10 @@
// Override
void sample(float new_sample);
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const;
};
// A weighted average that includes a deviation from the average,
@@ -146,7 +161,12 @@
AdaptivePaddedAverage(weight, padding) {}
// Override
void sample(float new_sample);
+
+ // Printing
+ void print_on(outputStream* st) const;
+ void print() const;
};
+
// Use a least squares fit to a set of data to generate a linear
// equation.
// y = intercept + slope * x
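The averages above use the usual exponentially weighted update, avg' = ((100 - w) * avg + w * sample) / 100, and the padded variant also smooths the absolute deviation the same way and pads the reported average by a multiple of it. A small standalone sketch of that math (weight given as a percentage as in these classes; the types are illustrative, not the HotSpot classes):

// Standalone sketch of the weighted-average arithmetic.
#include <cmath>
#include <cstdio>

static float exp_avg(float avg, float sample, unsigned weight) {
  // New samples contribute weight percent; history keeps the rest.
  return ((100.0f - weight) * avg + weight * sample) / 100.0f;
}

struct PaddedAverage {
  float    _average;
  float    _deviation;
  unsigned _weight;     // percentage, 0..100
  unsigned _padding;    // number of deviations to pad the average by

  void sample(float s) {
    _average   = exp_avg(_average, s, _weight);
    // As in AdaptivePaddedAverage::sample above, the deviation is smoothed
    // from the distance of the new sample to the freshly updated average.
    _deviation = exp_avg(_deviation, std::fabs(s - _average), _weight);
  }
  float padded_average() const { return _average + _padding * _deviation; }
};

int main() {
  PaddedAverage pa = {0.0f, 0.0f, 50, 2};
  float samples[] = {10.0f, 12.0f, 8.0f, 11.0f};
  for (float s : samples) pa.sample(s);
  printf("avg = %.2f, padded = %.2f\n", pa._average, pa.padded_average());
  return 0;
}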
--- a/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/gc_interface/collectedHeap.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -274,7 +274,7 @@
fill_with_array(start, words);
} else if (words > 0) {
assert(words == min_fill_size(), "unaligned size");
- post_allocation_setup_common(SystemDictionary::object_klass(), start,
+ post_allocation_setup_common(SystemDictionary::Object_klass(), start,
words);
}
}
--- a/hotspot/src/share/vm/includeDB_compiler2 Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/includeDB_compiler2 Fri Jan 15 14:25:44 2010 -0800
@@ -155,6 +155,9 @@
callGenerator.cpp cfgnode.hpp
callGenerator.cpp compileLog.hpp
callGenerator.cpp connode.hpp
+callGenerator.cpp ciCPCache.hpp
+callGenerator.cpp ciMethodHandle.hpp
+callGenerator.cpp javaClasses.hpp
callGenerator.cpp parse.hpp
callGenerator.cpp rootnode.hpp
callGenerator.cpp runtime.hpp
@@ -391,6 +394,9 @@
doCall.cpp addnode.hpp
doCall.cpp callGenerator.hpp
+doCall.cpp ciCallSite.hpp
+doCall.cpp ciCPCache.hpp
+doCall.cpp ciMethodHandle.hpp
doCall.cpp cfgnode.hpp
doCall.cpp compileLog.hpp
doCall.cpp linkResolver.hpp
--- a/hotspot/src/share/vm/includeDB_core Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/includeDB_core Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
//
-// Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+// Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -516,6 +516,11 @@
ciCallProfile.hpp ciClassList.hpp
+ciCallSite.cpp ciCallSite.hpp
+ciCallSite.cpp ciUtilities.hpp
+
+ciCallSite.hpp ciInstance.hpp
+
ciConstant.cpp allocation.hpp
ciConstant.cpp allocation.inline.hpp
ciConstant.cpp ciConstant.hpp
@@ -532,6 +537,12 @@
ciConstantPoolCache.hpp growableArray.hpp
ciConstantPoolCache.hpp resourceArea.hpp
+ciCPCache.cpp cpCacheOop.hpp
+ciCPCache.cpp ciCPCache.hpp
+
+ciCPCache.hpp ciClassList.hpp
+ciCPCache.hpp ciObject.hpp
+
ciEnv.cpp allocation.inline.hpp
ciEnv.cpp ciConstant.hpp
ciEnv.cpp ciEnv.hpp
@@ -593,6 +604,7 @@
ciField.hpp ciClassList.hpp
ciField.hpp ciConstant.hpp
ciField.hpp ciFlags.hpp
+ciField.hpp ciInstance.hpp
ciFlags.cpp ciFlags.hpp
@@ -679,6 +691,7 @@
ciMethod.hpp ciInstanceKlass.hpp
ciMethod.hpp ciObject.hpp
ciMethod.hpp ciSignature.hpp
+ciMethod.hpp methodHandles.hpp
ciMethod.hpp methodLiveness.hpp
ciMethodBlocks.cpp bytecode.hpp
@@ -710,6 +723,15 @@
ciMethodKlass.hpp ciKlass.hpp
ciMethodKlass.hpp ciSymbol.hpp
+ciMethodHandle.cpp ciClassList.hpp
+ciMethodHandle.cpp ciInstance.hpp
+ciMethodHandle.cpp ciMethodHandle.hpp
+ciMethodHandle.cpp ciUtilities.hpp
+ciMethodHandle.cpp methodHandles.hpp
+ciMethodHandle.cpp methodHandleWalk.hpp
+
+ciMethodHandle.hpp methodHandles.hpp
+
ciNullObject.cpp ciNullObject.hpp
ciNullObject.hpp ciClassList.hpp
@@ -755,11 +777,14 @@
ciObject.hpp jniHandles.hpp
ciObjectFactory.cpp allocation.inline.hpp
+ciObjectFactory.cpp ciCallSite.hpp
+ciObjectFactory.cpp ciCPCache.hpp
ciObjectFactory.cpp ciInstance.hpp
ciObjectFactory.cpp ciInstanceKlass.hpp
ciObjectFactory.cpp ciInstanceKlassKlass.hpp
ciObjectFactory.cpp ciMethod.hpp
ciObjectFactory.cpp ciMethodData.hpp
+ciObjectFactory.cpp ciMethodHandle.hpp
ciObjectFactory.cpp ciMethodKlass.hpp
ciObjectFactory.cpp ciNullObject.hpp
ciObjectFactory.cpp ciObjArray.hpp
@@ -793,6 +818,7 @@
ciSignature.hpp globalDefinitions.hpp
ciSignature.hpp growableArray.hpp
+ciStreams.cpp ciCallSite.hpp
ciStreams.cpp ciConstant.hpp
ciStreams.cpp ciField.hpp
ciStreams.cpp ciStreams.hpp
@@ -1499,6 +1525,7 @@
disassembler.cpp fprofiler.hpp
disassembler.cpp handles.inline.hpp
disassembler.cpp hpi.hpp
+disassembler.cpp javaClasses.hpp
disassembler.cpp stubCodeGenerator.hpp
disassembler.cpp stubRoutines.hpp
@@ -2814,6 +2841,12 @@
methodDataOop.hpp orderAccess.hpp
methodDataOop.hpp universe.hpp
+methodHandleWalk.hpp methodHandles.hpp
+
+methodHandleWalk.cpp methodHandleWalk.hpp
+methodHandleWalk.cpp oopFactory.hpp
+methodHandleWalk.cpp rewriter.hpp
+
methodHandles.hpp frame.inline.hpp
methodHandles.hpp globals.hpp
methodHandles.hpp interfaceSupport.hpp
@@ -3471,6 +3504,7 @@
reflection.cpp javaClasses.hpp
reflection.cpp jvm.h
reflection.cpp linkResolver.hpp
+reflection.cpp methodHandleWalk.hpp
reflection.cpp objArrayKlass.hpp
reflection.cpp objArrayOop.hpp
reflection.cpp oopFactory.hpp
--- a/hotspot/src/share/vm/includeDB_gc_parallel Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/includeDB_gc_parallel Fri Jan 15 14:25:44 2010 -0800
@@ -21,6 +21,8 @@
// have any questions.
//
+arguments.cpp compactibleFreeListSpace.hpp
+
assembler_<arch>.cpp g1SATBCardTableModRefBS.hpp
assembler_<arch>.cpp g1CollectedHeap.inline.hpp
assembler_<arch>.cpp heapRegion.hpp
--- a/hotspot/src/share/vm/interpreter/bytecode.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecode.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -102,7 +102,9 @@
KlassHandle resolved_klass;
constantPoolHandle constants(THREAD, _method->constants());
- if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
+ if (adjusted_invoke_code() == Bytecodes::_invokedynamic) {
+ LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
+ } else if (adjusted_invoke_code() != Bytecodes::_invokeinterface) {
LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
} else {
LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle()));
--- a/hotspot/src/share/vm/interpreter/bytecode.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecode.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -205,12 +205,14 @@
bool is_invokespecial() const { return adjusted_invoke_code() == Bytecodes::_invokespecial; }
bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; }
+ bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); }
bool has_giant_index() const { return is_invokedynamic(); }
bool is_valid() const { return is_invokeinterface() ||
is_invokevirtual() ||
is_invokestatic() ||
- is_invokespecial(); }
+ is_invokespecial() ||
+ is_invokedynamic(); }
// Creation
inline friend Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci);
--- a/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecodeTracer.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -270,6 +270,8 @@
st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name());
} else if (tag.is_unresolved_klass()) {
st->print_cr(" <unresolved klass at %d>", i);
+ } else if (tag.is_object()) {
+ st->print_cr(" " PTR_FORMAT, constants->object_at(i));
} else {
st->print_cr(" bad tag=%d at %d", tag.value(), i);
}
--- a/hotspot/src/share/vm/interpreter/bytecodes.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/bytecodes.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -357,7 +357,7 @@
def(_invokespecial , "invokespecial" , "bjj" , NULL , T_ILLEGAL, -1, true);
def(_invokestatic , "invokestatic" , "bjj" , NULL , T_ILLEGAL, 0, true);
def(_invokeinterface , "invokeinterface" , "bjj__", NULL , T_ILLEGAL, -1, true);
- def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, -1, true );
+ def(_invokedynamic , "invokedynamic" , "bjjjj", NULL , T_ILLEGAL, 0, true );
def(_new , "new" , "bii" , NULL , T_OBJECT , 1, true );
def(_newarray , "newarray" , "bc" , NULL , T_OBJECT , 0, true );
def(_anewarray , "anewarray" , "bii" , NULL , T_OBJECT , 0, true );
--- a/hotspot/src/share/vm/interpreter/interpreter.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/interpreter.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -323,7 +323,7 @@
// (NOT needed for the old calling convention)
if (!is_top_frame) {
int index = Bytes::get_native_u4(bcp+1);
- method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
+ method->constants()->cache()->secondary_entry_at(index)->set_parameter_size(callee_parameters);
}
break;
}
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -353,7 +353,7 @@
assert(h_exception.not_null(), "NULL exceptions should be handled by athrow");
assert(h_exception->is_oop(), "just checking");
// Check that exception is a subclass of Throwable, otherwise we have a VerifyError
- if (!(h_exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(h_exception->is_a(SystemDictionary::Throwable_klass()))) {
if (ExitVMOnVerifyError) vm_exit(-1);
ShouldNotReachHere();
}
@@ -585,7 +585,7 @@
Handle exception(thread, thread->vm_result());
assert(exception() != NULL, "vm result should be set");
thread->set_vm_result(NULL); // clear vm result before continuing (may cause memory leaks and assert failures)
- if (!exception->is_a(SystemDictionary::threaddeath_klass())) {
+ if (!exception->is_a(SystemDictionary::ThreadDeath_klass())) {
exception = get_preinitialized_exception(
SystemDictionary::IllegalMonitorStateException_klass(),
CATCH);
@@ -660,7 +660,7 @@
tty->print_cr("Resolving: klass: %s to method: %s", info.resolved_klass()->name()->as_C_string(), info.resolved_method()->name()->as_C_string());
}
if (info.resolved_method()->method_holder() ==
- SystemDictionary::object_klass()) {
+ SystemDictionary::Object_klass()) {
// NOTE: THIS IS A FIX FOR A CORNER CASE in the JVM spec
// (see also cpCacheOop.cpp for details)
methodHandle rm = info.resolved_method();
@@ -1250,7 +1250,7 @@
methodHandle mh(thread, fr.interpreter_frame_method());
Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci);
ArgumentSizeComputer asc(invoke->signature());
- int size_of_arguments = (asc.size() + (invoke->is_invokestatic() ? 0 : 1)); // receiver
+ int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver
Copy::conjoint_bytes(src_address, dest_address,
size_of_arguments * Interpreter::stackElementSize());
IRT_END
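The argument-copy fix above keys off has_receiver() rather than !is_invokestatic(), because invokedynamic, like invokestatic, pushes no receiver slot. A tiny standalone sketch of that accounting (illustrative enum, not the HotSpot bytecode types):

// Standalone sketch: invokestatic and invokedynamic push no receiver slot,
// so the receiver adjustment must key off both.
#include <cstdio>

enum InvokeKind { invokevirtual, invokespecial, invokestatic, invokeinterface, invokedynamic };

static bool has_receiver(InvokeKind kind) {
  return kind != invokestatic && kind != invokedynamic;
}

static int size_of_arguments(InvokeKind kind, int declared_arg_slots) {
  return declared_arg_slots + (has_receiver(kind) ? 1 : 0);
}

int main() {
  printf("%d\n", size_of_arguments(invokevirtual, 2));  // 3: receiver + 2 args
  printf("%d\n", size_of_arguments(invokedynamic, 2));  // 2: no receiver slot
  return 0;
}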
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -55,7 +55,7 @@
// we should pick the vtable index from the resolved method.
// Other than that case, there is no valid vtable index to specify.
int vtable_index = methodOopDesc::invalid_vtable_index;
- if (resolved_method->method_holder() == SystemDictionary::object_klass()) {
+ if (resolved_method->method_holder() == SystemDictionary::Object_klass()) {
assert(resolved_method->vtable_index() == selected_method->vtable_index(), "sanity check");
vtable_index = resolved_method->vtable_index();
}
@@ -75,6 +75,8 @@
_selected_method = selected_method;
_vtable_index = vtable_index;
if (CompilationPolicy::mustBeCompiled(selected_method)) {
+ // This path is unusual, mostly used by the '-Xcomp' stress test mode.
+
// Note: with several active threads, the mustBeCompiled may be true
// while canBeCompiled is false; remove assert
// assert(CompilationPolicy::canBeCompiled(selected_method), "cannot compile");
@@ -82,6 +84,16 @@
// don't force compilation, resolve was on behalf of compiler
return;
}
+ if (instanceKlass::cast(selected_method->method_holder())->is_not_initialized()) {
+ // 'is_not_initialized' means not only '!is_initialized', but also that
+ // initialization has not been started yet ('!being_initialized')
+ // Do not force compilation of methods in uninitialized classes.
+ // Note that forcing compilation here would trip an assert later,
+ // in CompileBroker::compile_method.
+ // We sometimes use the link resolver to do reflective lookups
+ // even before classes are initialized.
+ return;
+ }
CompileBroker::compile_method(selected_method, InvocationEntryBci,
methodHandle(), 0, "mustBeCompiled", CHECK);
}
@@ -181,7 +193,7 @@
// We'll check for the method name first, as that's most likely
// to be false (so we'll short-circuit out of these tests).
if (sel_method->name() == vmSymbols::clone_name() &&
- sel_klass() == SystemDictionary::object_klass() &&
+ sel_klass() == SystemDictionary::Object_klass() &&
resolved_klass->oop_is_array()) {
// We need to change "protected" to "public".
assert(flags.is_protected(), "clone not protected?");
@@ -223,6 +235,18 @@
resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
}
+void LinkResolver::resolve_dynamic_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
+ // The class is java.dyn.MethodHandle
+ resolved_klass = SystemDictionaryHandles::MethodHandle_klass();
+
+ symbolHandle method_name = vmSymbolHandles::invoke_name();
+
+ symbolHandle method_signature(THREAD, pool->signature_ref_at(index));
+ KlassHandle current_klass (THREAD, pool->pool_holder());
+
+ resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
+}
+
void LinkResolver::resolve_interface_method(methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS) {
// resolve klass
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -133,6 +133,7 @@
// static resolving for all calls except interface calls
static void resolve_method (methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
+ static void resolve_dynamic_method (methodHandle& resolved_method, KlassHandle& resolved_klass, constantPoolHandle pool, int index, TRAPS);
static void resolve_interface_method(methodHandle& method_result, KlassHandle& klass_result, constantPoolHandle pool, int index, TRAPS);
// runtime/static resolving for fields
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -247,15 +247,22 @@
void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
ResourceMark rm(THREAD);
- Rewriter rw(klass, CHECK);
+ Rewriter rw(klass, klass->constants(), klass->methods(), CHECK);
// (That's all, folks.)
}
-Rewriter::Rewriter(instanceKlassHandle klass, TRAPS)
+
+void Rewriter::rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS) {
+ ResourceMark rm(THREAD);
+ Rewriter rw(klass, cpool, methods, CHECK);
+ // (That's all, folks.)
+}
+
+
+Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS)
: _klass(klass),
- // gather starting points
- _pool( THREAD, klass->constants()),
- _methods(THREAD, klass->methods())
+ _pool(cpool),
+ _methods(methods)
{
assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -57,7 +57,7 @@
}
// All the work goes in here:
- Rewriter(instanceKlassHandle klass, TRAPS);
+ Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
void compute_index_maps();
void make_constant_pool_cache(TRAPS);
@@ -70,6 +70,7 @@
public:
// Driver routine:
static void rewrite(instanceKlassHandle klass, TRAPS);
+ static void rewrite(instanceKlassHandle klass, constantPoolHandle cpool, objArrayHandle methods, TRAPS);
enum {
_secondary_entry_tag = nth_bit(30)
--- a/hotspot/src/share/vm/memory/classify.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/classify.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -49,7 +49,7 @@
Klass* k = obj->blueprint();
- if (k->as_klassOop() == SystemDictionary::object_klass()) {
+ if (k->as_klassOop() == SystemDictionary::Object_klass()) {
tty->print_cr("Found the class!");
}
--- a/hotspot/src/share/vm/memory/defNewGeneration.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/defNewGeneration.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -609,7 +609,7 @@
remove_forwarding_pointers();
if (PrintGCDetails) {
- gclog_or_tty->print(" (promotion failed)");
+ gclog_or_tty->print(" (promotion failed) ");
}
// Add to-space to the list of space to compact
// when a promotion failure has occurred. In that
@@ -620,6 +620,9 @@
from()->set_next_compaction_space(to());
gch->set_incremental_collection_will_fail();
+ // Inform the next generation that a promotion failure occurred.
+ _next_gen->promotion_failure_occurred();
+
// Reset the PromotionFailureALot counters.
NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
}
@@ -679,6 +682,11 @@
void DefNewGeneration::handle_promotion_failure(oop old) {
preserve_mark_if_necessary(old, old->mark());
+ if (!_promotion_failed && PrintPromotionFailure) {
+ gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
+ old->size());
+ }
+
// forward to self
old->forward_to(old);
_promotion_failed = true;
--- a/hotspot/src/share/vm/memory/dump.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/dump.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -63,7 +63,7 @@
void do_oop(oop* p) {
if (p != NULL) {
oop obj = *p;
- if (obj->klass() == SystemDictionary::string_klass()) {
+ if (obj->klass() == SystemDictionary::String_klass()) {
int hash;
typeArrayOop value = java_lang_String::value(obj);
@@ -625,11 +625,11 @@
if (obj->is_klass() || obj->is_instance()) {
if (obj->is_klass() ||
- obj->is_a(SystemDictionary::class_klass()) ||
- obj->is_a(SystemDictionary::throwable_klass())) {
+ obj->is_a(SystemDictionary::Class_klass()) ||
+ obj->is_a(SystemDictionary::Throwable_klass())) {
// Do nothing
}
- else if (obj->is_a(SystemDictionary::string_klass())) {
+ else if (obj->is_a(SystemDictionary::String_klass())) {
// immutable objects.
} else {
// someone added an object we hadn't accounted for.
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -925,6 +925,8 @@
guarantee(VerifyBeforeGC ||
VerifyDuringGC ||
VerifyBeforeExit ||
+ PrintAssembly ||
+ tty->count() != 0 || // already printing
VerifyAfterGC, "too expensive");
#endif
// This might be sped up with a cache of the last generation that
--- a/hotspot/src/share/vm/memory/generation.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/generation.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -181,6 +181,12 @@
virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes,
bool younger_handles_promotion_failure) const;
+ // For a non-young generation, this interface can be used to inform a
+ // generation that a promotion attempt into that generation failed.
+ // Typically used to enable diagnostic output for post-mortem analysis,
+ // but other uses of the interface are not ruled out.
+ virtual void promotion_failure_occurred() { /* does nothing */ }
+
// Return an estimate of the maximum allocation that could be performed
// in the generation without triggering any collection or expansion
// activity. It is "unsafe" because no locks are taken; the result
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -71,7 +71,7 @@
assert(_sentinelRef == NULL, "should be initialized precisely once");
EXCEPTION_MARK;
_sentinelRef = instanceKlass::cast(
- SystemDictionary::reference_klass())->
+ SystemDictionary::Reference_klass())->
allocate_permanent_instance(THREAD);
// Initialize the master soft ref clock.
--- a/hotspot/src/share/vm/memory/space.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/space.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -876,7 +876,7 @@
instanceOop obj = (instanceOop) allocate(size);
obj->set_mark(markOopDesc::prototype());
obj->set_klass_gap(0);
- obj->set_klass(SystemDictionary::object_klass());
+ obj->set_klass(SystemDictionary::Object_klass());
}
}
--- a/hotspot/src/share/vm/memory/universe.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/memory/universe.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -291,7 +291,7 @@
SystemDictionary::initialize(CHECK);
- klassOop ok = SystemDictionary::object_klass();
+ klassOop ok = SystemDictionary::Object_klass();
_the_null_string = StringTable::intern("null", CHECK);
_the_min_jint_string = StringTable::intern("-2147483648", CHECK);
@@ -299,9 +299,9 @@
if (UseSharedSpaces) {
// Verify shared interfaces array.
assert(_the_array_interfaces_array->obj_at(0) ==
- SystemDictionary::cloneable_klass(), "u3");
+ SystemDictionary::Cloneable_klass(), "u3");
assert(_the_array_interfaces_array->obj_at(1) ==
- SystemDictionary::serializable_klass(), "u3");
+ SystemDictionary::Serializable_klass(), "u3");
// Verify element klass for system obj array klass
assert(objArrayKlass::cast(_systemObjArrayKlassObj)->element_klass() == ok, "u1");
@@ -320,8 +320,8 @@
assert(Klass::cast(systemObjArrayKlassObj())->super() == ok, "u3");
} else {
// Set up shared interfaces array. (Do this before supers are set up.)
- _the_array_interfaces_array->obj_at_put(0, SystemDictionary::cloneable_klass());
- _the_array_interfaces_array->obj_at_put(1, SystemDictionary::serializable_klass());
+ _the_array_interfaces_array->obj_at_put(0, SystemDictionary::Cloneable_klass());
+ _the_array_interfaces_array->obj_at_put(1, SystemDictionary::Serializable_klass());
// Set element klass for system obj array klass
objArrayKlass::cast(_systemObjArrayKlassObj)->set_element_klass(ok);
@@ -365,7 +365,7 @@
// Initialize _objectArrayKlass after core bootstrapping to make
// sure the super class is set up properly for _objectArrayKlass.
_objectArrayKlassObj = instanceKlass::
- cast(SystemDictionary::object_klass())->array_klass(1, CHECK);
+ cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
// Add the class to the class hierarchy manually to make sure that
// its vtable is initialized after core bootstrapping is completed.
Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();
@@ -426,11 +426,11 @@
while (i < size) {
if (!UseConcMarkSweepGC) {
// Allocate dummy in old generation
- oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_instance(CHECK);
+ oop dummy = instanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
dummy_array->obj_at_put(i++, dummy);
}
// Allocate dummy in permanent generation
- oop dummy = instanceKlass::cast(SystemDictionary::object_klass())->allocate_permanent_instance(CHECK);
+ oop dummy = instanceKlass::cast(SystemDictionary::Object_klass())->allocate_permanent_instance(CHECK);
dummy_array->obj_at_put(i++, dummy);
}
{
@@ -540,7 +540,7 @@
// but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
// walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
// that the number of objects allocated at this point is very small.
- assert(SystemDictionary::class_klass_loaded(), "java.lang.Class should be loaded");
+ assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
FixupMirrorClosure blk;
Universe::heap()->permanent_object_iterate(&blk);
}
@@ -556,7 +556,7 @@
if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
{
PRESERVE_EXCEPTION_MARK;
- KlassHandle finalizer_klass(THREAD, SystemDictionary::finalizer_klass());
+ KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
JavaValue result(T_VOID);
JavaCalls::call_static(
&result,
@@ -950,7 +950,7 @@
{ ResourceMark rm;
Interpreter::initialize(); // needed for interpreter entry points
if (!UseSharedSpaces) {
- KlassHandle ok_h(THREAD, SystemDictionary::object_klass());
+ KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
Universe::reinitialize_vtable_of(ok_h, CHECK_false);
Universe::reinitialize_itables(CHECK_false);
}
@@ -960,7 +960,7 @@
instanceKlassHandle k_h;
if (!UseSharedSpaces) {
// Setup preallocated empty java.lang.Class array
- Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_false);
+ Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
// Setup preallocated OutOfMemoryError errors
k = SystemDictionary::resolve_or_fail(vmSymbolHandles::java_lang_OutOfMemoryError(), true, CHECK_false);
k_h = instanceKlassHandle(THREAD, k);
@@ -1027,8 +1027,8 @@
// Setup static method for registering finalizers
// The finalizer klass must be linked before looking up the method, in
// case it needs to get rewritten.
- instanceKlass::cast(SystemDictionary::finalizer_klass())->link_class(CHECK_false);
- methodOop m = instanceKlass::cast(SystemDictionary::finalizer_klass())->find_method(
+ instanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
+ methodOop m = instanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
vmSymbols::register_method_name(),
vmSymbols::register_method_signature());
if (m == NULL || !m->is_static()) {
@@ -1036,7 +1036,7 @@
"java.lang.ref.Finalizer.register", false);
}
Universe::_finalizer_register_cache->init(
- SystemDictionary::finalizer_klass(), m, CHECK_false);
+ SystemDictionary::Finalizer_klass(), m, CHECK_false);
// Resolve on first use and initialize class.
// Note: No race-condition here, since a resolve will always return the same result
@@ -1053,14 +1053,14 @@
Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
// Setup method for registering loaded classes in class loader vector
- instanceKlass::cast(SystemDictionary::classloader_klass())->link_class(CHECK_false);
- m = instanceKlass::cast(SystemDictionary::classloader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
+ instanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
+ m = instanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
if (m == NULL || m->is_static()) {
THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
"java.lang.ClassLoader.addClass", false);
}
Universe::_loader_addClass_cache->init(
- SystemDictionary::classloader_klass(), m, CHECK_false);
+ SystemDictionary::ClassLoader_klass(), m, CHECK_false);
// The following is initializing converter functions for serialization in
// JVM.cpp. If we clean up the StrictMath code above we may want to find
--- a/hotspot/src/share/vm/oops/arrayKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/arrayKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -43,7 +43,7 @@
if (super() == NULL) return NULL; // bootstrap case
// Array klasses have primary supertypes which are not reported to Java.
// Example super chain: String[][] -> Object[][] -> Object[] -> Object
- return SystemDictionary::object_klass();
+ return SystemDictionary::Object_klass();
}
@@ -82,7 +82,7 @@
k = arrayKlassHandle(THREAD, base_klass());
assert(!k()->is_parsable(), "not expecting parsability yet.");
- k->set_super(Universe::is_bootstrapping() ? (klassOop)NULL : SystemDictionary::object_klass());
+ k->set_super(Universe::is_bootstrapping() ? (klassOop)NULL : SystemDictionary::Object_klass());
k->set_layout_helper(Klass::_lh_neutral_value);
k->set_dimension(1);
k->set_higher_dimension(NULL);
@@ -117,9 +117,9 @@
bool arrayKlass::compute_is_subtype_of(klassOop k) {
// An array is a subtype of Serializable, Cloneable, and Object
- return k == SystemDictionary::object_klass()
- || k == SystemDictionary::cloneable_klass()
- || k == SystemDictionary::serializable_klass();
+ return k == SystemDictionary::Object_klass()
+ || k == SystemDictionary::Cloneable_klass()
+ || k == SystemDictionary::Serializable_klass();
}
--- a/hotspot/src/share/vm/oops/arrayKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/arrayKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -67,7 +67,7 @@
// Compiler/Interpreter offset
static ByteSize component_mirror_offset() { return byte_offset_of(arrayKlass, _component_mirror); }
- virtual klassOop java_super() const;//{ return SystemDictionary::object_klass(); }
+ virtual klassOop java_super() const;//{ return SystemDictionary::Object_klass(); }
// Allocation
// Sizes points to the first dimension of the array, subsequent dimensions
--- a/hotspot/src/share/vm/oops/arrayKlassKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/arrayKlassKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -159,7 +159,7 @@
assert(obj->is_klass(), "must be klass");
klassKlass::oop_print_on(obj, st);
}
-
+#endif //PRODUCT
void arrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -168,7 +168,6 @@
st->print("[]");
}
}
-#endif
const char* arrayKlassKlass::internal_name() const {
--- a/hotspot/src/share/vm/oops/arrayKlassKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/arrayKlassKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -55,14 +55,13 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/compiledICHolderKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -166,12 +166,12 @@
st->print(" - klass: "); c->holder_klass()->print_value_on(st); st->cr();
}
+#endif //PRODUCT
void compiledICHolderKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_compiledICHolder(), "must be compiledICHolder");
Klass::oop_print_value_on(obj, st);
}
-#endif
const char* compiledICHolderKlass::internal_name() const {
return "{compiledICHolder}";
--- a/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/compiledICHolderKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -68,14 +68,13 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/constMethodKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/constMethodKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -216,6 +216,7 @@
}
}
+#endif //PRODUCT
// Short version of printing constMethodOop - just print the name of the
// method it belongs to.
@@ -226,8 +227,6 @@
m->method()->print_value_on(st);
}
-#endif // PRODUCT
-
const char* constMethodKlass::internal_name() const {
return "{constMethod}";
}
--- a/hotspot/src/share/vm/oops/constMethodKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/constMethodKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -77,15 +77,13 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
+#endif //PRODUCT
-#endif
-
- public:
// Verify operations
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/constMethodOop.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/constMethodOop.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -258,6 +258,11 @@
LocalVariableTableElement* localvariable_table_start() const;
// byte codes
+ void set_code(address code) {
+ if (code_size() > 0) {
+ memcpy(code_base(), code, code_size());
+ }
+ }
address code_base() const { return (address) (this+1); }
address code_end() const { return code_base() + code_size(); }
bool contains(address bcp) const { return code_base() <= bcp
--- a/hotspot/src/share/vm/oops/constantPoolKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -387,8 +387,18 @@
cp->set_cache(cache());
}
+#endif
-#endif
+void constantPoolKlass::oop_print_value_on(oop obj, outputStream* st) {
+ assert(obj->is_constantPool(), "must be constantPool");
+ constantPoolOop cp = constantPoolOop(obj);
+ st->print("constant pool [%d]", cp->length());
+ if (cp->has_pseudo_string()) st->print("/pseudo_string");
+ if (cp->has_invokedynamic()) st->print("/invokedynamic");
+ cp->print_address_on(st);
+ st->print(" for ");
+ cp->pool_holder()->print_value_on(st);
+}
const char* constantPoolKlass::internal_name() const {
return "{constant pool}";
--- a/hotspot/src/share/vm/oops/constantPoolKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/constantPoolKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -65,9 +65,10 @@
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
#endif
--- a/hotspot/src/share/vm/oops/constantPoolOop.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/constantPoolOop.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -110,7 +110,7 @@
}
if (!PENDING_EXCEPTION->
- is_a(SystemDictionary::linkageError_klass())) {
+ is_a(SystemDictionary::LinkageError_klass())) {
// Just throw the exception and don't prevent these classes from
// being loaded due to virtual machine errors like StackOverflow
// and OutOfMemoryError, etc, or if the thread was hit by stop()
--- a/hotspot/src/share/vm/oops/constantPoolOop.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/constantPoolOop.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -191,6 +191,16 @@
}
}
+ void object_at_put(int which, oop str) {
+ oop_store((volatile oop*) obj_at_addr(which), str);
+ release_tag_at_put(which, JVM_CONSTANT_Object);
+ if (UseConcMarkSweepGC) {
+ // In case the earlier card-mark was consumed by a concurrent
+ // marking thread before the tag was updated, redirty the card.
+ oop_store_without_check((volatile oop*) obj_at_addr(which), str);
+ }
+ }
+
// For temporary use while constructing constant pool
void string_index_at_put(int which, int string_index) {
tag_at_put(which, JVM_CONSTANT_StringIndex);
@@ -228,7 +238,8 @@
tag.is_unresolved_klass() ||
tag.is_symbol() ||
tag.is_unresolved_string() ||
- tag.is_string();
+ tag.is_string() ||
+ tag.is_object();
}
// Fetching constants
@@ -291,6 +302,11 @@
return string_at_impl(h_this, which, CHECK_NULL);
}
+ oop object_at(int which) {
+ assert(tag_at(which).is_object(), "Corrupted constant pool");
+ return *obj_at_addr(which);
+ }
+
// A "pseudo-string" is an non-string oop that has found is way into
// a String entry.
// Under AnonymousClasses this can happen if the user patches a live
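object_at_put() above stores the oop first and only then publishes the JVM_CONSTANT_Object tag with a release store, so a reader that checks the tag (as object_at() asserts) also sees the stored value; the extra store under CMS re-dirties the card in case a concurrent marker consumed the first card-mark before the tag landed. A standalone sketch of the publish-then-tag idea using std::atomic (illustrative types only; the real code relies on the VM's oop_store and release_tag_at_put primitives):

// Standalone sketch: publish the slot's value before release-storing its tag,
// so any reader that observes the tag with an acquire load also observes the value.
#include <atomic>
#include <cassert>
#include <cstdio>

enum Tag { TAG_INVALID = 0, TAG_OBJECT = 1 };

struct Slot {
  void*            _value;
  std::atomic<int> _tag;

  Slot() : _value(nullptr), _tag(TAG_INVALID) {}

  void object_at_put(void* obj) {
    _value = obj;                                        // 1. store the value
    _tag.store(TAG_OBJECT, std::memory_order_release);   // 2. then publish the tag
  }
  void* object_at() {
    assert(_tag.load(std::memory_order_acquire) == TAG_OBJECT && "corrupted slot");
    return _value;
  }
};

int main() {
  static int payload = 42;
  Slot s;
  s.object_at_put(&payload);
  printf("%d\n", *(int*)s.object_at());   // 42
  return 0;
}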
--- a/hotspot/src/share/vm/oops/cpCacheKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -261,6 +261,15 @@
#endif
+void constantPoolCacheKlass::oop_print_value_on(oop obj, outputStream* st) {
+ assert(obj->is_constantPoolCache(), "obj must be constant pool cache");
+ constantPoolCacheOop cache = (constantPoolCacheOop)obj;
+ st->print("cache [%d]", cache->length());
+ cache->print_address_on(st);
+ st->print(" for ");
+ cache->constant_pool()->print_value_on(st);
+}
+
void constantPoolCacheKlass::oop_verify_on(oop obj, outputStream* st) {
guarantee(obj->is_constantPoolCache(), "obj must be constant pool cache");
constantPoolCacheOop cache = (constantPoolCacheOop)obj;
--- a/hotspot/src/share/vm/oops/cpCacheKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/cpCacheKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -61,9 +61,10 @@
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
#endif
--- a/hotspot/src/share/vm/oops/generateOopMap.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/generateOopMap.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1556,13 +1556,13 @@
case Bytecodes::_getfield: do_field(true, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_putfield: do_field(false, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_invokevirtual:
- case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break;
+ case Bytecodes::_invokevirtual:
+ case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break;
+ case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_int(), itr->bci()); break;
- case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break;
- case Bytecodes::_newarray:
- case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
+ case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break;
+ case Bytecodes::_newarray:
+ case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
case Bytecodes::_checkcast: do_checkcast(); break;
case Bytecodes::_arraylength:
case Bytecodes::_instanceof: pp(rCTS, vCTS); break;
@@ -1830,12 +1830,8 @@
void GenerateOopMap::do_ldc(int idx, int bci) {
- constantPoolOop cp = method()->constants();
- constantTag tag = cp->tag_at(idx);
-
- CellTypeState cts = (tag.is_string() || tag.is_unresolved_string() ||
- tag.is_klass() || tag.is_unresolved_klass())
- ? CellTypeState::make_line_ref(bci) : valCTS;
+ constantPoolOop cp = method()->constants();
+ CellTypeState cts = cp->is_pointer_entry(idx) ? CellTypeState::make_line_ref(bci) : valCTS;
ppush1(cts);
}
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -383,7 +383,7 @@
this_oop->set_initialization_state_and_notify(initialization_error, THREAD);
CLEAR_PENDING_EXCEPTION; // ignore any exception thrown, class initialization error is thrown below
}
- if (e->is_a(SystemDictionary::error_klass())) {
+ if (e->is_a(SystemDictionary::Error_klass())) {
THROW_OOP(e());
} else {
JavaCallArguments args(e);
@@ -568,7 +568,7 @@
THROW_MSG(throwError ? vmSymbols::java_lang_InstantiationError()
: vmSymbols::java_lang_InstantiationException(), external_name());
}
- if (as_klassOop() == SystemDictionary::class_klass()) {
+ if (as_klassOop() == SystemDictionary::Class_klass()) {
ResourceMark rm(THREAD);
THROW_MSG(throwError ? vmSymbols::java_lang_IllegalAccessError()
: vmSymbols::java_lang_IllegalAccessException(), external_name());
@@ -2225,7 +2225,7 @@
void instanceKlass::oop_print_on(oop obj, outputStream* st) {
Klass::oop_print_on(obj, st);
- if (as_klassOop() == SystemDictionary::string_klass()) {
+ if (as_klassOop() == SystemDictionary::String_klass()) {
typeArrayOop value = java_lang_String::value(obj);
juint offset = java_lang_String::offset(obj);
juint length = java_lang_String::length(obj);
@@ -2245,7 +2245,7 @@
FieldPrinter print_nonstatic_field(st, obj);
do_nonstatic_fields(&print_nonstatic_field);
- if (as_klassOop() == SystemDictionary::class_klass()) {
+ if (as_klassOop() == SystemDictionary::Class_klass()) {
st->print(BULLET"signature: ");
java_lang_Class::print_signature(obj, st);
st->cr();
@@ -2268,11 +2268,13 @@
}
}
+#endif //PRODUCT
+
void instanceKlass::oop_print_value_on(oop obj, outputStream* st) {
st->print("a ");
name()->print_value_on(st);
obj->print_address_on(st);
- if (as_klassOop() == SystemDictionary::string_klass()
+ if (as_klassOop() == SystemDictionary::String_klass()
&& java_lang_String::value(obj) != NULL) {
ResourceMark rm;
int len = java_lang_String::length(obj);
@@ -2281,7 +2283,7 @@
st->print(" = \"%s\"", str);
if (len > plen)
st->print("...[%d]", len);
- } else if (as_klassOop() == SystemDictionary::class_klass()) {
+ } else if (as_klassOop() == SystemDictionary::Class_klass()) {
klassOop k = java_lang_Class::as_klassOop(obj);
st->print(" = ");
if (k != NULL) {
@@ -2299,8 +2301,6 @@
}
}
-#endif // ndef PRODUCT
-
const char* instanceKlass::internal_name() const {
return external_name();
}
@@ -2348,7 +2348,7 @@
// Check that we have the right class
static bool first_time = true;
- guarantee(k == SystemDictionary::class_klass() && first_time, "Invalid verify of maps");
+ guarantee(k == SystemDictionary::Class_klass() && first_time, "Invalid verify of maps");
first_time = false;
const int extra = java_lang_Class::number_of_fake_oop_fields;
guarantee(ik->nonstatic_field_size() == extra, "just checking");
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -839,17 +839,16 @@
// JVMTI support
jint jvmti_class_status() const;
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
void print_dependent_nmethods(bool verbose = false);
bool is_dependent_nmethod(nmethod* nm);
#endif
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -638,6 +638,7 @@
st->cr();
}
+#endif //PRODUCT
void instanceKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -645,8 +646,6 @@
ik->name()->print_value_on(st);
}
-#endif // PRODUCT
-
const char* instanceKlassKlass::internal_name() const {
return "{instance class}";
}
--- a/hotspot/src/share/vm/oops/instanceKlassKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/instanceKlassKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -69,14 +69,13 @@
// Apply closure to the InstanceKlass oops that are outside the java heap.
inline void iterate_c_heap_oops(instanceKlass* ik, OopClosure* closure);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
#endif
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/instanceRefKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/instanceRefKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -397,7 +397,7 @@
// Check that we have the right class
debug_only(static bool first_time = true);
- assert(k == SystemDictionary::reference_klass() && first_time,
+ assert(k == SystemDictionary::Reference_klass() && first_time,
"Invalid update of maps");
debug_only(first_time = false);
assert(ik->nonstatic_oop_map_count() == 1, "just checking");
--- a/hotspot/src/share/vm/oops/klass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/klass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -217,8 +217,8 @@
set_super(NULL);
oop_store_without_check((oop*) &_primary_supers[0], (oop) this->as_klassOop());
assert(super_depth() == 0, "Object must already be initialized properly");
- } else if (k != super() || k == SystemDictionary::object_klass()) {
- assert(super() == NULL || super() == SystemDictionary::object_klass(),
+ } else if (k != super() || k == SystemDictionary::Object_klass()) {
+ assert(super() == NULL || super() == SystemDictionary::Object_klass(),
"initialize this only once to a non-trivial value");
set_super(k);
Klass* sup = k->klass_part();
@@ -370,7 +370,7 @@
void Klass::remove_from_sibling_list() {
// remove receiver from sibling list
instanceKlass* super = superklass();
- assert(super != NULL || as_klassOop() == SystemDictionary::object_klass(), "should have super");
+ assert(super != NULL || as_klassOop() == SystemDictionary::Object_klass(), "should have super");
if (super == NULL) return; // special case: class Object
if (super->subklass() == this) {
// first subklass
@@ -541,6 +541,7 @@
st->cr();
}
+#endif //PRODUCT
void Klass::oop_print_value_on(oop obj, outputStream* st) {
// print title
@@ -549,8 +550,6 @@
obj->print_address_on(st);
}
-#endif
-
// Verification
void Klass::oop_verify_on(oop obj, outputStream* st) {
--- a/hotspot/src/share/vm/oops/klass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/klass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -776,14 +776,13 @@
// JVMTI support
virtual jint jvmti_class_status() const;
-#ifndef PRODUCT
public:
// Printing
+ virtual void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
virtual void oop_print_on (oop obj, outputStream* st);
- virtual void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
- public:
// Verification
virtual const char* internal_name() const = 0;
virtual void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/klassKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/klassKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -202,13 +202,12 @@
Klass::oop_print_on(obj, st);
}
+#endif //PRODUCT
void klassKlass::oop_print_value_on(oop obj, outputStream* st) {
Klass::oop_print_value_on(obj, st);
}
-#endif
-
const char* klassKlass::internal_name() const {
return "{other class}";
}
--- a/hotspot/src/share/vm/oops/klassKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/klassKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -67,14 +67,13 @@
juint alloc_size() const { return _alloc_size; }
void set_alloc_size(juint n) { _alloc_size = n; }
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/methodDataKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodDataKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -214,6 +214,8 @@
m->print_data_on(st);
}
+#endif //PRODUCT
+
void methodDataKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_methodData(), "should be method data");
methodDataOop m = methodDataOop(obj);
@@ -221,8 +223,6 @@
m->method()->print_value_on(st);
}
-#endif // !PRODUCT
-
const char* methodDataKlass::internal_name() const {
return "{method data}";
}
--- a/hotspot/src/share/vm/oops/methodDataKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodDataKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -71,14 +71,13 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif // !PRODUCT
+#endif //PRODUCT
- public:
// Verify operations
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/methodKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -308,6 +308,7 @@
}
}
+#endif //PRODUCT
void methodKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_method(), "must be method");
@@ -323,8 +324,6 @@
if (WizardMode && m->code() != NULL) st->print(" ((nmethod*)%p)", m->code());
}
-#endif // PRODUCT
-
const char* methodKlass::internal_name() const {
return "{method}";
}
--- a/hotspot/src/share/vm/oops/methodKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -68,14 +68,13 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
- public:
// Verify operations
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/methodOop.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodOop.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -456,12 +456,12 @@
return objArrayHandle(THREAD, Universe::the_empty_class_klass_array());
} else {
methodHandle h_this(THREAD, this_oop);
- objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::class_klass(), length, CHECK_(objArrayHandle()));
+ objArrayOop m_oop = oopFactory::new_objArray(SystemDictionary::Class_klass(), length, CHECK_(objArrayHandle()));
objArrayHandle mirrors (THREAD, m_oop);
for (int i = 0; i < length; i++) {
CheckedExceptionElement* table = h_this->checked_exceptions_start(); // recompute on each iteration, not gc safe
klassOop k = h_this->constants()->klass_at(table[i].class_cp_index, CHECK_(objArrayHandle()));
- assert(Klass::cast(k)->is_subclass_of(SystemDictionary::throwable_klass()), "invalid exception class");
+ assert(Klass::cast(k)->is_subclass_of(SystemDictionary::Throwable_klass()), "invalid exception class");
mirrors->obj_at_put(i, Klass::cast(k)->java_mirror());
}
return mirrors;
@@ -821,6 +821,18 @@
return pchase;
}
+//------------------------------------------------------------------------------
+// methodOopDesc::is_method_handle_adapter
+//
+// Tests if this method is an internal adapter frame from the
+// MethodHandleCompiler.
+bool methodOopDesc::is_method_handle_adapter() const {
+ return ((name() == vmSymbols::invoke_name() &&
+ method_holder() == SystemDictionary::MethodHandle_klass())
+ ||
+ method_holder() == SystemDictionary::InvokeDynamic_klass());
+}
+
methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
symbolHandle signature,
Handle method_type, TRAPS) {
@@ -1032,8 +1044,8 @@
// We are loading classes eagerly. If a ClassNotFoundException or
// a LinkageError was generated, be sure to ignore it.
if (HAS_PENDING_EXCEPTION) {
- if (PENDING_EXCEPTION->is_a(SystemDictionary::classNotFoundException_klass()) ||
- PENDING_EXCEPTION->is_a(SystemDictionary::linkageError_klass())) {
+ if (PENDING_EXCEPTION->is_a(SystemDictionary::ClassNotFoundException_klass()) ||
+ PENDING_EXCEPTION->is_a(SystemDictionary::LinkageError_klass())) {
CLEAR_PENDING_EXCEPTION;
} else {
return false;
--- a/hotspot/src/share/vm/oops/methodOop.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/methodOop.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -365,6 +365,7 @@
#endif
// byte codes
+ void set_code(address code) { return constMethod()->set_code(code); }
address code_base() const { return constMethod()->code_base(); }
bool contains(address bcp) const { return constMethod()->contains(bcp); }
@@ -524,6 +525,9 @@
// JSR 292 support
bool is_method_handle_invoke() const { return access_flags().is_method_handle_invoke(); }
+ // Tests if this method is an internal adapter frame from the
+ // MethodHandleCompiler.
+ bool is_method_handle_adapter() const;
static methodHandle make_invoke_method(KlassHandle holder,
symbolHandle signature,
Handle method_type,
@@ -537,6 +541,7 @@
// all without checking for a stack overflow
static int extra_stack_entries() { return (EnableMethodHandles ? (int)MethodHandlePushLimit : 0) + (EnableInvokeDynamic ? 3 : 0); }
static int extra_stack_words(); // = extra_stack_entries() * Interpreter::stackElementSize()
+
// RedefineClasses() support:
bool is_old() const { return access_flags().is_old(); }
void set_is_old() { _access_flags.set_is_old(); }
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -246,8 +246,8 @@
} else {
objArrayOop sec_oop = oopFactory::new_system_objArray(num_secondaries, CHECK_NULL);
objArrayHandle secondaries(THREAD, sec_oop);
- secondaries->obj_at_put(num_extra_slots+0, SystemDictionary::cloneable_klass());
- secondaries->obj_at_put(num_extra_slots+1, SystemDictionary::serializable_klass());
+ secondaries->obj_at_put(num_extra_slots+0, SystemDictionary::Cloneable_klass());
+ secondaries->obj_at_put(num_extra_slots+1, SystemDictionary::Serializable_klass());
for (int i = 0; i < num_elem_supers; i++) {
klassOop elem_super = (klassOop) elem_supers->obj_at(i);
klassOop array_super = elem_super->klass_part()->array_klass_or_null();
@@ -499,6 +499,8 @@
}
}
+#endif //PRODUCT
+
static int max_objArray_print_length = 4;
void objArrayKlass::oop_print_value_on(oop obj, outputStream* st) {
@@ -508,7 +510,7 @@
int len = objArrayOop(obj)->length();
st->print("[%d] ", len);
obj->print_address_on(st);
- if (PrintOopAddress || PrintMiscellaneous && (WizardMode || Verbose)) {
+ if (NOT_PRODUCT(PrintOopAddress ||) PrintMiscellaneous && (WizardMode || Verbose)) {
st->print("{");
for (int i = 0; i < len; i++) {
if (i > max_objArray_print_length) {
@@ -520,8 +522,6 @@
}
}
-#endif // PRODUCT
-
const char* objArrayKlass::internal_name() const {
return external_name();
}
--- a/hotspot/src/share/vm/oops/objArrayKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/objArrayKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -119,14 +119,13 @@
private:
static klassOop array_klass_impl (objArrayKlassHandle this_oop, bool or_null, int n, TRAPS);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on (oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/objArrayKlassKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -99,7 +99,7 @@
}
} else {
// The element type is already Object. Object[] has direct super of Object.
- super_klass = KlassHandle(THREAD, SystemDictionary::object_klass());
+ super_klass = KlassHandle(THREAD, SystemDictionary::Object_klass());
}
}
@@ -278,6 +278,7 @@
st->cr();
}
+#endif //PRODUCT
void objArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -287,8 +288,6 @@
st->print("[]");
}
-#endif
-
const char* objArrayKlassKlass::internal_name() const {
return "{object array class}";
}
--- a/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/objArrayKlassKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -64,14 +64,13 @@
// helpers
static klassOop allocate_objArray_klass_impl(objArrayKlassKlassHandle this_oop, int n, KlassHandle element_klass, TRAPS);
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
- public:
// Verification
const char* internal_name() const;
void oop_verify_on(oop obj, outputStream* st);
--- a/hotspot/src/share/vm/oops/oop.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/oop.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -31,14 +31,13 @@
#ifdef PRODUCT
void oopDesc::print_on(outputStream* st) const {}
-void oopDesc::print_value_on(outputStream* st) const {}
void oopDesc::print_address_on(outputStream* st) const {}
-char* oopDesc::print_value_string() { return NULL; }
char* oopDesc::print_string() { return NULL; }
void oopDesc::print() {}
-void oopDesc::print_value() {}
void oopDesc::print_address() {}
-#else
+
+#else //PRODUCT
+
void oopDesc::print_on(outputStream* st) const {
if (this == NULL) {
st->print_cr("NULL");
@@ -47,22 +46,6 @@
}
}
-void oopDesc::print_value_on(outputStream* st) const {
- oop obj = oop(this);
- if (this == NULL) {
- st->print("NULL");
- } else if (java_lang_String::is_instance(obj)) {
- java_lang_String::print(obj, st);
- if (PrintOopAddress) print_address_on(st);
-#ifdef ASSERT
- } else if (!Universe::heap()->is_in(obj) || !Universe::heap()->is_in(klass())) {
- st->print("### BAD OOP %p ###", (address)obj);
-#endif
- } else {
- blueprint()->oop_print_value_on(obj, st);
- }
-}
-
void oopDesc::print_address_on(outputStream* st) const {
if (PrintOopAddress) {
st->print("{"INTPTR_FORMAT"}", this);
@@ -71,23 +54,47 @@
void oopDesc::print() { print_on(tty); }
-void oopDesc::print_value() { print_value_on(tty); }
-
void oopDesc::print_address() { print_address_on(tty); }
char* oopDesc::print_string() {
- stringStream* st = new stringStream();
- print_on(st);
- return st->as_string();
+ stringStream st;
+ print_on(&st);
+ return st.as_string();
+}
+
+#endif // PRODUCT
+
+// The print_value functions are present in all builds, to support the disassembler.
+
+void oopDesc::print_value() {
+ print_value_on(tty);
}
char* oopDesc::print_value_string() {
- stringStream* st = new stringStream();
- print_value_on(st);
- return st->as_string();
+ char buf[100];
+ stringStream st(buf, sizeof(buf));
+ print_value_on(&st);
+ return st.as_string();
}
-#endif // PRODUCT
+void oopDesc::print_value_on(outputStream* st) const {
+ oop obj = oop(this);
+ if (this == NULL) {
+ st->print("NULL");
+ } else if (java_lang_String::is_instance(obj)) {
+ java_lang_String::print(obj, st);
+#ifndef PRODUCT
+ if (PrintOopAddress) print_address_on(st);
+#endif //PRODUCT
+#ifdef ASSERT
+ } else if (!Universe::heap()->is_in(obj) || !Universe::heap()->is_in(klass())) {
+ st->print("### BAD OOP %p ###", (address)obj);
+#endif //ASSERT
+ } else {
+ blueprint()->oop_print_value_on(obj, st);
+ }
+}
+
void oopDesc::verify_on(outputStream* st) {
if (this != NULL) {
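
For context, print_value_string() above now writes through a fixed stack buffer instead of a heap-allocated stringStream. A minimal stand-alone sketch of the two stringStream forms involved (illustrative only; example_print is not a HotSpot function):

  void example_print(oop obj, outputStream* out) {
    // Fixed-buffer form: output is truncated at sizeof(buf), which is what
    // the new print_value_string() relies on.
    char buf[100];
    stringStream capped(buf, sizeof(buf));
    obj->print_value_on(&capped);

    // Default form: grows its internal buffer on demand.
    stringStream growing;
    obj->print_value_on(&growing);

    out->print_cr("%s", capped.as_string());
  }
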
--- a/hotspot/src/share/vm/oops/symbolKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/symbolKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -213,6 +213,8 @@
st->print("'");
}
+#endif //PRODUCT
+
void symbolKlass::oop_print_value_on(oop obj, outputStream* st) {
symbolOop sym = symbolOop(obj);
st->print("'");
@@ -222,8 +224,6 @@
st->print("'");
}
-#endif //PRODUCT
-
const char* symbolKlass::internal_name() const {
return "{symbol}";
}
--- a/hotspot/src/share/vm/oops/symbolKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/symbolKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -65,10 +65,10 @@
int oop_oop_iterate(oop obj, OopClosure* blk);
int oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr);
-#ifndef PRODUCT
// Printing
void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
-#endif
+#endif //PRODUCT
const char* internal_name() const;
};
--- a/hotspot/src/share/vm/oops/symbolOop.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/symbolOop.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,11 @@
# include "incls/_precompiled.incl"
# include "incls/_symbolOop.cpp.incl"
+
+// ------------------------------------------------------------------
+// symbolOopDesc::equals
+//
+// Compares the symbol with a string of the given length.
bool symbolOopDesc::equals(const char* str, int len) const {
int l = utf8_length();
if (l != len) return false;
@@ -36,6 +41,48 @@
return true;
}
+
+// ------------------------------------------------------------------
+// symbolOopDesc::starts_with
+//
+// Tests if the symbol starts with the specified prefix of the given
+// length.
+bool symbolOopDesc::starts_with(const char* prefix, int len) const {
+ if (len > utf8_length()) return false;
+ while (len-- > 0) {
+ if (prefix[len] != (char) byte_at(len))
+ return false;
+ }
+ assert(len == -1, "we should be at the beginning");
+ return true;
+}
+
+
+// ------------------------------------------------------------------
+// symbolOopDesc::index_of_at
+//
+// Finds the first occurrence of str within this symbol's UTF-8 bytes,
+// starting the scan at position i; returns -1 if str does not occur.
+int symbolOopDesc::index_of_at(int i, const char* str, int len) const {
+ assert(i >= 0 && i <= utf8_length(), "oob");
+ if (len <= 0) return 0;
+ char first_char = str[0];
+ address bytes = (address) ((symbolOopDesc*)this)->base();
+ address limit = bytes + utf8_length() - len; // inclusive limit
+ address scan = bytes + i;
+ if (scan > limit)
+ return -1;
+ for (;;) {
+ scan = (address) memchr(scan, first_char, (limit + 1 - scan));
+ if (scan == NULL)
+ return -1; // not found
+ assert(scan >= bytes+i && scan <= limit, "scan oob");
+ if (memcmp(scan, str, len) == 0)
+ return (int)(scan - bytes);
+ }
+}
+
+
char* symbolOopDesc::as_C_string(char* buf, int size) const {
if (size > 0) {
int len = MIN2(size - 1, utf8_length());
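
The new queries defined above pair with the strlen-based convenience overloads added in symbolOop.hpp below. A small usage sketch, assuming a HotSpot-internal caller (classify_symbol is illustrative, not an existing function):

  // Illustrative only: combining the new symbol queries.
  static void classify_symbol(symbolOop sym, outputStream* st) {
    if (sym->equals("<init>", 6)) {
      st->print_cr("constructor name");
    } else if (sym->starts_with("java/dyn/", 9)) {
      st->print_cr("method handle support class");
    } else {
      int dollar = sym->index_of_at(0, "$", 1);
      if (dollar >= 0)
        st->print_cr("first '$' at index %d", dollar);
    }
  }
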
--- a/hotspot/src/share/vm/oops/symbolOop.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/symbolOop.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,21 @@
void set_utf8_length(int len) { _length = len; }
- // Compares the symbol with a string
+ // Compares the symbol with a string.
bool equals(const char* str, int len) const;
+ bool equals(const char* str) const { return equals(str, (int) strlen(str)); }
+
+ // Tests if the symbol starts with the given prefix.
+ bool starts_with(const char* prefix, int len) const;
+ bool starts_with(const char* prefix) const {
+ return starts_with(prefix, (int) strlen(prefix));
+ }
+
+ // Finds the first index at or after position i where str occurs; returns -1 if absent.
+ int index_of_at(int i, const char* str, int len) const;
+ int index_of_at(int i, const char* str) const {
+ return index_of_at(i, str, (int) strlen(str));
+ }
// Three-way compare for sorting; returns -1/0/1 if receiver is </==/> than arg
// note that the ordering is not alfabetical
--- a/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/typeArrayKlassKlass.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -45,6 +45,7 @@
Klass:: oop_print_on(obj, st);
}
+#endif //PRODUCT
void typeArrayKlassKlass::oop_print_value_on(oop obj, outputStream* st) {
assert(obj->is_klass(), "must be klass");
@@ -63,8 +64,6 @@
st->print("}");
}
-#endif
-
const char* typeArrayKlassKlass::internal_name() const {
return "{type array class}";
}
--- a/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/oops/typeArrayKlassKlass.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -47,12 +47,12 @@
static int header_size() { return oopDesc::header_size() + sizeof(typeArrayKlassKlass)/HeapWordSize; }
int object_size() const { return align_object_size(header_size()); }
-#ifndef PRODUCT
public:
// Printing
+ void oop_print_value_on(oop obj, outputStream* st);
+#ifndef PRODUCT
void oop_print_on(oop obj, outputStream* st);
- void oop_print_value_on(oop obj, outputStream* st);
-#endif
- public:
+#endif //PRODUCT
+
const char* internal_name() const;
};
--- a/hotspot/src/share/vm/opto/bytecodeInfo.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/bytecodeInfo.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -27,11 +27,16 @@
//=============================================================================
//------------------------------InlineTree-------------------------------------
-InlineTree::InlineTree( Compile* c, const InlineTree *caller_tree, ciMethod* callee, JVMState* caller_jvms, int caller_bci, float site_invoke_ratio )
+InlineTree::InlineTree( Compile* c,
+ const InlineTree *caller_tree, ciMethod* callee,
+ JVMState* caller_jvms, int caller_bci,
+ float site_invoke_ratio, int site_depth_adjust)
: C(c), _caller_jvms(caller_jvms),
_caller_tree((InlineTree*)caller_tree),
_method(callee), _site_invoke_ratio(site_invoke_ratio),
- _count_inline_bcs(method()->code_size()) {
+ _site_depth_adjust(site_depth_adjust),
+ _count_inline_bcs(method()->code_size())
+{
NOT_PRODUCT(_count_inlines = 0;)
if (_caller_jvms != NULL) {
// Keep a private copy of the caller_jvms:
@@ -40,7 +45,7 @@
assert(!caller_jvms->should_reexecute(), "there should be no reexecute bytecode with inlining");
}
assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
- assert((caller_tree == NULL ? 0 : caller_tree->inline_depth() + 1) == inline_depth(), "correct (redundant) depth parameter");
+ assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
if (UseOldInlining) {
// Update hierarchical counts, count_inline_bcs() and count_inlines()
@@ -52,10 +57,13 @@
}
}
-InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio)
+InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
+ float site_invoke_ratio, int site_depth_adjust)
: C(c), _caller_jvms(caller_jvms), _caller_tree(NULL),
_method(callee_method), _site_invoke_ratio(site_invoke_ratio),
- _count_inline_bcs(method()->code_size()) {
+ _site_depth_adjust(site_depth_adjust),
+ _count_inline_bcs(method()->code_size())
+{
NOT_PRODUCT(_count_inlines = 0;)
assert(!UseOldInlining, "do not use for old stuff");
}
@@ -180,6 +188,10 @@
return NULL;
}
+ // Always inline MethodHandle methods.
+ if (callee_method->is_method_handle_invoke())
+ return NULL;
+
// First check all inlining restrictions which are required for correctness
if (callee_method->is_abstract()) return "abstract method";
// note: we allow ik->is_abstract()
@@ -265,10 +277,13 @@
return msg;
}
- bool is_accessor = InlineAccessors && callee_method->is_accessor();
+ if (InlineAccessors && callee_method->is_accessor()) {
+ // accessor methods are not subject to any of the following limits.
+ return NULL;
+ }
// suppress a few checks for accessors and trivial methods
- if (!is_accessor && callee_method->code_size() > MaxTrivialSize) {
+ if (callee_method->code_size() > MaxTrivialSize) {
// don't inline into giant methods
if (C->unique() > (uint)NodeCountInliningCutoff) {
@@ -287,7 +302,7 @@
}
}
- if (!C->do_inlining() && InlineAccessors && !is_accessor) {
+ if (!C->do_inlining() && InlineAccessors) {
return "not an accessor";
}
if( inline_depth() > MaxInlineLevel ) {
@@ -322,14 +337,17 @@
// stricter than callee_holder->is_initialized()
ciBytecodeStream iter(caller_method);
iter.force_bci(caller_bci);
- int index = iter.get_index_int();
- if( !caller_method->is_klass_loaded(index, true) ) {
- return false;
- }
- // Try to do constant pool resolution if running Xcomp
Bytecodes::Code call_bc = iter.cur_bc();
- if( !caller_method->check_call(index, call_bc == Bytecodes::_invokestatic) ) {
- return false;
+ // An invokedynamic instruction does not have a klass.
+ if (call_bc != Bytecodes::_invokedynamic) {
+ int index = iter.get_index_int();
+ if (!caller_method->is_klass_loaded(index, true)) {
+ return false;
+ }
+ // Try to do constant pool resolution if running Xcomp
+ if( !caller_method->check_call(index, call_bc == Bytecodes::_invokestatic) ) {
+ return false;
+ }
}
}
// We will attempt to see if a class/field/etc got properly loaded. If it
@@ -457,7 +475,30 @@
if (old_ilt != NULL) {
return old_ilt;
}
- InlineTree *ilt = new InlineTree( C, this, callee_method, caller_jvms, caller_bci, recur_frequency );
+ int new_depth_adjust = 0;
+ if (caller_jvms->method() != NULL) {
+ if ((caller_jvms->method()->name() == ciSymbol::invoke_name() &&
+ caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_MethodHandle())
+ || caller_jvms->method()->holder()->name() == ciSymbol::java_dyn_InvokeDynamic())
+ /* @@@ FIXME:
+ if (caller_jvms->method()->is_method_handle_adapter())
+ */
+ new_depth_adjust -= 1; // don't count actions in MH or indy adapter frames
+ else if (callee_method->is_method_handle_invoke()) {
+ new_depth_adjust -= 1; // don't count method handle calls from java.dyn implem
+ }
+ if (new_depth_adjust != 0 && PrintInlining) {
+ stringStream nm1; caller_jvms->method()->print_name(&nm1);
+ stringStream nm2; callee_method->print_name(&nm2);
+ tty->print_cr("discounting inlining depth from %s to %s", nm1.base(), nm2.base());
+ }
+ if (new_depth_adjust != 0 && C->log()) {
+ int id1 = C->log()->identify(caller_jvms->method());
+ int id2 = C->log()->identify(callee_method);
+ C->log()->elem("inline_depth_discount caller='%d' callee='%d'", id1, id2);
+ }
+ }
+ InlineTree *ilt = new InlineTree(C, this, callee_method, caller_jvms, caller_bci, recur_frequency, _site_depth_adjust + new_depth_adjust);
_subtrees.append( ilt );
NOT_PRODUCT( _count_inlines += 1; )
@@ -483,7 +524,7 @@
Compile* C = Compile::current();
// Root of inline tree
- InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F);
+ InlineTree *ilt = new InlineTree(C, NULL, C->method(), NULL, -1, 1.0F, 0);
return ilt;
}
--- a/hotspot/src/share/vm/opto/callGenerator.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -136,6 +136,8 @@
}
// Mark the call node as virtual, sort of:
call->set_optimized_virtual(true);
+ if (method()->is_method_handle_invoke())
+ call->set_method_handle_invoke(true);
}
kit.set_arguments_for_java_call(call);
kit.set_edges_for_java_call(call, false, _separate_io_proj);
@@ -145,6 +147,71 @@
return kit.transfer_exceptions_into_jvms();
}
+//---------------------------DynamicCallGenerator-----------------------------
+// Internal class which handles all out-of-line invokedynamic calls.
+class DynamicCallGenerator : public CallGenerator {
+public:
+ DynamicCallGenerator(ciMethod* method)
+ : CallGenerator(method)
+ {
+ }
+ virtual JVMState* generate(JVMState* jvms);
+};
+
+JVMState* DynamicCallGenerator::generate(JVMState* jvms) {
+ GraphKit kit(jvms);
+
+ if (kit.C->log() != NULL) {
+ kit.C->log()->elem("dynamic_call bci='%d'", jvms->bci());
+ }
+
+ // Get the constant pool cache from the caller class.
+ ciMethod* caller_method = jvms->method();
+ ciBytecodeStream str(caller_method);
+ str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
+ assert(str.cur_bc() == Bytecodes::_invokedynamic, "wrong place to issue a dynamic call!");
+ ciCPCache* cpcache = str.get_cpcache();
+
+ // Get the offset of the CallSite from the constant pool cache
+ // pointer.
+ int index = str.get_method_index();
+ size_t call_site_offset = cpcache->get_f1_offset(index);
+
+ // Load the CallSite object from the constant pool cache.
+ const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+ Node* cpcache_adr = kit.makecon(cpcache_ptr);
+ Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+ Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+ // Load the target MethodHandle from the CallSite object.
+ Node* target_mh_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
+ Node* target_mh = kit.make_load(kit.control(), target_mh_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
+ address resolve_stub = SharedRuntime::get_resolve_opt_virtual_call_stub();
+
+ CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), resolve_stub, method(), kit.bci());
+ // invokedynamic is treated as an optimized invokevirtual.
+ call->set_optimized_virtual(true);
+ // Take extra care (in the presence of argument motion) not to trash the SP:
+ call->set_method_handle_invoke(true);
+
+ // Pass the target MethodHandle as first argument and shift the
+ // other arguments.
+ call->init_req(0 + TypeFunc::Parms, target_mh);
+ uint nargs = call->method()->arg_size();
+ for (uint i = 1; i < nargs; i++) {
+ Node* arg = kit.argument(i - 1);
+ call->init_req(i + TypeFunc::Parms, arg);
+ }
+
+ kit.set_edges_for_java_call(call);
+ Node* ret = kit.set_results_for_java_call(call);
+ kit.push_node(method()->return_type()->basic_type(), ret);
+ return kit.transfer_exceptions_into_jvms();
+}
+
+//--------------------------VirtualCallGenerator------------------------------
+// Internal class which handles all out-of-line calls checking receiver type.
class VirtualCallGenerator : public CallGenerator {
private:
int _vtable_index;
@@ -159,8 +226,6 @@
virtual JVMState* generate(JVMState* jvms);
};
-//--------------------------VirtualCallGenerator------------------------------
-// Internal class which handles all out-of-line calls checking receiver type.
JVMState* VirtualCallGenerator::generate(JVMState* jvms) {
GraphKit kit(jvms);
Node* receiver = kit.argument(0);
@@ -253,8 +318,14 @@
return new DirectCallGenerator(m, separate_io_proj);
}
+CallGenerator* CallGenerator::for_dynamic_call(ciMethod* m) {
+ assert(m->is_method_handle_invoke(), "for_dynamic_call mismatch");
+ return new DynamicCallGenerator(m);
+}
+
CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) {
assert(!m->is_static(), "for_virtual_call mismatch");
+ assert(!m->is_method_handle_invoke(), "should be a direct call");
return new VirtualCallGenerator(m, vtable_index);
}
@@ -576,6 +647,155 @@
}
+//------------------------PredictedDynamicCallGenerator-----------------------
+// Internal class which handles all out-of-line calls checking receiver type.
+class PredictedDynamicCallGenerator : public CallGenerator {
+ ciMethodHandle* _predicted_method_handle;
+ CallGenerator* _if_missed;
+ CallGenerator* _if_hit;
+ float _hit_prob;
+
+public:
+ PredictedDynamicCallGenerator(ciMethodHandle* predicted_method_handle,
+ CallGenerator* if_missed,
+ CallGenerator* if_hit,
+ float hit_prob)
+ : CallGenerator(if_missed->method()),
+ _predicted_method_handle(predicted_method_handle),
+ _if_missed(if_missed),
+ _if_hit(if_hit),
+ _hit_prob(hit_prob)
+ {}
+
+ virtual bool is_inline() const { return _if_hit->is_inline(); }
+ virtual bool is_deferred() const { return _if_hit->is_deferred(); }
+
+ virtual JVMState* generate(JVMState* jvms);
+};
+
+
+CallGenerator* CallGenerator::for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
+ CallGenerator* if_missed,
+ CallGenerator* if_hit,
+ float hit_prob) {
+ return new PredictedDynamicCallGenerator(predicted_method_handle, if_missed, if_hit, hit_prob);
+}
+
+
+JVMState* PredictedDynamicCallGenerator::generate(JVMState* jvms) {
+ GraphKit kit(jvms);
+ PhaseGVN& gvn = kit.gvn();
+
+ CompileLog* log = kit.C->log();
+ if (log != NULL) {
+ log->elem("predicted_dynamic_call bci='%d'", jvms->bci());
+ }
+
+ // Get the constant pool cache from the caller class.
+ ciMethod* caller_method = jvms->method();
+ ciBytecodeStream str(caller_method);
+ str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
+ ciCPCache* cpcache = str.get_cpcache();
+
+ // Get the offset of the CallSite from the constant pool cache
+ // pointer.
+ int index = str.get_method_index();
+ size_t call_site_offset = cpcache->get_f1_offset(index);
+
+ // Load the CallSite object from the constant pool cache.
+ const TypeOopPtr* cpcache_ptr = TypeOopPtr::make_from_constant(cpcache);
+ Node* cpcache_adr = kit.makecon(cpcache_ptr);
+ Node* call_site_adr = kit.basic_plus_adr(cpcache_adr, cpcache_adr, call_site_offset);
+ Node* call_site = kit.make_load(kit.control(), call_site_adr, TypeInstPtr::BOTTOM, T_OBJECT, Compile::AliasIdxRaw);
+
+ // Load the target MethodHandle from the CallSite object.
+ Node* target_adr = kit.basic_plus_adr(call_site, call_site, java_dyn_CallSite::target_offset_in_bytes());
+ Node* target_mh = kit.make_load(kit.control(), target_adr, TypeInstPtr::BOTTOM, T_OBJECT);
+
+ // Check if the MethodHandle is still the same.
+ const TypeOopPtr* predicted_mh_ptr = TypeOopPtr::make_from_constant(_predicted_method_handle, true);
+ Node* predicted_mh = kit.makecon(predicted_mh_ptr);
+
+ Node* cmp = gvn.transform(new(kit.C, 3) CmpPNode(target_mh, predicted_mh));
+ Node* bol = gvn.transform(new(kit.C, 2) BoolNode(cmp, BoolTest::eq) );
+ IfNode* iff = kit.create_and_xform_if(kit.control(), bol, _hit_prob, COUNT_UNKNOWN);
+ kit.set_control( gvn.transform(new(kit.C, 1) IfTrueNode (iff)));
+ Node* slow_ctl = gvn.transform(new(kit.C, 1) IfFalseNode(iff));
+
+ SafePointNode* slow_map = NULL;
+ JVMState* slow_jvms;
+ { PreserveJVMState pjvms(&kit);
+ kit.set_control(slow_ctl);
+ if (!kit.stopped()) {
+ slow_jvms = _if_missed->generate(kit.sync_jvms());
+ assert(slow_jvms != NULL, "miss path must not fail to generate");
+ kit.add_exception_states_from(slow_jvms);
+ kit.set_map(slow_jvms->map());
+ if (!kit.stopped())
+ slow_map = kit.stop();
+ }
+ }
+
+ if (kit.stopped()) {
+ // The predicted method handle can never match; only the slow path remains.
+ kit.set_jvms(slow_jvms);
+ return kit.transfer_exceptions_into_jvms();
+ }
+
+ // Make the hot call:
+ JVMState* new_jvms = _if_hit->generate(kit.sync_jvms());
+ if (new_jvms == NULL) {
+ // Inline failed, so make a direct call.
+ assert(_if_hit->is_inline(), "must have been a failed inline");
+ CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method());
+ new_jvms = cg->generate(kit.sync_jvms());
+ }
+ kit.add_exception_states_from(new_jvms);
+ kit.set_jvms(new_jvms);
+
+ // Need to merge slow and fast?
+ if (slow_map == NULL) {
+ // The fast path is the only path remaining.
+ return kit.transfer_exceptions_into_jvms();
+ }
+
+ if (kit.stopped()) {
+ // Inlined method threw an exception, so it's just the slow path after all.
+ kit.set_jvms(slow_jvms);
+ return kit.transfer_exceptions_into_jvms();
+ }
+
+ // Finish the diamond.
+ kit.C->set_has_split_ifs(true); // Has chance for split-if optimization
+ RegionNode* region = new (kit.C, 3) RegionNode(3);
+ region->init_req(1, kit.control());
+ region->init_req(2, slow_map->control());
+ kit.set_control(gvn.transform(region));
+ Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO);
+ iophi->set_req(2, slow_map->i_o());
+ kit.set_i_o(gvn.transform(iophi));
+ kit.merge_memory(slow_map->merged_memory(), region, 2);
+ uint tos = kit.jvms()->stkoff() + kit.sp();
+ uint limit = slow_map->req();
+ for (uint i = TypeFunc::Parms; i < limit; i++) {
+ // Skip unused stack slots; fast forward to monoff();
+ if (i == tos) {
+ i = kit.jvms()->monoff();
+ if( i >= limit ) break;
+ }
+ Node* m = kit.map()->in(i);
+ Node* n = slow_map->in(i);
+ if (m != n) {
+ const Type* t = gvn.type(m)->meet(gvn.type(n));
+ Node* phi = PhiNode::make(region, m, t);
+ phi->set_req(2, n);
+ kit.map()->set_req(i, gvn.transform(phi));
+ }
+ }
+ return kit.transfer_exceptions_into_jvms();
+}
+
+
//-------------------------UncommonTrapCallGenerator-----------------------------
// Internal class which handles all out-of-line calls checking receiver type.
class UncommonTrapCallGenerator : public CallGenerator {
--- a/hotspot/src/share/vm/opto/callGenerator.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/callGenerator.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -100,6 +100,7 @@
// How to generate vanilla out-of-line call sites:
static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false); // static, special
+ static CallGenerator* for_dynamic_call(ciMethod* m); // invokedynamic
static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index); // virtual, interface
// How to generate a replace a direct call with an inline version
@@ -116,6 +117,12 @@
CallGenerator* if_hit,
float hit_prob);
+ // How to make a call that optimistically assumes a MethodHandle target:
+ static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
+ CallGenerator* if_missed,
+ CallGenerator* if_hit,
+ float hit_prob);
+
// How to make a call that gives up and goes back to the interpreter:
static CallGenerator* for_uncommon_trap(ciMethod* m,
Deoptimization::DeoptReason reason,
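
A compressed sketch of how the two new factories are meant to compose at an invokedynamic site; it mirrors the doCall.cpp hunk later in this changeset, and make_indy_cg with its arguments is purely illustrative:

  static CallGenerator* make_indy_cg(ciMethodHandle* target, ciMethod* call_method,
                                     CallGenerator* hit_cg, float prof_factor) {
    // Slow path: a real out-of-line invokedynamic call through the CallSite.
    CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
    // Fast path: the inlined adapter, guarded at runtime by a pointer compare
    // against the MethodHandle observed at compile time.
    return CallGenerator::for_predicted_dynamic_call(target, miss_cg, hit_cg, prof_factor);
  }
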
--- a/hotspot/src/share/vm/opto/callnode.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/callnode.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -562,12 +562,15 @@
virtual uint size_of() const; // Size is bigger
bool _optimized_virtual;
+ bool _method_handle_invoke;
ciMethod* _method; // Method being direct called
public:
const int _bci; // Byte Code Index of call byte code
CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
: CallNode(tf, addr, TypePtr::BOTTOM),
- _method(method), _bci(bci), _optimized_virtual(false)
+ _method(method), _bci(bci),
+ _optimized_virtual(false),
+ _method_handle_invoke(false)
{
init_class_id(Class_CallJava);
}
@@ -577,6 +580,8 @@
void set_method(ciMethod *m) { _method = m; }
void set_optimized_virtual(bool f) { _optimized_virtual = f; }
bool is_optimized_virtual() const { return _optimized_virtual; }
+ void set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
+ bool is_method_handle_invoke() const { return _method_handle_invoke; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
--- a/hotspot/src/share/vm/opto/divnode.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/divnode.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -114,7 +114,8 @@
if( andconi_t && andconi_t->is_con() ) {
jint andconi = andconi_t->get_con();
if( andconi < 0 && is_power_of_2(-andconi) && (-andconi) >= d ) {
- dividend = dividend->in(1);
+ if( (-andconi) == d ) // Remove AND only if it clears exactly the bits the shift discards
+ dividend = dividend->in(1);
needs_rounding = false;
}
}
@@ -356,7 +357,8 @@
if( andconl_t && andconl_t->is_con() ) {
jlong andconl = andconl_t->get_con();
if( andconl < 0 && is_power_of_2_long(-andconl) && (-andconl) >= d ) {
- dividend = dividend->in(1);
+ if( (-andconl) == d ) // Remove AND only if it clears exactly the bits the shift discards
+ dividend = dividend->in(1);
needs_rounding = false;
}
}
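
The guard added above is easiest to see with concrete numbers: a mask wider than the divisor clears bits that still affect the quotient, so only a mask equal to the divisor may be dropped. A stand-alone check (not HotSpot code):

  #include <cassert>

  int main() {
    int x = 15;                      // 0b1111
    // Mask wider than the divisor: (x & -8)/4 == 2 but x/4 == 3,
    // so the AND cannot be removed even though rounding is unnecessary.
    assert((x & -8) / 4 != x / 4);
    // Mask equal to the divisor: it only clears the two bits the division
    // by 4 discards anyway, so stripping it is safe.
    assert((x & -4) / 4 == x / 4);
    return 0;
  }
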
--- a/hotspot/src/share/vm/opto/doCall.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/doCall.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -43,7 +43,9 @@
}
#endif
-CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float prof_factor) {
+CallGenerator* Compile::call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual,
+ JVMState* jvms, bool allow_inline,
+ float prof_factor) {
CallGenerator* cg;
// Dtrace currently doesn't work unless all calls are vanilla
@@ -116,7 +118,7 @@
// TO DO: When UseOldInlining is removed, copy the ILT code elsewhere.
float site_invoke_ratio = prof_factor;
// Note: ilt is for the root of this parse, not the present call site.
- ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio);
+ ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, 0);
}
WarmCallInfo scratch_ci;
if (!UseOldInlining)
@@ -224,6 +226,57 @@
}
}
+ // Do MethodHandle calls.
+ if (call_method->is_method_handle_invoke()) {
+ if (jvms->method()->java_code_at_bci(jvms->bci()) != Bytecodes::_invokedynamic) {
+ GraphKit kit(jvms);
+ Node* n = kit.argument(0);
+
+ if (n->Opcode() == Op_ConP) {
+ const TypeOopPtr* oop_ptr = n->bottom_type()->is_oopptr();
+ ciObject* const_oop = oop_ptr->const_oop();
+ ciMethodHandle* method_handle = const_oop->as_method_handle();
+
+ // Set the actually called method to have access to the class
+ // and signature in the MethodHandleCompiler.
+ method_handle->set_callee(call_method);
+
+ // Get an adapter for the MethodHandle.
+ ciMethod* target_method = method_handle->get_method_handle_adapter();
+
+ CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
+ if (hit_cg != NULL && hit_cg->is_inline())
+ return hit_cg;
+ }
+
+ return CallGenerator::for_direct_call(call_method);
+ }
+ else {
+ // Get the MethodHandle from the CallSite.
+ ciMethod* caller_method = jvms->method();
+ ciBytecodeStream str(caller_method);
+ str.force_bci(jvms->bci()); // Set the stream to the invokedynamic bci.
+ ciCallSite* call_site = str.get_call_site();
+ ciMethodHandle* method_handle = call_site->get_target();
+
+ // Set the actually called method to have access to the class
+ // and signature in the MethodHandleCompiler.
+ method_handle->set_callee(call_method);
+
+ // Get an adapter for the MethodHandle.
+ ciMethod* target_method = method_handle->get_invokedynamic_adapter();
+
+ CallGenerator* hit_cg = this->call_generator(target_method, vtable_index, false, jvms, true, prof_factor);
+ if (hit_cg != NULL && hit_cg->is_inline()) {
+ CallGenerator* miss_cg = CallGenerator::for_dynamic_call(call_method);
+ return CallGenerator::for_predicted_dynamic_call(method_handle, miss_cg, hit_cg, prof_factor);
+ }
+
+ // If something failed, generate a normal dynamic call.
+ return CallGenerator::for_dynamic_call(call_method);
+ }
+ }
+
// There was no special inlining tactic, or it bailed out.
// Use a more generic tactic, like a simple call.
if (call_is_virtual) {
@@ -299,7 +352,7 @@
// Interface classes can be loaded & linked and never get around to
// being initialized. Uncommon-trap for not-initialized static or
// v-calls. Let interface calls happen.
- ciInstanceKlass* holder_klass = dest_method->holder();
+ ciInstanceKlass* holder_klass = dest_method->holder();
if (!holder_klass->is_initialized() &&
!holder_klass->is_interface()) {
uncommon_trap(Deoptimization::Reason_uninitialized,
@@ -307,14 +360,6 @@
holder_klass);
return true;
}
- if (dest_method->is_method_handle_invoke()
- && holder_klass->name() == ciSymbol::java_dyn_Dynamic()) {
- // FIXME: NYI
- uncommon_trap(Deoptimization::Reason_unhandled,
- Deoptimization::Action_none,
- holder_klass);
- return true;
- }
assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
return false;
@@ -333,6 +378,7 @@
bool is_virtual = bc() == Bytecodes::_invokevirtual;
bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;
+ bool is_invokedynamic = bc() == Bytecodes::_invokedynamic;
// Find target being called
bool will_link;
@@ -341,7 +387,8 @@
ciKlass* holder = iter().get_declared_method_holder();
ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);
- int nargs = dest_method->arg_size();
+ int nargs = dest_method->arg_size();
+ if (is_invokedynamic) nargs -= 1;
// uncommon-trap when callee is unloaded, uninitialized or will not link
// bailout when too many arguments for register representation
@@ -355,7 +402,7 @@
return;
}
assert(holder_klass->is_loaded(), "");
- assert(dest_method->is_static() == !has_receiver, "must match bc");
+ assert((dest_method->is_static() || is_invokedynamic) == !has_receiver , "must match bc");
// Note: this takes into account invokeinterface of methods declared in java/lang/Object,
// which should be invokevirtuals but according to the VM spec may be invokeinterfaces
assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
--- a/hotspot/src/share/vm/opto/escape.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/escape.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1916,7 +1916,7 @@
Node *arg = call->in(i)->uncast();
if (at->isa_oopptr() != NULL &&
- ptnode_adr(arg->_idx)->escape_state() < PointsToNode::ArgEscape) {
+ ptnode_adr(arg->_idx)->escape_state() < PointsToNode::GlobalEscape) {
bool global_escapes = false;
bool fields_escapes = false;
--- a/hotspot/src/share/vm/opto/graphKit.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -981,14 +981,19 @@
case Bytecodes::_invokedynamic:
case Bytecodes::_invokeinterface:
{
- bool is_static = (depth == 0);
bool ignore;
ciBytecodeStream iter(method());
iter.reset_to_bci(bci());
iter.next();
ciMethod* method = iter.get_method(ignore);
inputs = method->arg_size_no_receiver();
- if (!is_static) inputs += 1;
+ // Add a receiver argument, maybe:
+ if (code != Bytecodes::_invokestatic &&
+ code != Bytecodes::_invokedynamic)
+ inputs += 1;
+ // (Do not use ciMethod::arg_size(), because
+ // it might be an unloaded method, which doesn't
+ // know whether it is static or not.)
int size = method->return_type()->size();
depth = size - inputs;
}
--- a/hotspot/src/share/vm/opto/ifnode.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/ifnode.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -531,6 +531,9 @@
if (linear_only)
return NULL;
+ if( dom->is_Root() )
+ return NULL;
+
// Else hit a Region. Check for a loop header
if( dom->is_Loop() )
return dom->in(1); // Skip up thru loops
--- a/hotspot/src/share/vm/opto/lcm.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/lcm.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -120,6 +120,7 @@
case Op_LoadRange:
case Op_LoadD_unaligned:
case Op_LoadL_unaligned:
+ assert(mach->in(2) == val, "should be address");
break;
case Op_StoreB:
case Op_StoreC:
@@ -146,6 +147,21 @@
default: // Also check for embedded loads
if( !mach->needs_anti_dependence_check() )
continue; // Not an memory op; skip it
+ {
+ // Check that value is used in memory address.
+ Node* base;
+ Node* index;
+ const MachOper* oper = mach->memory_inputs(base, index);
+ if (oper == NULL || oper == (MachOper*)-1) {
+ continue; // Not a memory op; skip it
+ }
+ if (val == base ||
+ val == index && val->bottom_type()->isa_narrowoop()) {
+ break; // Found it
+ } else {
+ continue; // Skip it
+ }
+ }
break;
}
// check if the offset is not too high for implicit exception
@@ -542,6 +558,16 @@
// pointers as far as the kill mask goes.
bool exclude_soe = op == Op_CallRuntime;
+ // If the call is a MethodHandle invoke, we need to exclude the
+ // register which is used to save the SP value over MH invokes from
+ // the mask. Otherwise this register could be used for
+ // deoptimization information.
+ if (op == Op_CallStaticJava) {
+ MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
+ if (mcallstaticjava->_method_handle_invoke)
+ proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
+ }
+
// Fill in the kill mask for the call
for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
if( !regs.Member(r) ) { // Not already defined by the call
--- a/hotspot/src/share/vm/opto/library_call.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/library_call.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1999-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1999-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -3697,12 +3697,14 @@
// Helper routine for above
bool LibraryCallKit::is_method_invoke_or_aux_frame(JVMState* jvms) {
+ ciMethod* method = jvms->method();
+
// Is this the Method.invoke method itself?
- if (jvms->method()->intrinsic_id() == vmIntrinsics::_invoke)
+ if (method->intrinsic_id() == vmIntrinsics::_invoke)
return true;
// Is this a helper, defined somewhere underneath MethodAccessorImpl.
- ciKlass* k = jvms->method()->holder();
+ ciKlass* k = method->holder();
if (k->is_instance_klass()) {
ciInstanceKlass* ik = k->as_instance_klass();
for (; ik != NULL; ik = ik->super()) {
@@ -3712,6 +3714,10 @@
}
}
}
+ else if (method->is_method_handle_adapter()) {
+ // This is an internal adapter frame from the MethodHandleCompiler -- skip it
+ return true;
+ }
return false;
}
--- a/hotspot/src/share/vm/opto/machnode.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/machnode.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -636,7 +636,9 @@
}
#ifndef PRODUCT
void MachCallJavaNode::dump_spec(outputStream *st) const {
- if( _method ) {
+ if (_method_handle_invoke)
+ st->print("MethodHandle ");
+ if (_method) {
_method->print_short_name(st);
st->print(" ");
}
@@ -644,6 +646,20 @@
}
#endif
+//------------------------------Registers--------------------------------------
+const RegMask &MachCallJavaNode::in_RegMask(uint idx) const {
+ // Values in the domain use the users calling convention, embodied in the
+ // _in_rms array of RegMasks.
+ if (idx < tf()->domain()->cnt()) return _in_rms[idx];
+ // Values outside the domain represent debug info
+ Matcher* m = Compile::current()->matcher();
+ // If this call is a MethodHandle invoke we have to use a different
+ // debugmask which does not include the register we use to save the
+ // SP over MH invokes.
+ RegMask** debugmask = _method_handle_invoke ? m->idealreg2mhdebugmask : m->idealreg2debugmask;
+ return *debugmask[in(idx)->ideal_reg()];
+}
+
//=============================================================================
uint MachCallStaticJavaNode::size_of() const { return sizeof(*this); }
uint MachCallStaticJavaNode::cmp( const Node &n ) const {
--- a/hotspot/src/share/vm/opto/machnode.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/machnode.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -662,9 +662,13 @@
ciMethod* _method; // Method being direct called
int _bci; // Byte Code index of call byte code
bool _optimized_virtual; // Tells if node is a static call or an optimized virtual
+ bool _method_handle_invoke; // Tells if the call has to preserve SP
MachCallJavaNode() : MachCallNode() {
init_class_id(Class_MachCallJava);
}
+
+ virtual const RegMask &in_RegMask(uint) const;
+
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
--- a/hotspot/src/share/vm/opto/matcher.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/matcher.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -70,19 +70,27 @@
_dontcare(&_states_arena) {
C->set_matcher(this);
- idealreg2spillmask[Op_RegI] = NULL;
- idealreg2spillmask[Op_RegN] = NULL;
- idealreg2spillmask[Op_RegL] = NULL;
- idealreg2spillmask[Op_RegF] = NULL;
- idealreg2spillmask[Op_RegD] = NULL;
- idealreg2spillmask[Op_RegP] = NULL;
+ idealreg2spillmask [Op_RegI] = NULL;
+ idealreg2spillmask [Op_RegN] = NULL;
+ idealreg2spillmask [Op_RegL] = NULL;
+ idealreg2spillmask [Op_RegF] = NULL;
+ idealreg2spillmask [Op_RegD] = NULL;
+ idealreg2spillmask [Op_RegP] = NULL;
- idealreg2debugmask[Op_RegI] = NULL;
- idealreg2debugmask[Op_RegN] = NULL;
- idealreg2debugmask[Op_RegL] = NULL;
- idealreg2debugmask[Op_RegF] = NULL;
- idealreg2debugmask[Op_RegD] = NULL;
- idealreg2debugmask[Op_RegP] = NULL;
+ idealreg2debugmask [Op_RegI] = NULL;
+ idealreg2debugmask [Op_RegN] = NULL;
+ idealreg2debugmask [Op_RegL] = NULL;
+ idealreg2debugmask [Op_RegF] = NULL;
+ idealreg2debugmask [Op_RegD] = NULL;
+ idealreg2debugmask [Op_RegP] = NULL;
+
+ idealreg2mhdebugmask[Op_RegI] = NULL;
+ idealreg2mhdebugmask[Op_RegN] = NULL;
+ idealreg2mhdebugmask[Op_RegL] = NULL;
+ idealreg2mhdebugmask[Op_RegF] = NULL;
+ idealreg2mhdebugmask[Op_RegD] = NULL;
+ idealreg2mhdebugmask[Op_RegP] = NULL;
+
debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node
}
@@ -389,19 +397,28 @@
void Matcher::init_first_stack_mask() {
// Allocate storage for spill masks as masks for the appropriate load type.
- RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask)*12);
- idealreg2spillmask[Op_RegN] = &rms[0];
- idealreg2spillmask[Op_RegI] = &rms[1];
- idealreg2spillmask[Op_RegL] = &rms[2];
- idealreg2spillmask[Op_RegF] = &rms[3];
- idealreg2spillmask[Op_RegD] = &rms[4];
- idealreg2spillmask[Op_RegP] = &rms[5];
- idealreg2debugmask[Op_RegN] = &rms[6];
- idealreg2debugmask[Op_RegI] = &rms[7];
- idealreg2debugmask[Op_RegL] = &rms[8];
- idealreg2debugmask[Op_RegF] = &rms[9];
- idealreg2debugmask[Op_RegD] = &rms[10];
- idealreg2debugmask[Op_RegP] = &rms[11];
+ RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * 3*6);
+
+ idealreg2spillmask [Op_RegN] = &rms[0];
+ idealreg2spillmask [Op_RegI] = &rms[1];
+ idealreg2spillmask [Op_RegL] = &rms[2];
+ idealreg2spillmask [Op_RegF] = &rms[3];
+ idealreg2spillmask [Op_RegD] = &rms[4];
+ idealreg2spillmask [Op_RegP] = &rms[5];
+
+ idealreg2debugmask [Op_RegN] = &rms[6];
+ idealreg2debugmask [Op_RegI] = &rms[7];
+ idealreg2debugmask [Op_RegL] = &rms[8];
+ idealreg2debugmask [Op_RegF] = &rms[9];
+ idealreg2debugmask [Op_RegD] = &rms[10];
+ idealreg2debugmask [Op_RegP] = &rms[11];
+
+ idealreg2mhdebugmask[Op_RegN] = &rms[12];
+ idealreg2mhdebugmask[Op_RegI] = &rms[13];
+ idealreg2mhdebugmask[Op_RegL] = &rms[14];
+ idealreg2mhdebugmask[Op_RegF] = &rms[15];
+ idealreg2mhdebugmask[Op_RegD] = &rms[16];
+ idealreg2mhdebugmask[Op_RegP] = &rms[17];
OptoReg::Name i;
@@ -442,12 +459,19 @@
// Make up debug masks. Any spill slot plus callee-save registers.
// Caller-save registers are assumed to be trashable by the various
// inline-cache fixup routines.
- *idealreg2debugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
- *idealreg2debugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
- *idealreg2debugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
- *idealreg2debugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
- *idealreg2debugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
- *idealreg2debugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
+ *idealreg2debugmask [Op_RegN]= *idealreg2spillmask[Op_RegN];
+ *idealreg2debugmask [Op_RegI]= *idealreg2spillmask[Op_RegI];
+ *idealreg2debugmask [Op_RegL]= *idealreg2spillmask[Op_RegL];
+ *idealreg2debugmask [Op_RegF]= *idealreg2spillmask[Op_RegF];
+ *idealreg2debugmask [Op_RegD]= *idealreg2spillmask[Op_RegD];
+ *idealreg2debugmask [Op_RegP]= *idealreg2spillmask[Op_RegP];
+
+ *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
+ *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
+ *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
+ *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
+ *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
+ *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];
// Prevent stub compilations from attempting to reference
// callee-saved registers from debug info
@@ -458,14 +482,31 @@
if( _register_save_policy[i] == 'C' ||
_register_save_policy[i] == 'A' ||
(_register_save_policy[i] == 'E' && exclude_soe) ) {
- idealreg2debugmask[Op_RegN]->Remove(i);
- idealreg2debugmask[Op_RegI]->Remove(i); // Exclude save-on-call
- idealreg2debugmask[Op_RegL]->Remove(i); // registers from debug
- idealreg2debugmask[Op_RegF]->Remove(i); // masks
- idealreg2debugmask[Op_RegD]->Remove(i);
- idealreg2debugmask[Op_RegP]->Remove(i);
+ idealreg2debugmask [Op_RegN]->Remove(i);
+ idealreg2debugmask [Op_RegI]->Remove(i); // Exclude save-on-call
+ idealreg2debugmask [Op_RegL]->Remove(i); // registers from debug
+ idealreg2debugmask [Op_RegF]->Remove(i); // masks
+ idealreg2debugmask [Op_RegD]->Remove(i);
+ idealreg2debugmask [Op_RegP]->Remove(i);
+
+ idealreg2mhdebugmask[Op_RegN]->Remove(i);
+ idealreg2mhdebugmask[Op_RegI]->Remove(i);
+ idealreg2mhdebugmask[Op_RegL]->Remove(i);
+ idealreg2mhdebugmask[Op_RegF]->Remove(i);
+ idealreg2mhdebugmask[Op_RegD]->Remove(i);
+ idealreg2mhdebugmask[Op_RegP]->Remove(i);
}
}
+
+ // Subtract the register we use to save the SP for MethodHandle
+ // invokes from the debug mask.
+ const RegMask save_mask = method_handle_invoke_SP_save_mask();
+ idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
+ idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}
//---------------------------is_save_on_entry----------------------------------
@@ -989,6 +1030,7 @@
CallNode *call;
const TypeTuple *domain;
ciMethod* method = NULL;
+ bool is_method_handle_invoke = false; // for special kill effects
if( sfpt->is_Call() ) {
call = sfpt->as_Call();
domain = call->tf()->domain();
@@ -1013,6 +1055,8 @@
mcall_java->_method = method;
mcall_java->_bci = call_java->_bci;
mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
+ is_method_handle_invoke = call_java->is_method_handle_invoke();
+ mcall_java->_method_handle_invoke = is_method_handle_invoke;
if( mcall_java->is_MachCallStaticJava() )
mcall_java->as_MachCallStaticJava()->_name =
call_java->as_CallStaticJava()->_name;
@@ -1126,6 +1170,15 @@
mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
}
+ if (is_method_handle_invoke) {
+ // Kill some extra stack space in case method handles want to do
+ // a little in-place argument insertion.
+ int regs_per_word = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const!
+ out_arg_limit_per_call += MethodHandlePushLimit * regs_per_word;
+ // Do not update mcall->_argsize because (a) the extra space is not
+ // pushed as arguments and (b) _argsize is dead (not used anywhere).
+ }
+
// Compute the max stack slot killed by any call. These will not be
// available for debug info, and will be used to adjust FIRST_STACK_mask
// after all call sites have been visited.
--- a/hotspot/src/share/vm/opto/matcher.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/matcher.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -117,8 +117,9 @@
static const int base2reg[]; // Map Types to machine register types
// Convert ideal machine register to a register mask for spill-loads
static const RegMask *idealreg2regmask[];
- RegMask *idealreg2spillmask[_last_machine_leaf];
- RegMask *idealreg2debugmask[_last_machine_leaf];
+ RegMask *idealreg2spillmask [_last_machine_leaf];
+ RegMask *idealreg2debugmask [_last_machine_leaf];
+ RegMask *idealreg2mhdebugmask[_last_machine_leaf];
void init_spill_mask( Node *ret );
// Convert machine register number to register mask
static uint mreg2regmask_max;
@@ -297,6 +298,8 @@
// Register for MODL projection of divmodL
static RegMask modL_proj_mask();
+ static const RegMask method_handle_invoke_SP_save_mask();
+
// Java-Interpreter calling convention
// (what you use when calling between compiled-Java and Interpreted-Java
--- a/hotspot/src/share/vm/opto/output.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/output.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -794,6 +794,7 @@
#endif
int safepoint_pc_offset = current_offset;
+ bool is_method_handle_invoke = false;
// Add the safepoint in the DebugInfoRecorder
if( !mach->is_MachCall() ) {
@@ -801,6 +802,11 @@
debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
} else {
mcall = mach->as_MachCall();
+
+ // Is the call a MethodHandle call?
+ if (mcall->is_MachCallJava())
+ is_method_handle_invoke = mcall->as_MachCallJava()->_method_handle_invoke;
+
safepoint_pc_offset += mcall->ret_addr_offset();
debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
}
@@ -911,9 +917,9 @@
ciMethod* scope_method = method ? method : _method;
// Describe the scope here
assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
- assert(!jvms->should_reexecute() || depth==max_depth, "reexecute allowed only for the youngest");
+ assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
// Now we can describe the scope.
- debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),jvms->should_reexecute(),locvals,expvals,monvals);
+ debug_info()->describe_scope(safepoint_pc_offset, scope_method, jvms->bci(), jvms->should_reexecute(), is_method_handle_invoke, locvals, expvals, monvals);
} // End jvms loop
// Mark the end of the scope set.
--- a/hotspot/src/share/vm/opto/parse.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/parse.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -39,6 +39,7 @@
// Always between 0.0 and 1.0. Represents the percentage of the method's
// total execution time used at this call site.
const float _site_invoke_ratio;
+ const int _site_depth_adjust;
float compute_callee_frequency( int caller_bci ) const;
GrowableArray<InlineTree*> _subtrees;
@@ -50,7 +51,8 @@
ciMethod* callee_method,
JVMState* caller_jvms,
int caller_bci,
- float site_invoke_ratio);
+ float site_invoke_ratio,
+ int site_depth_adjust);
InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
JVMState* caller_jvms,
int caller_bci);
@@ -61,14 +63,15 @@
InlineTree *caller_tree() const { return _caller_tree; }
InlineTree* callee_at(int bci, ciMethod* m) const;
- int inline_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }
+ int inline_depth() const { return stack_depth() + _site_depth_adjust; }
+ int stack_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }
public:
static InlineTree* build_inline_tree_root();
static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);
// For temporary (stack-allocated, stateless) ilts:
- InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio);
+ InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int site_depth_adjust);
// InlineTree enum
enum InlineStyle {
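
The effect of _site_depth_adjust is purely arithmetic. A self-contained sketch of the numbers, assuming MaxInlineLevel's default of 9 and a path through two method-handle/invokedynamic adapter frames:

  #include <cassert>

  int main() {
    const int MaxInlineLevel  = 9;   // assumed default of -XX:MaxInlineLevel
    int stack_depth       = 11;      // JVMS depth, counting MH/indy adapter frames
    int site_depth_adjust = -2;      // one -1 per adapter frame on the path
    int inline_depth      = stack_depth + site_depth_adjust;
    // bytecodeInfo.cpp rejects a site only when inline_depth() > MaxInlineLevel,
    // so adapter frames no longer consume inlining budget.
    assert(inline_depth <= MaxInlineLevel);
    return 0;
  }
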
--- a/hotspot/src/share/vm/opto/parse3.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/parse3.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -125,7 +125,25 @@
void Parse::do_get_xxx(const TypePtr* obj_type, Node* obj, ciField* field, bool is_field) {
// Does this field have a constant value? If so, just push the value.
- if (field->is_constant() && push_constant(field->constant_value())) return;
+ if (field->is_constant()) {
+ if (field->is_static()) {
+ // final static field
+ if (push_constant(field->constant_value()))
+ return;
+ }
+ else {
+ // final non-static field of a trusted class ({java,sun}.dyn
+ // classes).
+ if (obj->is_Con()) {
+ const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
+ ciObject* constant_oop = oop_ptr->const_oop();
+ ciConstant constant = field->constant_value_of(constant_oop);
+
+ if (push_constant(constant, true))
+ return;
+ }
+ }
+ }
ciType* field_klass = field->type();
bool is_vol = field->is_volatile();
@@ -145,7 +163,7 @@
if (!field->type()->is_loaded()) {
type = TypeInstPtr::BOTTOM;
must_assert_null = true;
- } else if (field->is_constant()) {
+ } else if (field->is_constant() && field->is_static()) {
// This can happen if the constant oop is non-perm.
ciObject* con = field->constant_value().as_object();
// Do not "join" in the previous type; it doesn't add value,
--- a/hotspot/src/share/vm/opto/runtime.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/runtime.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1998-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -790,7 +790,7 @@
NOT_PRODUCT(Exceptions::debug_check_abort(exception));
#ifdef ASSERT
- if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
// should throw an exception here
ShouldNotReachHere();
}
@@ -858,6 +858,9 @@
thread->set_exception_pc(pc);
thread->set_exception_handler_pc(handler_address);
thread->set_exception_stack_size(0);
+
+ // Check if the exception PC is a MethodHandle call.
+ thread->set_is_method_handle_exception(nm->is_method_handle_return(pc));
}
// Restore correct return pc. Was saved above.
@@ -936,7 +939,7 @@
#endif
assert (exception != NULL, "should have thrown a NULLPointerException");
#ifdef ASSERT
- if (!(exception->is_a(SystemDictionary::throwable_klass()))) {
+ if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
// should throw an exception here
ShouldNotReachHere();
}
--- a/hotspot/src/share/vm/opto/type.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/opto/type.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -2431,7 +2431,7 @@
//------------------------------make_from_constant-----------------------------
// Make a java pointer from an oop constant
const TypeOopPtr* TypeOopPtr::make_from_constant(ciObject* o, bool require_constant) {
- if (o->is_method_data() || o->is_method()) {
+ if (o->is_method_data() || o->is_method() || o->is_cpcache()) {
// Treat much like a typeArray of bytes, like below, but fake the type...
const Type* etype = (Type*)get_const_basic_type(T_BYTE);
const TypeAry* arr0 = TypeAry::make(etype, TypeInt::POS);
@@ -3966,7 +3966,7 @@
const TypeFunc* tf = C->last_tf(method); // check cache
if (tf != NULL) return tf; // The hit rate here is almost 50%.
const TypeTuple *domain;
- if (method->flags().is_static()) {
+ if (method->is_static()) {
domain = TypeTuple::make_domain(NULL, method->signature());
} else {
domain = TypeTuple::make_domain(method->holder(), method->signature());
--- a/hotspot/src/share/vm/prims/jni.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jni.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -396,11 +396,11 @@
oop mirror = NULL;
int slot = 0;
- if (reflected->klass() == SystemDictionary::reflect_constructor_klass()) {
+ if (reflected->klass() == SystemDictionary::reflect_Constructor_klass()) {
mirror = java_lang_reflect_Constructor::clazz(reflected);
slot = java_lang_reflect_Constructor::slot(reflected);
} else {
- assert(reflected->klass() == SystemDictionary::reflect_method_klass(), "wrong type");
+ assert(reflected->klass() == SystemDictionary::reflect_Method_klass(), "wrong type");
mirror = java_lang_reflect_Method::clazz(reflected);
slot = java_lang_reflect_Method::slot(reflected);
}
@@ -496,7 +496,7 @@
klassOop super = Klass::cast(k)->java_super();
// super2 is the value computed by the compiler's getSuperClass intrinsic:
debug_only(klassOop super2 = ( Klass::cast(k)->oop_is_javaArray()
- ? SystemDictionary::object_klass()
+ ? SystemDictionary::Object_klass()
: Klass::cast(k)->super() ) );
assert(super == super2,
"java_super computation depends on interface, array, other super");
@@ -584,7 +584,7 @@
if (thread->has_pending_exception()) {
Handle ex(thread, thread->pending_exception());
thread->clear_pending_exception();
- if (ex->is_a(SystemDictionary::threaddeath_klass())) {
+ if (ex->is_a(SystemDictionary::ThreadDeath_klass())) {
// Don't print anything if we are being killed.
} else {
jio_fprintf(defaultStream::error_stream(), "Exception ");
@@ -593,12 +593,12 @@
jio_fprintf(defaultStream::error_stream(),
"in thread \"%s\" ", thread->get_thread_name());
}
- if (ex->is_a(SystemDictionary::throwable_klass())) {
+ if (ex->is_a(SystemDictionary::Throwable_klass())) {
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
ex,
KlassHandle(THREAD,
- SystemDictionary::throwable_klass()),
+ SystemDictionary::Throwable_klass()),
vmSymbolHandles::printStackTrace_name(),
vmSymbolHandles::void_method_signature(),
THREAD);
@@ -3241,7 +3241,7 @@
jint b = Atomic::xchg(0xdeadbeef, &a);
void *c = &a;
void *d = Atomic::xchg_ptr(&b, &c);
- assert(a == 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
+ assert(a == (jint) 0xdeadbeef && b == (jint) 0xcafebabe, "Atomic::xchg() works");
assert(c == &b && d == &a, "Atomic::xchg_ptr() works");
}
#endif // ZERO && ASSERT
--- a/hotspot/src/share/vm/prims/jniCheck.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jniCheck.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -341,7 +341,7 @@
ReportJNIFatalError(thr, fatal_received_null_class);
}
- if (mirror->klass() != SystemDictionary::class_klass()) {
+ if (mirror->klass() != SystemDictionary::Class_klass()) {
ReportJNIFatalError(thr, fatal_class_not_a_class);
}
@@ -358,7 +358,7 @@
assert(klass != NULL, "klass argument must have a value");
if (!Klass::cast(klass)->oop_is_instance() ||
- !instanceKlass::cast(klass)->is_subclass_of(SystemDictionary::throwable_klass())) {
+ !instanceKlass::cast(klass)->is_subclass_of(SystemDictionary::Throwable_klass())) {
ReportJNIFatalError(thr, fatal_class_not_a_throwable_class);
}
}
--- a/hotspot/src/share/vm/prims/jvm.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jvm.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -80,7 +80,7 @@
while (!vfst.at_end()) {
methodOop m = vfst.method();
- if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::classloader_klass())&&
+ if (!vfst.method()->method_holder()->klass_part()->is_subclass_of(SystemDictionary::ClassLoader_klass())&&
!vfst.method()->method_holder()->klass_part()->is_subclass_of(access_controller_klass) &&
!vfst.method()->method_holder()->klass_part()->is_subclass_of(privileged_action_klass)) {
break;
@@ -257,7 +257,7 @@
Handle value_str = java_lang_String::create_from_platform_dependent_str((value != NULL ? value : ""), CHECK);
JavaCalls::call_virtual(&r,
props,
- KlassHandle(THREAD, SystemDictionary::properties_klass()),
+ KlassHandle(THREAD, SystemDictionary::Properties_klass()),
vmSymbolHandles::put_name(),
vmSymbolHandles::object_object_object_signature(),
key_str,
@@ -495,7 +495,7 @@
guarantee(klass->is_cloneable(), "all arrays are cloneable");
} else {
guarantee(obj->is_instance(), "should be instanceOop");
- bool cloneable = klass->is_subtype_of(SystemDictionary::cloneable_klass());
+ bool cloneable = klass->is_subtype_of(SystemDictionary::Cloneable_klass());
guarantee(cloneable == klass->is_cloneable(), "incorrect cloneable flag");
}
#endif
@@ -908,7 +908,7 @@
// Special handling for primitive objects
if (java_lang_Class::is_primitive(mirror)) {
// Primitive objects do not have any interfaces
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, r);
}
@@ -923,7 +923,7 @@
}
// Allocate result array
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), size, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), size, CHECK_NULL);
objArrayHandle result (THREAD, r);
// Fill in result
if (klass->oop_is_instance()) {
@@ -934,8 +934,8 @@
}
} else {
// All arrays implement java.lang.Cloneable and java.io.Serializable
- result->obj_at_put(0, Klass::cast(SystemDictionary::cloneable_klass())->java_mirror());
- result->obj_at_put(1, Klass::cast(SystemDictionary::serializable_klass())->java_mirror());
+ result->obj_at_put(0, Klass::cast(SystemDictionary::Cloneable_klass())->java_mirror());
+ result->obj_at_put(1, Klass::cast(SystemDictionary::Serializable_klass())->java_mirror());
}
return (jobjectArray) JNIHandles::make_local(env, result());
JVM_END
@@ -1098,8 +1098,8 @@
pending_exception = Handle(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
- if ( pending_exception->is_a(SystemDictionary::exception_klass()) &&
- !pending_exception->is_a(SystemDictionary::runtime_exception_klass())) {
+ if ( pending_exception->is_a(SystemDictionary::Exception_klass()) &&
+ !pending_exception->is_a(SystemDictionary::RuntimeException_klass())) {
// Throw a java.security.PrivilegedActionException(Exception e) exception
JavaCallArguments args(pending_exception);
THROW_ARG_0(vmSymbolHandles::java_security_PrivilegedActionException(),
@@ -1190,7 +1190,7 @@
// the resource area must be registered in case of a gc
RegisterArrayForGC ragc(thread, local_array);
- objArrayOop context = oopFactory::new_objArray(SystemDictionary::protectionDomain_klass(),
+ objArrayOop context = oopFactory::new_objArray(SystemDictionary::ProtectionDomain_klass(),
local_array->length(), CHECK_NULL);
objArrayHandle h_context(thread, context);
for (int index = 0; index < local_array->length(); index++) {
@@ -1251,7 +1251,7 @@
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
! Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_instance()) {
- oop result = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+ oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
return (jobjectArray)JNIHandles::make_local(env, result);
}
@@ -1259,7 +1259,7 @@
if (k->inner_classes()->length() == 0) {
// Neither an inner nor outer class
- oop result = oopFactory::new_objArray(SystemDictionary::class_klass(), 0, CHECK_NULL);
+ oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
return (jobjectArray)JNIHandles::make_local(env, result);
}
@@ -1269,7 +1269,7 @@
int length = icls->length();
// Allocate temp. result array
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), length/4, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), length/4, CHECK_NULL);
objArrayHandle result (THREAD, r);
int members = 0;
@@ -1299,7 +1299,7 @@
if (members != length) {
// Return array of right length
- objArrayOop res = oopFactory::new_objArray(SystemDictionary::class_klass(), members, CHECK_NULL);
+ objArrayOop res = oopFactory::new_objArray(SystemDictionary::Class_klass(), members, CHECK_NULL);
for(int i = 0; i < members; i++) {
res->obj_at_put(i, result->obj_at(i));
}
@@ -1470,11 +1470,11 @@
oop mirror = NULL;
int slot = 0;
- if (reflected->klass() == SystemDictionary::reflect_constructor_klass()) {
+ if (reflected->klass() == SystemDictionary::reflect_Constructor_klass()) {
mirror = java_lang_reflect_Constructor::clazz(reflected);
slot = java_lang_reflect_Constructor::slot(reflected);
} else {
- assert(reflected->klass() == SystemDictionary::reflect_method_klass(),
+ assert(reflected->klass() == SystemDictionary::reflect_Method_klass(),
"wrong type");
mirror = java_lang_reflect_Method::clazz(reflected);
slot = java_lang_reflect_Method::slot(reflected);
@@ -1530,7 +1530,7 @@
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass)) ||
Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_field_klass(), 0, CHECK_NULL);
+ oop res = oopFactory::new_objArray(SystemDictionary::reflect_Field_klass(), 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1558,13 +1558,13 @@
} else {
num_fields = fields_len / instanceKlass::next_offset;
- if (k() == SystemDictionary::throwable_klass()) {
+ if (k() == SystemDictionary::Throwable_klass()) {
num_fields--;
skip_backtrace = true;
}
}
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_field_klass(), num_fields, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Field_klass(), num_fields, CHECK_NULL);
objArrayHandle result (THREAD, r);
int out_idx = 0;
@@ -1598,7 +1598,7 @@
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
|| Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_method_klass(), 0, CHECK_NULL);
+ oop res = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), 0, CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1622,7 +1622,7 @@
}
// Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_method_klass(), num_methods, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Method_klass(), num_methods, CHECK_NULL);
objArrayHandle result (THREAD, r);
int out_idx = 0;
@@ -1650,7 +1650,7 @@
if (java_lang_Class::is_primitive(JNIHandles::resolve_non_null(ofClass))
|| Klass::cast(java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)))->oop_is_javaArray()) {
// Return empty array
- oop res = oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), 0 , CHECK_NULL);
+ oop res = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0 , CHECK_NULL);
return (jobjectArray) JNIHandles::make_local(env, res);
}
@@ -1674,7 +1674,7 @@
}
// Allocate result
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), num_constructors, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), num_constructors, CHECK_NULL);
objArrayHandle result(THREAD, r);
int out_idx = 0;
@@ -1887,7 +1887,7 @@
symbolHandle klass_name (THREAD, cp->klass_name_at(klass_ref));
symbolHandle member_name(THREAD, cp->uncached_name_ref_at(index));
symbolHandle member_sig (THREAD, cp->uncached_signature_ref_at(index));
- objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::string_klass(), 3, CHECK_NULL);
+ objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::String_klass(), 3, CHECK_NULL);
objArrayHandle dest(THREAD, dest_o);
Handle str = java_lang_String::create_from_symbol(klass_name, CHECK_NULL);
dest->obj_at_put(0, str());
@@ -2575,7 +2575,7 @@
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
obj,
- KlassHandle(THREAD, SystemDictionary::thread_klass()),
+ KlassHandle(THREAD, SystemDictionary::Thread_klass()),
vmSymbolHandles::run_method_name(),
vmSymbolHandles::void_method_signature(),
THREAD);
@@ -2673,7 +2673,7 @@
// Fix for 4314342, 4145910, perhaps others: it now doesn't have
// any effect on the "liveness" of a thread; see
// JVM_IsThreadAlive, below.
- if (java_throwable->is_a(SystemDictionary::threaddeath_klass())) {
+ if (java_throwable->is_a(SystemDictionary::ThreadDeath_klass())) {
java_lang_Thread::set_stillborn(java_thread);
}
THROW_OOP(java_throwable);
@@ -3028,7 +3028,7 @@
}
// Create result array of type [Ljava/lang/Class;
- objArrayOop result = oopFactory::new_objArray(SystemDictionary::class_klass(), depth, CHECK_NULL);
+ objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
// Fill in mirrors corresponding to method holders
int index = 0;
while (first != NULL) {
@@ -4324,7 +4324,7 @@
JvmtiVMObjectAllocEventCollector oam;
int num_threads = tle.num_threads();
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::thread_klass(), num_threads, CHECK_NULL);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Thread_klass(), num_threads, CHECK_NULL);
objArrayHandle threads_ah(THREAD, r);
for (int i = 0; i < num_threads; i++) {
@@ -4358,7 +4358,7 @@
// check if threads is not an array of objects of Thread class
klassOop k = objArrayKlass::cast(ah->klass())->element_klass();
- if (k != SystemDictionary::thread_klass()) {
+ if (k != SystemDictionary::Thread_klass()) {
THROW_(vmSymbols::java_lang_IllegalArgumentException(), 0);
}
@@ -4418,7 +4418,7 @@
if (encl_method_class_idx == 0) {
return NULL;
}
- objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::object_klass(), 3, CHECK_NULL);
+ objArrayOop dest_o = oopFactory::new_objArray(SystemDictionary::Object_klass(), 3, CHECK_NULL);
objArrayHandle dest(THREAD, dest_o);
klassOop enc_k = ik_h->constants()->klass_at(encl_method_class_idx, CHECK_NULL);
dest->obj_at_put(0, Klass::cast(enc_k)->java_mirror());
@@ -4532,7 +4532,7 @@
values_h->int_at(0) == java_lang_Thread::NEW,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4545,7 +4545,7 @@
values_h->int_at(0) == java_lang_Thread::RUNNABLE,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4558,7 +4558,7 @@
values_h->int_at(0) == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4571,7 +4571,7 @@
values_h->int_at(0) == java_lang_Thread::IN_OBJECT_WAIT &&
values_h->int_at(1) == java_lang_Thread::PARKED,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
2, /* number of substates */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4589,7 +4589,7 @@
values_h->int_at(1) == java_lang_Thread::IN_OBJECT_WAIT_TIMED &&
values_h->int_at(2) == java_lang_Thread::PARKED_TIMED,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
3, /* number of substates */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4608,7 +4608,7 @@
assert(values_h->length() == 1 &&
values_h->int_at(0) == java_lang_Thread::TERMINATED,
"Invalid threadStatus value");
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
1, /* only 1 substate */
CHECK_NULL);
names_h = objArrayHandle(THREAD, r);
@@ -4643,4 +4643,3 @@
#endif // KERNEL
}
JVM_END
-
--- a/hotspot/src/share/vm/prims/jvmtiEnter.xsl Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnter.xsl Fri Jan 15 14:25:44 2010 -0800
@@ -773,7 +773,7 @@
</xsl:apply-templates>
<xsl:text>
}
- if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
</xsl:text>
<xsl:apply-templates select=".." mode="traceError">
<xsl:with-param name="err">JVMTI_ERROR_INVALID_THREAD</xsl:with-param>
@@ -857,7 +857,7 @@
</xsl:apply-templates>
<xsl:text>
}
- if (!k_mirror->is_a(SystemDictionary::class_klass())) {
+ if (!k_mirror->is_a(SystemDictionary::Class_klass())) {
</xsl:text>
<xsl:apply-templates select=".." mode="traceError">
<xsl:with-param name="err">JVMTI_ERROR_INVALID_CLASS</xsl:with-param>
--- a/hotspot/src/share/vm/prims/jvmtiEnv.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnv.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -133,7 +133,7 @@
if (thread_oop == NULL) {
return JVMTI_ERROR_INVALID_THREAD;
}
- if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
JavaThread* java_thread = java_lang_Thread::thread(thread_oop);
@@ -199,7 +199,7 @@
if (k_mirror == NULL) {
return JVMTI_ERROR_INVALID_CLASS;
}
- if (!k_mirror->is_a(SystemDictionary::class_klass())) {
+ if (!k_mirror->is_a(SystemDictionary::Class_klass())) {
return JVMTI_ERROR_INVALID_CLASS;
}
@@ -266,7 +266,7 @@
oop mirror = JNIHandles::resolve_external_guard(object);
NULL_CHECK(mirror, JVMTI_ERROR_INVALID_OBJECT);
- if (mirror->klass() == SystemDictionary::class_klass()) {
+ if (mirror->klass() == SystemDictionary::Class_klass()) {
if (!java_lang_Class::is_primitive(mirror)) {
mirror = java_lang_Class::as_klassOop(mirror);
assert(mirror != NULL, "class for non-primitive mirror must exist");
@@ -327,7 +327,7 @@
if (thread_oop == NULL) {
return JVMTI_ERROR_INVALID_THREAD;
}
- if (!thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (!thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
java_thread = java_lang_Thread::thread(thread_oop);
@@ -592,7 +592,6 @@
break;
case JVMTI_VERBOSE_GC:
PrintGC = value != 0;
- TraceClassUnloading = value != 0;
break;
case JVMTI_VERBOSE_JNI:
PrintJNIResolving = value != 0;
@@ -632,7 +631,7 @@
thread_oop = JNIHandles::resolve_external_guard(thread);
}
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
@@ -870,7 +869,7 @@
jvmtiError
JvmtiEnv::InterruptThread(jthread thread) {
oop thread_oop = JNIHandles::resolve_external_guard(thread);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass()))
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
return JVMTI_ERROR_INVALID_THREAD;
JavaThread* current_thread = JavaThread::current();
@@ -907,7 +906,7 @@
} else {
thread_oop = JNIHandles::resolve_external_guard(thread);
}
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass()))
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass()))
return JVMTI_ERROR_INVALID_THREAD;
Handle thread_obj(current_thread, thread_oop);
@@ -1073,7 +1072,7 @@
jvmtiError
JvmtiEnv::RunAgentThread(jthread thread, jvmtiStartFunction proc, const void* arg, jint priority) {
oop thread_oop = JNIHandles::resolve_external_guard(thread);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
return JVMTI_ERROR_INVALID_THREAD;
}
if (priority < JVMTI_THREAD_MIN_PRIORITY || priority > JVMTI_THREAD_MAX_PRIORITY) {
--- a/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiEnvBase.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -527,7 +527,7 @@
JavaThread *
JvmtiEnvBase::get_JavaThread(jthread jni_thread) {
oop t = JNIHandles::resolve_external_guard(jni_thread);
- if (t == NULL || !t->is_a(SystemDictionary::thread_klass())) {
+ if (t == NULL || !t->is_a(SystemDictionary::Thread_klass())) {
return NULL;
}
// The following returns NULL if the thread has not yet run or is in
@@ -1269,7 +1269,7 @@
for (int i = 0; i < _thread_count; ++i) {
jthread jt = _thread_list[i];
oop thread_oop = JNIHandles::resolve_external_guard(jt);
- if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::thread_klass())) {
+ if (thread_oop == NULL || !thread_oop->is_a(SystemDictionary::Thread_klass())) {
set_result(JVMTI_ERROR_INVALID_THREAD);
return;
}
--- a/hotspot/src/share/vm/prims/jvmtiExport.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiExport.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -656,7 +656,7 @@
klassOop k = obj->klass();
// if the object is a java.lang.Class then return the java mirror
- if (k == SystemDictionary::class_klass()) {
+ if (k == SystemDictionary::Class_klass()) {
if (!java_lang_Class::is_primitive(obj)) {
k = java_lang_Class::as_klassOop(obj);
assert(k != NULL, "class for non-primitive mirror must exist");
@@ -1925,7 +1925,7 @@
if (collector != NULL && collector->is_enabled()) {
// Don't record classes as these will be notified via the ClassLoad
// event.
- if (obj->klass() != SystemDictionary::class_klass()) {
+ if (obj->klass() != SystemDictionary::Class_klass()) {
collector->record_allocation(obj);
}
}
--- a/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -3214,7 +3214,7 @@
// - all instanceKlasses for redefined classes reused & contents updated
the_class->vtable()->initialize_vtable(false, THREAD);
the_class->itable()->initialize_itable(false, THREAD);
- assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::threaddeath_klass())), "redefine exception");
+ assert(!HAS_PENDING_EXCEPTION || (THREAD->pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())), "redefine exception");
}
// Leave arrays of jmethodIDs and itable index cache unchanged
--- a/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/jvmtiTagMap.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -579,7 +579,7 @@
// If the object is a java.lang.Class then return the klassOop,
// otherwise return the original object
static inline oop klassOop_if_java_lang_Class(oop o) {
- if (o->klass() == SystemDictionary::class_klass()) {
+ if (o->klass() == SystemDictionary::Class_klass()) {
if (!java_lang_Class::is_primitive(o)) {
o = (oop)java_lang_Class::as_klassOop(o);
assert(o != NULL, "class for non-primitive mirror must exist");
@@ -644,7 +644,7 @@
} else {
// if the object represents a runtime class then use the
// tag for java.lang.Class
- _klass = SystemDictionary::class_klass();
+ _klass = SystemDictionary::Class_klass();
}
_klass_tag = tag_for(tag_map, _klass);
}
@@ -747,7 +747,7 @@
// get referrer class tag.
klassOop k = (_referrer == referrer) ? // Check if referrer is a class...
_referrer->klass() // No, just get its class
- : SystemDictionary::class_klass(); // Yes, its class is Class
+ : SystemDictionary::Class_klass(); // Yes, its class is Class
_referrer_klass_tag = tag_for(tag_map, k);
}
}
@@ -1126,7 +1126,7 @@
oop str,
void* user_data)
{
- assert(str->klass() == SystemDictionary::string_klass(), "not a string");
+ assert(str->klass() == SystemDictionary::String_klass(), "not a string");
// get the string value and length
// (string value may be offset from the base)
@@ -1186,7 +1186,7 @@
// for static fields only the index will be set
static jvmtiHeapReferenceInfo reference_info = { 0 };
- assert(obj->klass() == SystemDictionary::class_klass(), "not a class");
+ assert(obj->klass() == SystemDictionary::Class_klass(), "not a class");
if (java_lang_Class::is_primitive(obj)) {
return 0;
}
@@ -1498,7 +1498,7 @@
if (callbacks()->primitive_field_callback != NULL && obj->is_instance()) {
jint res;
jvmtiPrimitiveFieldCallback cb = callbacks()->primitive_field_callback;
- if (obj->klass() == SystemDictionary::class_klass()) {
+ if (obj->klass() == SystemDictionary::Class_klass()) {
res = invoke_primitive_field_callback_for_static_fields(&wrapper,
obj,
cb,
@@ -1515,7 +1515,7 @@
// string callback
if (!is_array &&
callbacks()->string_primitive_value_callback != NULL &&
- obj->klass() == SystemDictionary::string_klass()) {
+ obj->klass() == SystemDictionary::String_klass()) {
jint res = invoke_string_value_callback(
callbacks()->string_primitive_value_callback,
&wrapper,
@@ -2381,7 +2381,7 @@
// invoke the string value callback
inline bool CallbackInvoker::report_string_value(oop str) {
- assert(str->klass() == SystemDictionary::string_klass(), "not a string");
+ assert(str->klass() == SystemDictionary::String_klass(), "not a string");
AdvancedHeapWalkContext* context = advanced_context();
assert(context->string_primitive_value_callback() != NULL, "no callback");
@@ -2928,7 +2928,7 @@
// super (only if something more interesting than java.lang.Object)
klassOop java_super = ik->java_super();
- if (java_super != NULL && java_super != SystemDictionary::object_klass()) {
+ if (java_super != NULL && java_super != SystemDictionary::Object_klass()) {
oop super = Klass::cast(java_super)->java_mirror();
if (!CallbackInvoker::report_superclass_reference(mirror, super)) {
return false;
@@ -3070,7 +3070,7 @@
// if the object is a java.lang.String
if (is_reporting_string_values() &&
- o->klass() == SystemDictionary::string_klass()) {
+ o->klass() == SystemDictionary::String_klass()) {
if (!CallbackInvoker::report_string_value(o)) {
return false;
}
@@ -3255,7 +3255,7 @@
// instance
if (o->is_instance()) {
- if (o->klass() == SystemDictionary::class_klass()) {
+ if (o->klass() == SystemDictionary::Class_klass()) {
o = klassOop_if_java_lang_Class(o);
if (o->is_klass()) {
// a java.lang.Class
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,1401 @@
+/*
+ * Copyright 2008-2010 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/*
+ * JSR 292 reference implementation: method handle structure analysis
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_methodHandleWalk.cpp.incl"
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleChain
+
+void MethodHandleChain::set_method_handle(Handle mh, TRAPS) {
+ if (!java_dyn_MethodHandle::is_instance(mh())) lose("bad method handle", CHECK);
+
+ // set current method handle and unpack partially
+ _method_handle = mh;
+ _is_last = false;
+ _is_bound = false;
+ _arg_slot = -1;
+ _arg_type = T_VOID;
+ _conversion = -1;
+ _last_invoke = Bytecodes::_nop; // arbitrary non-garbage
+
+ if (sun_dyn_DirectMethodHandle::is_instance(mh())) {
+ set_last_method(mh(), THREAD);
+ return;
+ }
+ if (sun_dyn_AdapterMethodHandle::is_instance(mh())) {
+ _conversion = AdapterMethodHandle_conversion();
+ assert(_conversion != -1, "bad conv value");
+ assert(sun_dyn_BoundMethodHandle::is_instance(mh()), "also BMH");
+ }
+ if (sun_dyn_BoundMethodHandle::is_instance(mh())) {
+ if (!is_adapter()) // keep AMH and BMH separate in this model
+ _is_bound = true;
+ _arg_slot = BoundMethodHandle_vmargslot();
+ oop target = MethodHandle_vmtarget_oop();
+ if (!is_bound() || java_dyn_MethodHandle::is_instance(target)) {
+ _arg_type = compute_bound_arg_type(target, NULL, _arg_slot, CHECK);
+ } else if (target != NULL && target->is_method()) {
+ methodOop m = (methodOop) target;
+ _arg_type = compute_bound_arg_type(NULL, m, _arg_slot, CHECK);
+ set_last_method(mh(), CHECK);
+ } else {
+ _is_bound = false; // lose!
+ }
+ }
+ if (is_bound() && _arg_type == T_VOID) {
+ lose("bad vmargslot", CHECK);
+ }
+ if (!is_bound() && !is_adapter()) {
+ lose("unrecognized MH type", CHECK);
+ }
+}
+
+
+void MethodHandleChain::set_last_method(oop target, TRAPS) {
+ _is_last = true;
+ klassOop receiver_limit_oop = NULL;
+ int flags = 0;
+ methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags);
+ _last_method = methodHandle(THREAD, m);
+ if ((flags & MethodHandles::_dmf_has_receiver) == 0)
+ _last_invoke = Bytecodes::_invokestatic;
+ else if ((flags & MethodHandles::_dmf_does_dispatch) == 0)
+ _last_invoke = Bytecodes::_invokespecial;
+ else if ((flags & MethodHandles::_dmf_from_interface) != 0)
+ _last_invoke = Bytecodes::_invokeinterface;
+ else
+ _last_invoke = Bytecodes::_invokevirtual;
+}
+
+
+BasicType MethodHandleChain::compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS) {
+ // There is no direct indication of whether the argument is primitive or not.
+ // It is implied by the _vmentry code, and by the MethodType of the target.
+ // FIXME: Make this explicit when MethodHandleImpl is refactored out from MethodHandle.
+ BasicType arg_type = T_VOID;
+ if (target != NULL) {
+ oop mtype = java_dyn_MethodHandle::type(target);
+ int arg_num = MethodHandles::argument_slot_to_argnum(mtype, arg_slot);
+ if (arg_num >= 0) {
+ oop ptype = java_dyn_MethodType::ptype(mtype, arg_num);
+ arg_type = java_lang_Class::as_BasicType(ptype);
+ }
+ } else if (m != NULL) {
+ // figure out the argument type from the slot
+ // FIXME: make this explicit in the MH
+ int cur_slot = m->size_of_parameters();
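+ // cur_slot counts down from the total parameter size; the parameter whose
+ // slot lands exactly on arg_slot is the bound argument.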
+ if (arg_slot >= cur_slot)
+ return T_VOID;
+ if (!m->is_static()) {
+ cur_slot -= type2size[T_OBJECT];
+ if (cur_slot == arg_slot)
+ return T_OBJECT;
+ }
+ for (SignatureStream ss(m->signature()); !ss.is_done(); ss.next()) {
+ BasicType bt = ss.type();
+ cur_slot -= type2size[bt];
+ if (cur_slot <= arg_slot) {
+ if (cur_slot == arg_slot)
+ arg_type = bt;
+ break;
+ }
+ }
+ }
+ if (arg_type == T_ARRAY)
+ arg_type = T_OBJECT;
+ return arg_type;
+}
+
+
+void MethodHandleChain::lose(const char* msg, TRAPS) {
+ assert(false, "lose");
+ _lose_message = msg;
+ if (!THREAD->is_Java_thread() || ((JavaThread*)THREAD)->thread_state() != _thread_in_vm) {
+ // throw a preallocated exception
+ THROW_OOP(Universe::virtual_machine_error_instance());
+ }
+ THROW_MSG(vmSymbols::java_lang_InternalError(), msg);
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker
+
+Bytecodes::Code MethodHandleWalker::conversion_code(BasicType src, BasicType dest) {
+ if (is_subword_type(src)) {
+ src = T_INT; // all subword src types act like int
+ }
+ if (src == dest) {
+ return Bytecodes::_nop;
+ }
+
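+// Pack the (src, dest) pair into one integer so a single switch covers every conversion pair.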
+#define SRC_DEST(s,d) (((int)(s) << 4) + (int)(d))
+ switch (SRC_DEST(src, dest)) {
+ case SRC_DEST(T_INT, T_LONG): return Bytecodes::_i2l;
+ case SRC_DEST(T_INT, T_FLOAT): return Bytecodes::_i2f;
+ case SRC_DEST(T_INT, T_DOUBLE): return Bytecodes::_i2d;
+ case SRC_DEST(T_INT, T_BYTE): return Bytecodes::_i2b;
+ case SRC_DEST(T_INT, T_CHAR): return Bytecodes::_i2c;
+ case SRC_DEST(T_INT, T_SHORT): return Bytecodes::_i2s;
+
+ case SRC_DEST(T_LONG, T_INT): return Bytecodes::_l2i;
+ case SRC_DEST(T_LONG, T_FLOAT): return Bytecodes::_l2f;
+ case SRC_DEST(T_LONG, T_DOUBLE): return Bytecodes::_l2d;
+
+ case SRC_DEST(T_FLOAT, T_INT): return Bytecodes::_f2i;
+ case SRC_DEST(T_FLOAT, T_LONG): return Bytecodes::_f2l;
+ case SRC_DEST(T_FLOAT, T_DOUBLE): return Bytecodes::_f2d;
+
+ case SRC_DEST(T_DOUBLE, T_INT): return Bytecodes::_d2i;
+ case SRC_DEST(T_DOUBLE, T_LONG): return Bytecodes::_d2l;
+ case SRC_DEST(T_DOUBLE, T_FLOAT): return Bytecodes::_d2f;
+ }
+#undef SRC_DEST
+
+ // cannot do it in one step, or at all
+ return Bytecodes::_illegal;
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker::walk
+//
+MethodHandleWalker::ArgToken
+MethodHandleWalker::walk(TRAPS) {
+ ArgToken empty = ArgToken(); // Empty return value.
+
+ walk_incoming_state(CHECK_(empty));
+
+ for (;;) {
+ set_method_handle(chain().method_handle_oop());
+
+ assert(_outgoing_argc == argument_count_slow(), "empty slots under control");
+
+ if (chain().is_adapter()) {
+ int conv_op = chain().adapter_conversion_op();
+ int arg_slot = chain().adapter_arg_slot();
+ SlotState* arg_state = slot_state(arg_slot);
+ if (arg_state == NULL
+ && conv_op > sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW) {
+ lose("bad argument index", CHECK_(empty));
+ }
+
+ // perform the adapter action
+ switch (chain().adapter_conversion_op()) {
+ case sun_dyn_AdapterMethodHandle::OP_RETYPE_ONLY:
+ // No changes to arguments; pass the bits through.
+ break;
+
+ case sun_dyn_AdapterMethodHandle::OP_RETYPE_RAW: {
+ // To keep the verifier happy, emit bitwise ("raw") conversions as needed.
+ // See MethodHandles::same_basic_type_for_arguments for allowed conversions.
+ Handle incoming_mtype(THREAD, chain().method_type_oop());
+ oop outgoing_mh_oop = chain().vmtarget_oop();
+ if (!java_dyn_MethodHandle::is_instance(outgoing_mh_oop))
+ lose("outgoing target not a MethodHandle", CHECK_(empty));
+ Handle outgoing_mtype(THREAD, java_dyn_MethodHandle::type(outgoing_mh_oop));
+ outgoing_mh_oop = NULL; // GC safety
+
+ int nptypes = java_dyn_MethodType::ptype_count(outgoing_mtype());
+ if (nptypes != java_dyn_MethodType::ptype_count(incoming_mtype()))
+ lose("incoming and outgoing parameter count do not agree", CHECK_(empty));
+
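+ // 'slot' walks the outgoing slots from the top of the list; 'i' counts the
+ // non-void parameters in step (see the i++ at the bottom of the loop).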
+ for (int i = 0, slot = _outgoing.length() - 1; slot >= 0; slot--) {
+ SlotState* arg_state = slot_state(slot);
+ if (arg_state->_type == T_VOID) continue;
+ ArgToken arg = _outgoing.at(slot)._arg;
+
+ klassOop in_klass = NULL;
+ klassOop out_klass = NULL;
+ BasicType inpbt = java_lang_Class::as_BasicType(java_dyn_MethodType::ptype(incoming_mtype(), i), &in_klass);
+ BasicType outpbt = java_lang_Class::as_BasicType(java_dyn_MethodType::ptype(outgoing_mtype(), i), &out_klass);
+ assert(inpbt == arg.basic_type(), "sanity");
+
+ if (inpbt != outpbt) {
+ vmIntrinsics::ID iid = vmIntrinsics::for_raw_conversion(inpbt, outpbt);
+ if (iid == vmIntrinsics::_none) {
+ lose("no raw conversion method", CHECK_(empty));
+ }
+ ArgToken arglist[2];
+ arglist[0] = arg; // outgoing 'this'
+ arglist[1] = ArgToken(); // sentinel
+ arg = make_invoke(NULL, iid, Bytecodes::_invokestatic, false, 1, &arglist[0], CHECK_(empty));
+ change_argument(inpbt, slot, outpbt, arg);
+ }
+
+ i++; // We need to skip void slots at the top of the loop.
+ }
+
+ BasicType inrbt = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(incoming_mtype()));
+ BasicType outrbt = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(outgoing_mtype()));
+ if (inrbt != outrbt) {
+ if (inrbt == T_INT && outrbt == T_VOID) {
+ // See comments in MethodHandles::same_basic_type_for_arguments.
+ } else {
+ assert(false, "IMPLEMENT ME");
+ lose("no raw conversion method", CHECK_(empty));
+ }
+ }
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_CHECK_CAST: {
+ // checkcast the Nth outgoing argument in place
+ klassOop dest_klass = NULL;
+ BasicType dest = java_lang_Class::as_BasicType(chain().adapter_arg_oop(), &dest_klass);
+ assert(dest == T_OBJECT, "");
+ assert(dest == arg_state->_type, "");
+ ArgToken arg = arg_state->_arg;
+ ArgToken new_arg = make_conversion(T_OBJECT, dest_klass, Bytecodes::_checkcast, arg, CHECK_(empty));
+ assert(arg.index() == new_arg.index(), "should be the same index");
+ debug_only(dest_klass = (klassOop)badOop);
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_PRIM_TO_PRIM: {
+ // i2l, etc., on the Nth outgoing argument in place
+ BasicType src = chain().adapter_conversion_src_type(),
+ dest = chain().adapter_conversion_dest_type();
+ Bytecodes::Code bc = conversion_code(src, dest);
+ ArgToken arg = arg_state->_arg;
+ if (bc == Bytecodes::_nop) {
+ break;
+ } else if (bc != Bytecodes::_illegal) {
+ arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty));
+ } else if (is_subword_type(dest)) {
+ bc = conversion_code(src, T_INT);
+ if (bc != Bytecodes::_illegal) {
+ arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty));
+ bc = conversion_code(T_INT, dest);
+ arg = make_conversion(dest, NULL, bc, arg, CHECK_(empty));
+ }
+ }
+ if (bc == Bytecodes::_illegal) {
+ lose("bad primitive conversion", CHECK_(empty));
+ }
+ change_argument(src, arg_slot, dest, arg);
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_REF_TO_PRIM: {
+ // checkcast to wrapper type & call intValue, etc.
+ BasicType dest = chain().adapter_conversion_dest_type();
+ ArgToken arg = arg_state->_arg;
+ arg = make_conversion(T_OBJECT, SystemDictionary::box_klass(dest),
+ Bytecodes::_checkcast, arg, CHECK_(empty));
+ vmIntrinsics::ID unboxer = vmIntrinsics::for_unboxing(dest);
+ if (unboxer == vmIntrinsics::_none) {
+ lose("no unboxing method", CHECK_(empty));
+ }
+ ArgToken arglist[2];
+ arglist[0] = arg; // outgoing 'this'
+ arglist[1] = ArgToken(); // sentinel
+ arg = make_invoke(NULL, unboxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
+ change_argument(T_OBJECT, arg_slot, dest, arg);
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_PRIM_TO_REF: {
+ // call wrapper type.valueOf
+ BasicType src = chain().adapter_conversion_src_type();
+ ArgToken arg = arg_state->_arg;
+ vmIntrinsics::ID boxer = vmIntrinsics::for_boxing(src);
+ if (boxer == vmIntrinsics::_none) {
+ lose("no boxing method", CHECK_(empty));
+ }
+ ArgToken arglist[2];
+ arglist[0] = arg; // outgoing value
+ arglist[1] = ArgToken(); // sentinel
+ assert(false, "I think the argument count must be 1 instead of 0");
+ arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 0, &arglist[0], CHECK_(empty));
+ change_argument(src, arg_slot, T_OBJECT, arg);
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_SWAP_ARGS: {
+ int dest_arg_slot = chain().adapter_conversion_vminfo();
+ if (!slot_has_argument(dest_arg_slot)) {
+ lose("bad swap index", CHECK_(empty));
+ }
+ // a simple swap between two arguments
+ SlotState* dest_arg_state = slot_state(dest_arg_slot);
+ SlotState temp = (*dest_arg_state);
+ (*dest_arg_state) = (*arg_state);
+ (*arg_state) = temp;
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_ROT_ARGS: {
+ int dest_arg_slot = chain().adapter_conversion_vminfo();
+ if (!slot_has_argument(dest_arg_slot) || arg_slot == dest_arg_slot) {
+ lose("bad rotate index", CHECK_(empty));
+ }
+ SlotState* dest_arg_state = slot_state(dest_arg_slot);
+ // Rotate the source argument (plus following N slots) into the
+ // position occupied by the dest argument (plus following N slots).
+ int rotate_count = type2size[dest_arg_state->_type];
+ // (no other rotate counts are currently supported)
+ if (arg_slot < dest_arg_slot) {
+ for (int i = 0; i < rotate_count; i++) {
+ SlotState temp = _outgoing.at(arg_slot);
+ _outgoing.remove_at(arg_slot);
+ _outgoing.insert_before(dest_arg_slot + rotate_count - 1, temp);
+ }
+ } else { // arg_slot > dest_arg_slot
+ for (int i = 0; i < rotate_count; i++) {
+ SlotState temp = _outgoing.at(arg_slot + rotate_count - 1);
+ _outgoing.remove_at(arg_slot + rotate_count - 1);
+ _outgoing.insert_before(dest_arg_slot, temp);
+ }
+ }
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_DUP_ARGS: {
+ int dup_slots = chain().adapter_conversion_stack_pushes();
+ if (dup_slots <= 0) {
+ lose("bad dup count", CHECK_(empty));
+ }
+ for (int i = 0; i < dup_slots; i++) {
+ SlotState* dup = slot_state(arg_slot + 2*i);
+ if (dup == NULL) break; // safety net
+ if (dup->_type != T_VOID) _outgoing_argc += 1;
+ _outgoing.insert_before(i, (*dup));
+ }
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_DROP_ARGS: {
+ int drop_slots = -chain().adapter_conversion_stack_pushes();
+ if (drop_slots <= 0) {
+ lose("bad drop count", CHECK_(empty));
+ }
+ for (int i = 0; i < drop_slots; i++) {
+ SlotState* drop = slot_state(arg_slot);
+ if (drop == NULL) break; // safety net
+ if (drop->_type != T_VOID) _outgoing_argc -= 1;
+ _outgoing.remove_at(arg_slot);
+ }
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_COLLECT_ARGS: { //NYI, may GC
+ lose("unimplemented", CHECK_(empty));
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_SPREAD_ARGS: {
+ klassOop array_klass_oop = NULL;
+ BasicType array_type = java_lang_Class::as_BasicType(chain().adapter_arg_oop(),
+ &array_klass_oop);
+ assert(array_type == T_OBJECT, "");
+ assert(Klass::cast(array_klass_oop)->oop_is_array(), "");
+ arrayKlassHandle array_klass(THREAD, array_klass_oop);
+ debug_only(array_klass_oop = (klassOop)badOop);
+
+ klassOop element_klass_oop = NULL;
+ BasicType element_type = java_lang_Class::as_BasicType(array_klass->component_mirror(),
+ &element_klass_oop);
+ KlassHandle element_klass(THREAD, element_klass_oop);
+ debug_only(element_klass_oop = (klassOop)badOop);
+
+ // Fetch the argument, which we will cast to the required array type.
+ assert(arg_state->_type == T_OBJECT, "");
+ ArgToken array_arg = arg_state->_arg;
+ array_arg = make_conversion(T_OBJECT, array_klass(), Bytecodes::_checkcast, array_arg, CHECK_(empty));
+ change_argument(T_OBJECT, arg_slot, T_VOID, ArgToken(tt_void));
+
+ // Check the required length.
+ int spread_slots = 1 + chain().adapter_conversion_stack_pushes();
+ int spread_length = spread_slots;
+ if (type2size[element_type] == 2) {
+ if (spread_slots % 2 != 0) spread_slots = -1; // force error
+ spread_length = spread_slots / 2;
+ }
+ if (spread_slots < 0) {
+ lose("bad spread length", CHECK_(empty));
+ }
+
+ jvalue length_jvalue; length_jvalue.i = spread_length;
+ ArgToken length_arg = make_prim_constant(T_INT, &length_jvalue, CHECK_(empty));
+ // Call a built-in method known to the JVM to validate the length.
+ ArgToken arglist[3];
+ arglist[0] = array_arg; // value to check
+ arglist[1] = length_arg; // length to check
+ arglist[2] = ArgToken(); // sentinel
+ make_invoke(NULL, vmIntrinsics::_checkSpreadArgument,
+ Bytecodes::_invokestatic, false, 3, &arglist[0], CHECK_(empty));
+
+ // Spread out the array elements.
+ Bytecodes::Code aload_op = Bytecodes::_aaload;
+ if (element_type != T_OBJECT) {
+ lose("primitive array NYI", CHECK_(empty));
+ }
+ int ap = arg_slot;
+ for (int i = 0; i < spread_length; i++) {
+ jvalue offset_jvalue; offset_jvalue.i = i;
+ ArgToken offset_arg = make_prim_constant(T_INT, &offset_jvalue, CHECK_(empty));
+ ArgToken element_arg = make_fetch(element_type, element_klass(), aload_op, array_arg, offset_arg, CHECK_(empty));
+ change_argument(T_VOID, ap, element_type, element_arg);
+ ap += type2size[element_type];
+ }
+ break;
+ }
+
+ case sun_dyn_AdapterMethodHandle::OP_FLYBY: //NYI, runs Java code
+ case sun_dyn_AdapterMethodHandle::OP_RICOCHET: //NYI, runs Java code
+ lose("unimplemented", CHECK_(empty));
+ break;
+
+ default:
+ lose("bad adapter conversion", CHECK_(empty));
+ break;
+ }
+ }
+
+ if (chain().is_bound()) {
+ // push a new argument
+ BasicType arg_type = chain().bound_arg_type();
+ jint arg_slot = chain().bound_arg_slot();
+ oop arg_oop = chain().bound_arg_oop();
+ ArgToken arg;
+ if (arg_type == T_OBJECT) {
+ arg = make_oop_constant(arg_oop, CHECK_(empty));
+ } else {
+ jvalue arg_value;
+ BasicType bt = java_lang_boxing_object::get_value(arg_oop, &arg_value);
+ if (bt == arg_type) {
+ arg = make_prim_constant(arg_type, &arg_value, CHECK_(empty));
+ } else {
+ lose("bad bound value", CHECK_(empty));
+ }
+ }
+ debug_only(arg_oop = badOop);
+ change_argument(T_VOID, arg_slot, arg_type, arg);
+ }
+
+ // this test must come after the body of the loop
+ if (!chain().is_last()) {
+ chain().next(CHECK_(empty));
+ } else {
+ break;
+ }
+ }
+
+ // finish the sequence with a tail-call to the ultimate target
+ // parameters are passed in logical order (recv 1st), not slot order
+ ArgToken* arglist = NEW_RESOURCE_ARRAY(ArgToken, _outgoing.length() + 1);
+ int ap = 0;
+ for (int i = _outgoing.length() - 1; i >= 0; i--) {
+ SlotState* arg_state = slot_state(i);
+ if (arg_state->_type == T_VOID) continue;
+ arglist[ap++] = _outgoing.at(i)._arg;
+ }
+ assert(ap == _outgoing_argc, "");
+ arglist[ap] = ArgToken(); // add a sentinel, for the sake of asserts
+ return make_invoke(chain().last_method_oop(),
+ vmIntrinsics::_none,
+ chain().last_invoke_code(), true,
+ ap, arglist, THREAD);
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker::walk_incoming_state
+//
+void MethodHandleWalker::walk_incoming_state(TRAPS) {
+ Handle mtype(THREAD, chain().method_type_oop());
+ int nptypes = java_dyn_MethodType::ptype_count(mtype());
+ _outgoing_argc = nptypes;
+ int argp = nptypes - 1;
+ if (argp >= 0) {
+ _outgoing.at_grow(argp, make_state(T_VOID, ArgToken(tt_void))); // presize
+ }
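+ // Walk the parameter list front to back, filling _outgoing from the highest
+ // slot index downward; two-slot values get an extra T_VOID filler slot.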
+ for (int i = 0; i < nptypes; i++) {
+ klassOop arg_type_klass = NULL;
+ BasicType arg_type = java_lang_Class::as_BasicType(
+ java_dyn_MethodType::ptype(mtype(), i), &arg_type_klass);
+ int index = new_local_index(arg_type);
+ ArgToken arg = make_parameter(arg_type, arg_type_klass, index, CHECK);
+ debug_only(arg_type_klass = (klassOop) NULL);
+ _outgoing.at_put(argp, make_state(arg_type, arg));
+ if (type2size[arg_type] == 2) {
+ // add the extra slot, so we can model the JVM stack
+ _outgoing.insert_before(argp+1, make_state(T_VOID, ArgToken(tt_void)));
+ }
+ --argp;
+ }
+ // call make_parameter at the end of the list for the return type
+ klassOop ret_type_klass = NULL;
+ BasicType ret_type = java_lang_Class::as_BasicType(
+ java_dyn_MethodType::rtype(mtype()), &ret_type_klass);
+ ArgToken ret = make_parameter(ret_type, ret_type_klass, -1, CHECK);
+ // ignore ret; client can catch it if needed
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleWalker::change_argument
+//
+// This is messy because some kinds of arguments are paired with
+// companion slots containing an empty value.
+void MethodHandleWalker::change_argument(BasicType old_type, int slot, BasicType new_type,
+ const ArgToken& new_arg) {
+ int old_size = type2size[old_type];
+ int new_size = type2size[new_type];
+ if (old_size == new_size) {
+ // simple case first
+ _outgoing.at_put(slot, make_state(new_type, new_arg));
+ } else if (old_size > new_size) {
+ for (int i = old_size - 1; i >= new_size; i--) {
+ assert((i != 0) == (_outgoing.at(slot + i)._type == T_VOID), "");
+ _outgoing.remove_at(slot + i);
+ }
+ if (new_size > 0)
+ _outgoing.at_put(slot, make_state(new_type, new_arg));
+ else
+ _outgoing_argc -= 1; // deleted a real argument
+ } else {
+ for (int i = old_size; i < new_size; i++) {
+ _outgoing.insert_before(slot + i, make_state(T_VOID, ArgToken(tt_void)));
+ }
+ _outgoing.at_put(slot, make_state(new_type, new_arg));
+ if (old_size == 0)
+ _outgoing_argc += 1; // inserted a real argument
+ }
+}
+
+
+#ifdef ASSERT
+int MethodHandleWalker::argument_count_slow() {
+ int args_seen = 0;
+ for (int i = _outgoing.length() - 1; i >= 0; i--) {
+ if (_outgoing.at(i)._type != T_VOID) {
+ ++args_seen;
+ }
+ }
+ return args_seen;
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleCompiler
+
+MethodHandleCompiler::MethodHandleCompiler(Handle root, methodHandle callee, bool is_invokedynamic, TRAPS)
+ : MethodHandleWalker(root, is_invokedynamic, THREAD),
+ _callee(callee),
+ _thread(THREAD),
+ _bytecode(THREAD, 50),
+ _constants(THREAD, 10),
+ _cur_stack(0),
+ _max_stack(0),
+ _rtype(T_ILLEGAL)
+{
+
+ // Element zero is always the null constant.
+ (void) _constants.append(NULL);
+
+ // Set name and signature index.
+ _name_index = cpool_symbol_put(_callee->name());
+ _signature_index = cpool_symbol_put(_callee->signature());
+
+ // Get return type klass.
+ Handle first_mtype(THREAD, chain().method_type_oop());
+ // _rklass is NULL for primitives.
+ _rtype = java_lang_Class::as_BasicType(java_dyn_MethodType::rtype(first_mtype()), &_rklass);
+ if (_rtype == T_ARRAY) _rtype = T_OBJECT;
+
+ int params = _callee->size_of_parameters(); // Incoming arguments plus receiver.
+ _num_params = for_invokedynamic() ? params - 1 : params; // XXX Check if callee is static?
+}
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleCompiler::compile
+//
+// Compile this MethodHandle into a bytecode adapter and return a
+// methodOop.
+methodHandle MethodHandleCompiler::compile(TRAPS) {
+ assert(_thread == THREAD, "must be same thread");
+ methodHandle nullHandle;
+ (void) walk(CHECK_(nullHandle));
+ return get_method_oop(CHECK_(nullHandle));
+}
+
+
+void MethodHandleCompiler::emit_bc(Bytecodes::Code op, int index) {
+ Bytecodes::check(op); // Are we legal?
+
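+ // Cases are grouped by bytecode format string: "b" (no operand),
+ // "bi" (one-byte operand), and "bii"/"bjj" (two-byte operand).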
+ switch (op) {
+ // b
+ case Bytecodes::_aconst_null:
+ case Bytecodes::_iconst_m1:
+ case Bytecodes::_iconst_0:
+ case Bytecodes::_iconst_1:
+ case Bytecodes::_iconst_2:
+ case Bytecodes::_iconst_3:
+ case Bytecodes::_iconst_4:
+ case Bytecodes::_iconst_5:
+ case Bytecodes::_lconst_0:
+ case Bytecodes::_lconst_1:
+ case Bytecodes::_fconst_0:
+ case Bytecodes::_fconst_1:
+ case Bytecodes::_fconst_2:
+ case Bytecodes::_dconst_0:
+ case Bytecodes::_dconst_1:
+ case Bytecodes::_iload_0:
+ case Bytecodes::_iload_1:
+ case Bytecodes::_iload_2:
+ case Bytecodes::_iload_3:
+ case Bytecodes::_lload_0:
+ case Bytecodes::_lload_1:
+ case Bytecodes::_lload_2:
+ case Bytecodes::_lload_3:
+ case Bytecodes::_fload_0:
+ case Bytecodes::_fload_1:
+ case Bytecodes::_fload_2:
+ case Bytecodes::_fload_3:
+ case Bytecodes::_dload_0:
+ case Bytecodes::_dload_1:
+ case Bytecodes::_dload_2:
+ case Bytecodes::_dload_3:
+ case Bytecodes::_aload_0:
+ case Bytecodes::_aload_1:
+ case Bytecodes::_aload_2:
+ case Bytecodes::_aload_3:
+ case Bytecodes::_istore_0:
+ case Bytecodes::_istore_1:
+ case Bytecodes::_istore_2:
+ case Bytecodes::_istore_3:
+ case Bytecodes::_lstore_0:
+ case Bytecodes::_lstore_1:
+ case Bytecodes::_lstore_2:
+ case Bytecodes::_lstore_3:
+ case Bytecodes::_fstore_0:
+ case Bytecodes::_fstore_1:
+ case Bytecodes::_fstore_2:
+ case Bytecodes::_fstore_3:
+ case Bytecodes::_dstore_0:
+ case Bytecodes::_dstore_1:
+ case Bytecodes::_dstore_2:
+ case Bytecodes::_dstore_3:
+ case Bytecodes::_astore_0:
+ case Bytecodes::_astore_1:
+ case Bytecodes::_astore_2:
+ case Bytecodes::_astore_3:
+ case Bytecodes::_i2l:
+ case Bytecodes::_i2f:
+ case Bytecodes::_i2d:
+ case Bytecodes::_i2b:
+ case Bytecodes::_i2c:
+ case Bytecodes::_i2s:
+ case Bytecodes::_l2i:
+ case Bytecodes::_l2f:
+ case Bytecodes::_l2d:
+ case Bytecodes::_f2i:
+ case Bytecodes::_f2l:
+ case Bytecodes::_f2d:
+ case Bytecodes::_d2i:
+ case Bytecodes::_d2l:
+ case Bytecodes::_d2f:
+ case Bytecodes::_ireturn:
+ case Bytecodes::_lreturn:
+ case Bytecodes::_freturn:
+ case Bytecodes::_dreturn:
+ case Bytecodes::_areturn:
+ case Bytecodes::_return:
+ assert(strcmp(Bytecodes::format(op), "b") == 0, "wrong bytecode format");
+ _bytecode.push(op);
+ break;
+
+ // bi
+ case Bytecodes::_ldc:
+ case Bytecodes::_iload:
+ case Bytecodes::_lload:
+ case Bytecodes::_fload:
+ case Bytecodes::_dload:
+ case Bytecodes::_aload:
+ case Bytecodes::_istore:
+ case Bytecodes::_lstore:
+ case Bytecodes::_fstore:
+ case Bytecodes::_dstore:
+ case Bytecodes::_astore:
+ assert(strcmp(Bytecodes::format(op), "bi") == 0, "wrong bytecode format");
+ assert((char) index == index, "index does not fit in 8-bit");
+ _bytecode.push(op);
+ _bytecode.push(index);
+ break;
+
+ // bii
+ case Bytecodes::_ldc2_w:
+ case Bytecodes::_checkcast:
+ assert(strcmp(Bytecodes::format(op), "bii") == 0, "wrong bytecode format");
+ assert((short) index == index, "index does not fit in 16-bit");
+ _bytecode.push(op);
+ _bytecode.push(index >> 8);
+ _bytecode.push(index);
+ break;
+
+ // bjj
+ case Bytecodes::_invokestatic:
+ case Bytecodes::_invokespecial:
+ case Bytecodes::_invokevirtual:
+ assert(strcmp(Bytecodes::format(op), "bjj") == 0, "wrong bytecode format");
+ assert((short) index == index, "index does not fit in 16-bit");
+ _bytecode.push(op);
+ _bytecode.push(index >> 8);
+ _bytecode.push(index);
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+}
+
+
+void MethodHandleCompiler::emit_load(BasicType bt, int index) {
+ if (index <= 3) {
+ switch (bt) {
+ case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+ case T_INT: emit_bc(Bytecodes::cast(Bytecodes::_iload_0 + index)); break;
+ case T_LONG: emit_bc(Bytecodes::cast(Bytecodes::_lload_0 + index)); break;
+ case T_FLOAT: emit_bc(Bytecodes::cast(Bytecodes::_fload_0 + index)); break;
+ case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dload_0 + index)); break;
+ case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_aload_0 + index)); break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ else {
+ switch (bt) {
+ case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+ case T_INT: emit_bc(Bytecodes::_iload, index); break;
+ case T_LONG: emit_bc(Bytecodes::_lload, index); break;
+ case T_FLOAT: emit_bc(Bytecodes::_fload, index); break;
+ case T_DOUBLE: emit_bc(Bytecodes::_dload, index); break;
+ case T_OBJECT: emit_bc(Bytecodes::_aload, index); break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ stack_push(bt);
+}
+
+void MethodHandleCompiler::emit_store(BasicType bt, int index) {
+ if (index <= 3) {
+ switch (bt) {
+ case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+ case T_INT: emit_bc(Bytecodes::cast(Bytecodes::_istore_0 + index)); break;
+ case T_LONG: emit_bc(Bytecodes::cast(Bytecodes::_lstore_0 + index)); break;
+ case T_FLOAT: emit_bc(Bytecodes::cast(Bytecodes::_fstore_0 + index)); break;
+ case T_DOUBLE: emit_bc(Bytecodes::cast(Bytecodes::_dstore_0 + index)); break;
+ case T_OBJECT: emit_bc(Bytecodes::cast(Bytecodes::_astore_0 + index)); break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ else {
+ switch (bt) {
+ case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+ case T_INT: emit_bc(Bytecodes::_istore, index); break;
+ case T_LONG: emit_bc(Bytecodes::_lstore, index); break;
+ case T_FLOAT: emit_bc(Bytecodes::_fstore, index); break;
+ case T_DOUBLE: emit_bc(Bytecodes::_dstore, index); break;
+ case T_OBJECT: emit_bc(Bytecodes::_astore, index); break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+ stack_pop(bt);
+}
+
+
+void MethodHandleCompiler::emit_load_constant(ArgToken arg) {
+ BasicType bt = arg.basic_type();
+ switch (bt) {
+ case T_INT: {
+ jint value = arg.get_jint();
+ if (-1 <= value && value <= 5)
+ emit_bc(Bytecodes::cast(Bytecodes::_iconst_0 + value));
+ else
+ emit_bc(Bytecodes::_ldc, cpool_int_put(value));
+ break;
+ }
+ case T_LONG: {
+ jlong value = arg.get_jlong();
+ if (0 <= value && value <= 1)
+ emit_bc(Bytecodes::cast(Bytecodes::_lconst_0 + (int) value));
+ else
+ emit_bc(Bytecodes::_ldc2_w, cpool_long_put(value));
+ break;
+ }
+ case T_FLOAT: {
+ jfloat value = arg.get_jfloat();
+ if (value == 0.0 || value == 1.0 || value == 2.0)
+ emit_bc(Bytecodes::cast(Bytecodes::_fconst_0 + (int) value));
+ else
+ emit_bc(Bytecodes::_ldc, cpool_float_put(value));
+ break;
+ }
+ case T_DOUBLE: {
+ jdouble value = arg.get_jdouble();
+ if (value == 0.0 || value == 1.0)
+ emit_bc(Bytecodes::cast(Bytecodes::_dconst_0 + (int) value));
+ else
+ emit_bc(Bytecodes::_ldc2_w, cpool_double_put(value));
+ break;
+ }
+ case T_OBJECT: {
+ Handle value = arg.object();
+ if (value.is_null())
+ emit_bc(Bytecodes::_aconst_null);
+ else
+ emit_bc(Bytecodes::_ldc, cpool_object_put(value));
+ break;
+ }
+ default:
+ ShouldNotReachHere();
+ }
+ stack_push(bt);
+}
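+
+// Only values with a dedicated short-form bytecode (iconst_m1..iconst_5,
+// lconst_0/1, fconst_0..2, dconst_0/1, aconst_null) are emitted directly;
+// everything else is appended to the fake constant pool and loaded with ldc
+// or ldc2_w. An int constant of 100, for instance, goes through
+// cpool_int_put() and is loaded with "ldc #<index>".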
+
+
+MethodHandleWalker::ArgToken
+MethodHandleCompiler::make_conversion(BasicType type, klassOop tk, Bytecodes::Code op,
+ const ArgToken& src, TRAPS) {
+
+ BasicType srctype = src.basic_type();
+ int index = src.index();
+
+ switch (op) {
+ case Bytecodes::_i2l:
+ case Bytecodes::_i2f:
+ case Bytecodes::_i2d:
+ case Bytecodes::_i2b:
+ case Bytecodes::_i2c:
+ case Bytecodes::_i2s:
+
+ case Bytecodes::_l2i:
+ case Bytecodes::_l2f:
+ case Bytecodes::_l2d:
+
+ case Bytecodes::_f2i:
+ case Bytecodes::_f2l:
+ case Bytecodes::_f2d:
+
+ case Bytecodes::_d2i:
+ case Bytecodes::_d2l:
+ case Bytecodes::_d2f:
+ emit_load(srctype, index);
+ stack_pop(srctype); // pop the src type
+ emit_bc(op);
+ stack_push(type); // push the dest value
+ if (srctype != type)
+ index = new_local_index(type);
+ emit_store(type, index);
+ break;
+
+ case Bytecodes::_checkcast:
+ emit_load(srctype, index);
+ emit_bc(op, cpool_klass_put(tk));
+ emit_store(srctype, index);
+ break;
+
+ default:
+ ShouldNotReachHere();
+ }
+
+ return make_parameter(type, tk, index, THREAD);
+}
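+
+// Every conversion above follows the same load/convert/store shape: push the
+// source local, apply the conversion bytecode, then store the result (into a
+// fresh local when the basic type changes). Roughly, an i2l of local 2 comes
+// out as "iload_2; i2l; lstore <new>", and the returned token names that
+// destination local.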
+
+
+// -----------------------------------------------------------------------------
+// MethodHandleCompiler
+//
+
+static jvalue zero_jvalue;
+
+// Emit bytecodes for the given invoke instruction.
+MethodHandleWalker::ArgToken
+MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid,
+ Bytecodes::Code op, bool tailcall,
+ int argc, MethodHandleWalker::ArgToken* argv,
+ TRAPS) {
+ if (m == NULL) {
+ // Get the intrinsic methodOop.
+ m = vmIntrinsics::method_for(iid);
+ }
+
+ klassOop klass = m->method_holder();
+ symbolOop name = m->name();
+ symbolOop signature = m->signature();
+
+ if (tailcall) {
+ // Actually, in order to make these methods more recognizable,
+ // let's put them in holder classes MethodHandle and InvokeDynamic.
+ // That way stack walkers and compiler heuristics can recognize them.
+ _target_klass = (for_invokedynamic()
+ ? SystemDictionary::InvokeDynamic_klass()
+ : SystemDictionary::MethodHandle_klass());
+ }
+
+ // instanceKlass* ik = instanceKlass::cast(klass);
+ // tty->print_cr("MethodHandleCompiler::make_invoke: %s %s.%s%s", Bytecodes::name(op), ik->external_name(), name->as_C_string(), signature->as_C_string());
+
+ // Inline the method.
+ InvocationCounter* ic = m->invocation_counter();
+ ic->set_carry();
+
+ for (int i = 0; i < argc; i++) {
+ ArgToken arg = argv[i];
+ TokenType tt = arg.token_type();
+ BasicType bt = arg.basic_type();
+
+ switch (tt) {
+ case tt_parameter:
+ case tt_temporary:
+ emit_load(bt, arg.index());
+ break;
+ case tt_constant:
+ emit_load_constant(arg);
+ break;
+ case tt_illegal:
+ // Sentinel.
+ assert(i == (argc - 1), "sentinel must be last entry");
+ break;
+ case tt_void:
+ default:
+ ShouldNotReachHere();
+ }
+ }
+
+ // Populate constant pool.
+ int name_index = cpool_symbol_put(name);
+ int signature_index = cpool_symbol_put(signature);
+ int name_and_type_index = cpool_name_and_type_put(name_index, signature_index);
+ int klass_index = cpool_klass_put(klass);
+ int methodref_index = cpool_methodref_put(klass_index, name_and_type_index);
+
+ // Generate invoke.
+ switch (op) {
+ case Bytecodes::_invokestatic:
+ case Bytecodes::_invokespecial:
+ case Bytecodes::_invokevirtual:
+ emit_bc(op, methodref_index);
+ break;
+ case Bytecodes::_invokeinterface:
+ Unimplemented();
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // If tailcall, we have walked all the way to a direct method handle.
+ // Otherwise, make a recursive call to some helper routine.
+ BasicType rbt = m->result_type();
+ if (rbt == T_ARRAY) rbt = T_OBJECT;
+ ArgToken ret;
+ if (tailcall) {
+ if (rbt != _rtype) {
+ if (rbt == T_VOID) {
+ // push a zero of the right sort
+ ArgToken zero;
+ if (_rtype == T_OBJECT) {
+ zero = make_oop_constant(NULL, CHECK_(zero));
+ } else {
+ zero = make_prim_constant(_rtype, &zero_jvalue, CHECK_(zero));
+ }
+ emit_load_constant(zero);
+ } else if (_rtype == T_VOID) {
+ // We'll emit a _return with something on the stack.
+ // It's OK to ignore what's on the stack.
+ } else {
+ tty->print_cr("*** rbt=%d != rtype=%d", rbt, _rtype);
+ assert(false, "IMPLEMENT ME");
+ }
+ }
+ switch (_rtype) {
+ case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+ case T_INT: emit_bc(Bytecodes::_ireturn); break;
+ case T_LONG: emit_bc(Bytecodes::_lreturn); break;
+ case T_FLOAT: emit_bc(Bytecodes::_freturn); break;
+ case T_DOUBLE: emit_bc(Bytecodes::_dreturn); break;
+ case T_VOID: emit_bc(Bytecodes::_return); break;
+ case T_OBJECT:
+ if (_rklass.not_null() && _rklass() != SystemDictionary::Object_klass())
+ emit_bc(Bytecodes::_checkcast, cpool_klass_put(_rklass()));
+ emit_bc(Bytecodes::_areturn);
+ break;
+ default: ShouldNotReachHere();
+ }
+ ret = ArgToken(); // Dummy return value.
+ }
+ else {
+ stack_push(rbt); // The return value is already pushed onto the stack.
+ int index = new_local_index(rbt);
+ switch (rbt) {
+ case T_BOOLEAN: case T_BYTE: case T_CHAR: case T_SHORT:
+ case T_INT: case T_LONG: case T_FLOAT: case T_DOUBLE:
+ case T_OBJECT:
+ emit_store(rbt, index);
+ ret = ArgToken(tt_temporary, rbt, index);
+ break;
+ case T_VOID:
+ ret = ArgToken(tt_void);
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ }
+
+ return ret;
+}
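+
+// To summarize the two exits above: a tail call ends the walk, so the result
+// is returned directly with the *return bytecode matching _rtype (plus an
+// optional checkcast for a typed object return); a non-tail call instead
+// parks the result in a fresh local and hands back a tt_temporary token so a
+// later step can reload it with emit_load().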
+
+MethodHandleWalker::ArgToken
+MethodHandleCompiler::make_fetch(BasicType type, klassOop tk, Bytecodes::Code op,
+ const MethodHandleWalker::ArgToken& base,
+ const MethodHandleWalker::ArgToken& offset,
+ TRAPS) {
+ Unimplemented();
+ return ArgToken();
+}
+
+
+int MethodHandleCompiler::cpool_primitive_put(BasicType bt, jvalue* con) {
+ jvalue con_copy;
+ assert(bt < T_OBJECT, "");
+ if (type2aelembytes(bt) < jintSize) {
+ // widen to int
+ con_copy = (*con);
+ con = &con_copy;
+ switch (bt) {
+ case T_BOOLEAN: con->i = (con->z ? 1 : 0); break;
+ case T_BYTE: con->i = con->b; break;
+ case T_CHAR: con->i = con->c; break;
+ case T_SHORT: con->i = con->s; break;
+ default: ShouldNotReachHere();
+ }
+ bt = T_INT;
+ }
+
+// for (int i = 1, imax = _constants.length(); i < imax; i++) {
+// ConstantValue* con = _constants.at(i);
+// if (con != NULL && con->is_primitive() && con->_type == bt) {
+// bool match = false;
+// switch (type2size[bt]) {
+// case 1: if (pcon->_value.i == con->i) match = true; break;
+// case 2: if (pcon->_value.j == con->j) match = true; break;
+// }
+// if (match)
+// return i;
+// }
+// }
+ ConstantValue* cv = new ConstantValue(bt, *con);
+ int index = _constants.append(cv);
+
+ // Long and double entries take 2 slots, so we add another empty entry.
+ if (type2size[bt] == 2)
+ (void) _constants.append(NULL);
+
+ return index;
+}
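+
+// Sub-int primitives are widened above because the class-file constant pool
+// has no boolean/byte/char/short tags; they are all stored as
+// JVM_CONSTANT_Integer, so a jvalue holding (short)7 becomes the int 7. Long
+// and double values occupy two pool slots, hence the extra NULL entry.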
+
+
+constantPoolHandle MethodHandleCompiler::get_constant_pool(TRAPS) const {
+ constantPoolHandle nullHandle;
+ bool is_conc_safe = true;
+ constantPoolOop cpool_oop = oopFactory::new_constantPool(_constants.length(), is_conc_safe, CHECK_(nullHandle));
+ constantPoolHandle cpool(THREAD, cpool_oop);
+
+ // Fill the real constant pool skipping the zero element.
+ for (int i = 1; i < _constants.length(); i++) {
+ ConstantValue* cv = _constants.at(i);
+ switch (cv->tag()) {
+ case JVM_CONSTANT_Utf8: cpool->symbol_at_put( i, cv->symbol_oop() ); break;
+ case JVM_CONSTANT_Integer: cpool->int_at_put( i, cv->get_jint() ); break;
+ case JVM_CONSTANT_Float: cpool->float_at_put( i, cv->get_jfloat() ); break;
+ case JVM_CONSTANT_Long: cpool->long_at_put( i, cv->get_jlong() ); break;
+ case JVM_CONSTANT_Double: cpool->double_at_put( i, cv->get_jdouble() ); break;
+ case JVM_CONSTANT_Class: cpool->klass_at_put( i, cv->klass_oop() ); break;
+ case JVM_CONSTANT_Methodref: cpool->method_at_put( i, cv->first_index(), cv->second_index()); break;
+ case JVM_CONSTANT_NameAndType: cpool->name_and_type_at_put(i, cv->first_index(), cv->second_index()); break;
+ case JVM_CONSTANT_Object: cpool->object_at_put( i, cv->object_oop() ); break;
+ default: ShouldNotReachHere();
+ }
+
+ switch (cv->tag()) {
+ case JVM_CONSTANT_Long:
+ case JVM_CONSTANT_Double:
+ i++; // Skip empty entry.
+ assert(_constants.at(i) == NULL, "empty entry");
+ break;
+ }
+ }
+
+ // Set the constant pool holder to the target method's class.
+ cpool->set_pool_holder(_target_klass());
+
+ return cpool;
+}
+
+
+methodHandle MethodHandleCompiler::get_method_oop(TRAPS) const {
+ methodHandle nullHandle;
+ // Create a method that holds the generated bytecode. invokedynamic
+ // has no receiver; normal MH calls do.
+ int flags_bits;
+ if (for_invokedynamic())
+ flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL | JVM_ACC_STATIC);
+ else
+ flags_bits = (/*JVM_MH_INVOKE_BITS |*/ JVM_ACC_PUBLIC | JVM_ACC_FINAL);
+
+ bool is_conc_safe = true;
+ methodOop m_oop = oopFactory::new_method(bytecode_length(),
+ accessFlags_from(flags_bits),
+ 0, 0, 0, is_conc_safe, CHECK_(nullHandle));
+ methodHandle m(THREAD, m_oop);
+ m_oop = NULL; // oop not GC safe
+
+ constantPoolHandle cpool = get_constant_pool(CHECK_(nullHandle));
+ m->set_constants(cpool());
+
+ m->set_name_index(_name_index);
+ m->set_signature_index(_signature_index);
+
+ m->set_code((address) bytecode());
+
+ m->set_max_stack(_max_stack);
+ m->set_max_locals(max_locals());
+ m->set_size_of_parameters(_num_params);
+
+ typeArrayHandle exception_handlers(THREAD, Universe::the_empty_int_array());
+ m->set_exception_table(exception_handlers());
+
+ // Set the carry bit of the invocation counter to force inlining of
+ // the adapter.
+ InvocationCounter* ic = m->invocation_counter();
+ ic->set_carry();
+
+ // Rewrite the method and set up the constant pool cache.
+ objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle));
+ objArrayHandle methods(THREAD, m_array);
+ methods->obj_at_put(0, m());
+ Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(nullHandle)); // Use fake class.
+
+#ifndef PRODUCT
+ if (TraceMethodHandles) {
+ m->print();
+ m->print_codes();
+ }
+#endif //PRODUCT
+
+ return m;
+}
+
+
+#ifndef PRODUCT
+
+#if 0
+// MH printer for debugging.
+
+class MethodHandlePrinter : public MethodHandleWalker {
+private:
+ outputStream* _out;
+ bool _verbose;
+ int _temp_num;
+ stringStream _strbuf;
+ const char* strbuf() {
+ const char* s = _strbuf.as_string();
+ _strbuf.reset();
+ return s;
+ }
+ ArgToken token(const char* str) {
+ return (ArgToken) str;
+ }
+ void start_params() {
+ _out->print("(");
+ }
+ void end_params() {
+ if (_verbose) _out->print("\n");
+ _out->print(") => {");
+ }
+ void put_type_name(BasicType type, klassOop tk, outputStream* s) {
+ const char* kname = NULL;
+ if (tk != NULL)
+ kname = Klass::cast(tk)->external_name();
+ s->print("%s", (kname != NULL) ? kname : type2name(type));
+ }
+ ArgToken maybe_make_temp(const char* statement_op, BasicType type, const char* temp_name) {
+ const char* value = strbuf();
+ if (!_verbose) return token(value);
+ // make an explicit binding for each separate value
+ _strbuf.print("%s%d", temp_name, ++_temp_num);
+ const char* temp = strbuf();
+ _out->print("\n %s %s %s = %s;", statement_op, type2name(type), temp, value);
+ return token(temp);
+ }
+
+public:
+ MethodHandlePrinter(Handle root, bool verbose, outputStream* out, TRAPS)
+ : MethodHandleWalker(root, THREAD),
+ _out(out),
+ _verbose(verbose),
+ _temp_num(0)
+ {
+ start_params();
+ }
+ virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) {
+ if (argnum < 0) {
+ end_params();
+ return NULL;
+ }
+ if (argnum == 0) {
+ _out->print(_verbose ? "\n " : "");
+ } else {
+ _out->print(_verbose ? ",\n " : ", ");
+ }
+ if (argnum >= _temp_num)
+ _temp_num = argnum;
+ // generate an argument name
+ _strbuf.print("a%d", argnum);
+ const char* arg = strbuf();
+ put_type_name(type, tk, _out);
+ _out->print(" %s", arg);
+ return token(arg);
+ }
+ virtual ArgToken make_oop_constant(oop con, TRAPS) {
+ if (con == NULL)
+ _strbuf.print("null");
+ else
+ con->print_value_on(&_strbuf);
+ if (_strbuf.size() == 0) { // yuck
+ _strbuf.print("(a ");
+ put_type_name(T_OBJECT, con->klass(), &_strbuf);
+ _strbuf.print(")");
+ }
+ return maybe_make_temp("constant", T_OBJECT, "k");
+ }
+ virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) {
+ java_lang_boxing_object::print(type, con, &_strbuf);
+ return maybe_make_temp("constant", type, "k");
+ }
+ virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken src, TRAPS) {
+ _strbuf.print("%s(%s", Bytecodes::name(op), (const char*)src);
+ if (tk != NULL) {
+ _strbuf.print(", ");
+ put_type_name(type, tk, &_strbuf);
+ }
+ _strbuf.print(")");
+ return maybe_make_temp("convert", type, "v");
+ }
+ virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, ArgToken base, ArgToken offset, TRAPS) {
+ _strbuf.print("%s(%s, %s", Bytecodes::name(op), (const char*)base, (const char*)offset);
+ if (tk != NULL) {
+ _strbuf.print(", ");
+ put_type_name(type, tk, &_strbuf);
+ }
+ _strbuf.print(")");
+ return maybe_make_temp("fetch", type, "x");
+ }
+ virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid,
+ Bytecodes::Code op, bool tailcall,
+ int argc, ArgToken* argv, TRAPS) {
+ symbolOop name, sig;
+ if (m != NULL) {
+ name = m->name();
+ sig = m->signature();
+ } else {
+ name = vmSymbols::symbol_at(vmIntrinsics::name_for(iid));
+ sig = vmSymbols::symbol_at(vmIntrinsics::signature_for(iid));
+ }
+ _strbuf.print("%s %s%s(", Bytecodes::name(op), name->as_C_string(), sig->as_C_string());
+ for (int i = 0; i < argc; i++) {
+ _strbuf.print("%s%s", (i > 0 ? ", " : ""), (const char*)argv[i]);
+ }
+ _strbuf.print(")");
+ if (!tailcall) {
+ BasicType rt = char2type(sig->byte_at(sig->utf8_length()-1));
+ if (rt == T_ILLEGAL) rt = T_OBJECT; // ';' at the end of '(...)L...;'
+ return maybe_make_temp("invoke", rt, "x");
+ } else {
+ const char* ret = strbuf();
+ _out->print(_verbose ? "\n return " : " ");
+ _out->print("%s", ret);
+ _out->print(_verbose ? "\n}\n" : " }");
+ }
+ return ArgToken();
+ }
+
+ virtual void set_method_handle(oop mh) {
+ if (WizardMode && Verbose) {
+ tty->print("\n--- next target: ");
+ mh->print();
+ }
+ }
+
+ static void print(Handle root, bool verbose, outputStream* out, TRAPS) {
+ ResourceMark rm;
+ MethodHandlePrinter printer(root, verbose, out, CHECK);
+ printer.walk(CHECK);
+ out->print("\n");
+ }
+ static void print(Handle root, bool verbose = Verbose, outputStream* out = tty) {
+ EXCEPTION_MARK;
+ ResourceMark rm;
+ MethodHandlePrinter printer(root, verbose, out, THREAD);
+ if (!HAS_PENDING_EXCEPTION)
+ printer.walk(THREAD);
+ if (HAS_PENDING_EXCEPTION) {
+ oop ex = PENDING_EXCEPTION;
+ CLEAR_PENDING_EXCEPTION;
+ out->print("\n*** ");
+ if (ex != Universe::virtual_machine_error_instance())
+ ex->print_on(out);
+ else
+ out->print("lose: %s", printer.lose_message());
+ out->print("\n}\n");
+ }
+ out->print("\n");
+ }
+};
+#endif // 0
+
+extern "C"
+void print_method_handle(oop mh) {
+ if (java_dyn_MethodHandle::is_instance(mh)) {
+ //MethodHandlePrinter::print(mh);
+ } else {
+ tty->print("*** not a method handle: ");
+ mh->print();
+ }
+}
+
+#endif // PRODUCT
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/prims/methodHandleWalk.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,413 @@
+/*
+ * Copyright 2008-2010 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// Low-level parser for method handle chains.
+class MethodHandleChain : StackObj {
+public:
+ typedef MethodHandles::EntryKind EntryKind;
+
+private:
+ Handle _root; // original target
+ Handle _method_handle; // current target
+ bool _is_last; // final guy in chain
+ bool _is_bound; // has a bound argument
+ BasicType _arg_type; // if is_bound, the bound argument type
+ int _arg_slot; // if is_bound or is_adapter, affected argument slot
+ jint _conversion; // conversion field of AMH or -1
+ methodHandle _last_method; // if is_last, which method we target
+ Bytecodes::Code _last_invoke; // if is_last, type of invoke
+ const char* _lose_message; // saved argument to lose()
+
+ void set_method_handle(Handle target, TRAPS);
+ void set_last_method(oop target, TRAPS);
+ static BasicType compute_bound_arg_type(oop target, methodOop m, int arg_slot, TRAPS);
+
+ oop MethodHandle_type_oop() { return java_dyn_MethodHandle::type(method_handle_oop()); }
+ oop MethodHandle_vmtarget_oop() { return java_dyn_MethodHandle::vmtarget(method_handle_oop()); }
+ int MethodHandle_vmslots() { return java_dyn_MethodHandle::vmslots(method_handle_oop()); }
+ int DirectMethodHandle_vmindex() { return sun_dyn_DirectMethodHandle::vmindex(method_handle_oop()); }
+ oop BoundMethodHandle_argument_oop() { return sun_dyn_BoundMethodHandle::argument(method_handle_oop()); }
+ int BoundMethodHandle_vmargslot() { return sun_dyn_BoundMethodHandle::vmargslot(method_handle_oop()); }
+ int AdapterMethodHandle_conversion() { return sun_dyn_AdapterMethodHandle::conversion(method_handle_oop()); }
+
+public:
+ MethodHandleChain(Handle root, TRAPS)
+ : _root(root)
+ { set_method_handle(root, THREAD); }
+
+ bool is_adapter() { return _conversion != -1; }
+ bool is_bound() { return _is_bound; }
+ bool is_last() { return _is_last; }
+
+ void next(TRAPS) {
+ assert(!is_last(), "");
+ set_method_handle(MethodHandle_vmtarget_oop(), THREAD);
+ }
+
+ Handle method_handle() { return _method_handle; }
+ oop method_handle_oop() { return _method_handle(); }
+ oop method_type_oop() { return MethodHandle_type_oop(); }
+ oop vmtarget_oop() { return MethodHandle_vmtarget_oop(); }
+
+ jint adapter_conversion() { assert(is_adapter(), ""); return _conversion; }
+ int adapter_conversion_op() { return MethodHandles::adapter_conversion_op(adapter_conversion()); }
+ BasicType adapter_conversion_src_type()
+ { return MethodHandles::adapter_conversion_src_type(adapter_conversion()); }
+ BasicType adapter_conversion_dest_type()
+ { return MethodHandles::adapter_conversion_dest_type(adapter_conversion()); }
+ int adapter_conversion_stack_move()
+ { return MethodHandles::adapter_conversion_stack_move(adapter_conversion()); }
+ int adapter_conversion_stack_pushes()
+ { return adapter_conversion_stack_move() / MethodHandles::stack_move_unit(); }
+ int adapter_conversion_vminfo()
+ { return MethodHandles::adapter_conversion_vminfo(adapter_conversion()); }
+ int adapter_arg_slot() { assert(is_adapter(), ""); return _arg_slot; }
+ oop adapter_arg_oop() { assert(is_adapter(), ""); return BoundMethodHandle_argument_oop(); }
+
+ BasicType bound_arg_type() { assert(is_bound(), ""); return _arg_type; }
+ int bound_arg_slot() { assert(is_bound(), ""); return _arg_slot; }
+ oop bound_arg_oop() { assert(is_bound(), ""); return BoundMethodHandle_argument_oop(); }
+
+ methodOop last_method_oop() { assert(is_last(), ""); return _last_method(); }
+ Bytecodes::Code last_invoke_code() { assert(is_last(), ""); return _last_invoke; }
+
+ void lose(const char* msg, TRAPS);
+ const char* lose_message() { return _lose_message; }
+};
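+
+// A rough usage sketch (the real driver is MethodHandleWalker::walk):
+//
+//   MethodHandleChain chain(root, CHECK);
+//   while (!chain.is_last()) {
+//     if (chain.is_bound())        { /* consume chain.bound_arg_oop() */ }
+//     else if (chain.is_adapter()) { /* apply chain.adapter_conversion_op() */ }
+//     chain.next(CHECK);
+//   }
+//   // last_method_oop() and last_invoke_code() then identify the final target.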
+
+
+// Structure walker for method handles.
+// Does abstract interpretation on top of low-level parsing.
+// You supply the tokens shuffled by the abstract interpretation.
+class MethodHandleWalker : StackObj {
+public:
+ // Stack values:
+ enum TokenType {
+ tt_void,
+ tt_parameter,
+ tt_temporary,
+ tt_constant,
+ tt_illegal
+ };
+
+ // Argument token:
+ class ArgToken {
+ private:
+ TokenType _tt;
+ BasicType _bt;
+ jvalue _value;
+ Handle _handle;
+
+ public:
+ ArgToken(TokenType tt = tt_illegal) : _tt(tt) {}
+ ArgToken(TokenType tt, BasicType bt, jvalue value) : _tt(tt), _bt(bt), _value(value) {}
+
+ ArgToken(TokenType tt, BasicType bt, int index) : _tt(tt), _bt(bt) {
+ _value.i = index;
+ }
+
+ ArgToken(TokenType tt, BasicType bt, Handle value) : _tt(tt), _bt(bt) {
+ _handle = value;
+ }
+
+ TokenType token_type() const { return _tt; }
+ BasicType basic_type() const { return _bt; }
+ int index() const { return _value.i; }
+ Handle object() const { return _handle; }
+
+ jint get_jint() const { return _value.i; }
+ jlong get_jlong() const { return _value.j; }
+ jfloat get_jfloat() const { return _value.f; }
+ jdouble get_jdouble() const { return _value.d; }
+ };
+
+ // Abstract interpretation state:
+ struct SlotState {
+ BasicType _type;
+ ArgToken _arg;
+ SlotState() : _type(), _arg() {}
+ };
+ static SlotState make_state(BasicType type, ArgToken arg) {
+ SlotState ss;
+ ss._type = type; ss._arg = arg;
+ return ss;
+ }
+
+private:
+ MethodHandleChain _chain;
+ bool _for_invokedynamic;
+ int _local_index;
+
+ GrowableArray<SlotState> _outgoing; // current outgoing parameter slots
+ int _outgoing_argc; // # non-empty outgoing slots
+
+ // Replace a value of type old_type at slot (and maybe slot+1) with the new value.
+ // If old_type != T_VOID, remove the old argument at that point.
+ // If new_type != T_VOID, insert the new argument at that point.
+ // Insert or delete a second empty slot as needed.
+ void change_argument(BasicType old_type, int slot, BasicType new_type, const ArgToken& new_arg);
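+ // For example, replacing an int (one slot) with a long (two slots) at the
+ // same position inserts the extra empty slot, and passing T_VOID as the new
+ // type simply deletes the old entry. (This only illustrates the contract
+ // described above, not the implementation.)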
+
+ SlotState* slot_state(int slot) {
+ if (slot < 0 || slot >= _outgoing.length())
+ return NULL;
+ return _outgoing.adr_at(slot);
+ }
+ BasicType slot_type(int slot) {
+ SlotState* ss = slot_state(slot);
+ if (ss == NULL)
+ return T_ILLEGAL;
+ return ss->_type;
+ }
+ bool slot_has_argument(int slot) {
+ return slot_type(slot) < T_VOID;
+ }
+
+#ifdef ASSERT
+ int argument_count_slow();
+#endif
+
+ // Return a bytecode for converting src to dest, if one exists.
+ Bytecodes::Code conversion_code(BasicType src, BasicType dest);
+
+ void walk_incoming_state(TRAPS);
+
+public:
+ MethodHandleWalker(Handle root, bool for_invokedynamic, TRAPS)
+ : _chain(root, THREAD),
+ _for_invokedynamic(for_invokedynamic),
+ _outgoing(THREAD, 10),
+ _outgoing_argc(0)
+ {
+ _local_index = for_invokedynamic ? 0 : 1;
+ }
+
+ MethodHandleChain& chain() { return _chain; }
+
+ bool for_invokedynamic() const { return _for_invokedynamic; }
+
+ int new_local_index(BasicType bt) {
+ //int index = _for_invokedynamic ? _local_index : _local_index - 1;
+ int index = _local_index;
+ _local_index += type2size[bt];
+ return index;
+ }
+
+ int max_locals() const { return _local_index; }
+
+ // plug-in abstract interpretation steps:
+ virtual ArgToken make_parameter( BasicType type, klassOop tk, int argnum, TRAPS ) = 0;
+ virtual ArgToken make_prim_constant( BasicType type, jvalue* con, TRAPS ) = 0;
+ virtual ArgToken make_oop_constant( oop con, TRAPS ) = 0;
+ virtual ArgToken make_conversion( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS ) = 0;
+ virtual ArgToken make_fetch( BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS ) = 0;
+ virtual ArgToken make_invoke( methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS ) = 0;
+
+ // For make_invoke, the methodOop can be NULL if the intrinsic ID
+ // is something other than vmIntrinsics::_none.
+
+ // and in case anyone cares to relate the previous actions to the chain:
+ virtual void set_method_handle(oop mh) { }
+
+ void lose(const char* msg, TRAPS) { chain().lose(msg, THREAD); }
+ const char* lose_message() { return chain().lose_message(); }
+
+ ArgToken walk(TRAPS);
+};
+
+
+// An abstract interpreter for method handle chains.
+// Produces an account of the semantics of a chain, in terms of a static IR.
+// The IR happens to be JVM bytecodes.
+class MethodHandleCompiler : public MethodHandleWalker {
+private:
+ methodHandle _callee;
+ KlassHandle _rklass; // Return type for casting.
+ BasicType _rtype;
+ KlassHandle _target_klass;
+ Thread* _thread;
+
+ // Fake constant pool entry.
+ class ConstantValue {
+ private:
+ int _tag; // Constant pool tag type.
+ JavaValue _value;
+ Handle _handle;
+
+ public:
+ // Constructor for oop types.
+ ConstantValue(int tag, Handle con) : _tag(tag), _handle(con) {
+ assert(tag == JVM_CONSTANT_Utf8 ||
+ tag == JVM_CONSTANT_Class ||
+ tag == JVM_CONSTANT_String ||
+ tag == JVM_CONSTANT_Object, "must be oop type");
+ }
+
+ // Constructor for oop reference types.
+ ConstantValue(int tag, int index) : _tag(tag) {
+ assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type");
+ _value.set_jint(index);
+ }
+ ConstantValue(int tag, int first_index, int second_index) : _tag(tag) {
+ assert(JVM_CONSTANT_Fieldref <= tag && tag <= JVM_CONSTANT_NameAndType, "must be ref type");
+ _value.set_jint(first_index << 16 | second_index);
+ }
+
+ // Constructor for primitive types.
+ ConstantValue(BasicType bt, jvalue con) {
+ _value.set_type(bt);
+ switch (bt) {
+ case T_INT: _tag = JVM_CONSTANT_Integer; _value.set_jint( con.i); break;
+ case T_LONG: _tag = JVM_CONSTANT_Long; _value.set_jlong( con.j); break;
+ case T_FLOAT: _tag = JVM_CONSTANT_Float; _value.set_jfloat( con.f); break;
+ case T_DOUBLE: _tag = JVM_CONSTANT_Double; _value.set_jdouble(con.d); break;
+ default: ShouldNotReachHere();
+ }
+ }
+
+ int tag() const { return _tag; }
+ symbolOop symbol_oop() const { return (symbolOop) _handle(); }
+ klassOop klass_oop() const { return (klassOop) _handle(); }
+ oop object_oop() const { return _handle(); }
+ int index() const { return _value.get_jint(); }
+ int first_index() const { return _value.get_jint() >> 16; }
+ int second_index() const { return _value.get_jint() & 0x0000FFFF; }
+
+ bool is_primitive() const { return is_java_primitive(_value.get_type()); }
+ jint get_jint() const { return _value.get_jint(); }
+ jlong get_jlong() const { return _value.get_jlong(); }
+ jfloat get_jfloat() const { return _value.get_jfloat(); }
+ jdouble get_jdouble() const { return _value.get_jdouble(); }
+ };
+
+ // Fake constant pool.
+ GrowableArray<ConstantValue*> _constants;
+
+ // Accumulated compiler state:
+ GrowableArray<unsigned char> _bytecode;
+
+ int _cur_stack;
+ int _max_stack;
+ int _num_params;
+ int _name_index;
+ int _signature_index;
+
+ void stack_push(BasicType bt) {
+ _cur_stack += type2size[bt];
+ if (_cur_stack > _max_stack) _max_stack = _cur_stack;
+ }
+ void stack_pop(BasicType bt) {
+ _cur_stack -= type2size[bt];
+ assert(_cur_stack >= 0, "sanity");
+ }
+
+ unsigned char* bytecode() const { return _bytecode.adr_at(0); }
+ int bytecode_length() const { return _bytecode.length(); }
+
+ // Fake constant pool.
+ int cpool_oop_put(int tag, Handle con) {
+ if (con.is_null()) return 0;
+ ConstantValue* cv = new ConstantValue(tag, con);
+ return _constants.append(cv);
+ }
+
+ int cpool_oop_reference_put(int tag, int first_index, int second_index) {
+ if (first_index == 0 && second_index == 0) return 0;
+ assert(first_index != 0 && second_index != 0, "no zero indexes");
+ ConstantValue* cv = new ConstantValue(tag, first_index, second_index);
+ return _constants.append(cv);
+ }
+
+ int cpool_primitive_put(BasicType type, jvalue* con);
+
+ int cpool_int_put(jint value) {
+ jvalue con; con.i = value;
+ return cpool_primitive_put(T_INT, &con);
+ }
+ int cpool_long_put(jlong value) {
+ jvalue con; con.j = value;
+ return cpool_primitive_put(T_LONG, &con);
+ }
+ int cpool_float_put(jfloat value) {
+ jvalue con; con.f = value;
+ return cpool_primitive_put(T_FLOAT, &con);
+ }
+ int cpool_double_put(jdouble value) {
+ jvalue con; con.d = value;
+ return cpool_primitive_put(T_DOUBLE, &con);
+ }
+
+ int cpool_object_put(Handle obj) {
+ return cpool_oop_put(JVM_CONSTANT_Object, obj);
+ }
+ int cpool_symbol_put(symbolOop sym) {
+ return cpool_oop_put(JVM_CONSTANT_Utf8, sym);
+ }
+ int cpool_klass_put(klassOop klass) {
+ return cpool_oop_put(JVM_CONSTANT_Class, klass);
+ }
+ int cpool_methodref_put(int class_index, int name_and_type_index) {
+ return cpool_oop_reference_put(JVM_CONSTANT_Methodref, class_index, name_and_type_index);
+ }
+ int cpool_name_and_type_put(int name_index, int signature_index) {
+ return cpool_oop_reference_put(JVM_CONSTANT_NameAndType, name_index, signature_index);
+ }
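+
+ // Building a method reference uses the same steps a class file would:
+ // cpool_symbol_put() for the name and signature Utf8 entries,
+ // cpool_name_and_type_put() to combine them, cpool_klass_put() for the
+ // holder class, and cpool_methodref_put() to tie them together; this is the
+ // sequence used in make_invoke(). Index 0 of the fake pool is left unused
+ // (get_constant_pool skips it), so returning 0 doubles as "not present".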
+
+ void emit_bc(Bytecodes::Code op, int index = 0);
+ void emit_load(BasicType bt, int index);
+ void emit_store(BasicType bt, int index);
+ void emit_load_constant(ArgToken arg);
+
+ virtual ArgToken make_parameter(BasicType type, klassOop tk, int argnum, TRAPS) {
+ return ArgToken(tt_parameter, type, argnum);
+ }
+ virtual ArgToken make_oop_constant(oop con, TRAPS) {
+ Handle h(THREAD, con);
+ return ArgToken(tt_constant, T_OBJECT, h);
+ }
+ virtual ArgToken make_prim_constant(BasicType type, jvalue* con, TRAPS) {
+ return ArgToken(tt_constant, type, *con);
+ }
+
+ virtual ArgToken make_conversion(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& src, TRAPS);
+ virtual ArgToken make_fetch(BasicType type, klassOop tk, Bytecodes::Code op, const ArgToken& base, const ArgToken& offset, TRAPS);
+ virtual ArgToken make_invoke(methodOop m, vmIntrinsics::ID iid, Bytecodes::Code op, bool tailcall, int argc, ArgToken* argv, TRAPS);
+
+ // Get a real constant pool.
+ constantPoolHandle get_constant_pool(TRAPS) const;
+
+ // Get a real methodOop.
+ methodHandle get_method_oop(TRAPS) const;
+
+public:
+ MethodHandleCompiler(Handle root, methodHandle call_method, bool for_invokedynamic, TRAPS);
+
+ // Compile the given MH chain into bytecode.
+ methodHandle compile(TRAPS);
+
+ // Tests if the given class is a MH adapter holder.
+ static bool klass_is_method_handle_adapter_holder(klassOop klass) {
+ return (klass == SystemDictionary::MethodHandle_klass() ||
+ klass == SystemDictionary::InvokeDynamic_klass());
+ }
+};
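+
+// A sketch of intended use, inferred only from the declarations above:
+//
+//   MethodHandleCompiler mhc(root, call_method, for_invokedynamic, THREAD);
+//   methodHandle adapter = mhc.compile(THREAD);
+//
+// compile() presumably drives MethodHandleWalker::walk(), after which
+// get_method_oop() packages the accumulated bytecode and fake constant pool
+// into a real methodOop.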
--- a/hotspot/src/share/vm/prims/methodHandles.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -132,8 +132,9 @@
}
return m;
} else {
+ assert(vmtarget->is_klass(), "must be class or interface");
decode_flags_result |= MethodHandles::_dmf_does_dispatch;
- assert(vmtarget->is_klass(), "must be class or interface");
+ decode_flags_result |= MethodHandles::_dmf_has_receiver;
receiver_limit_result = (klassOop)vmtarget;
Klass* tk = Klass::cast((klassOop)vmtarget);
if (tk->is_interface()) {
@@ -142,7 +143,7 @@
return klassItable::method_for_itable_index((klassOop)vmtarget, vmindex);
} else {
if (!tk->oop_is_instance())
- tk = instanceKlass::cast(SystemDictionary::object_klass());
+ tk = instanceKlass::cast(SystemDictionary::Object_klass());
return ((instanceKlass*)tk)->method_at_vtable(vmindex);
}
}
@@ -179,8 +180,10 @@
// short-circuits directly to the methodOop.
// (It might be another argument besides a receiver also.)
assert(target->is_method(), "must be a simple method");
+ decode_flags_result |= MethodHandles::_dmf_binds_method;
methodOop m = (methodOop) target;
- decode_flags_result |= MethodHandles::_dmf_binds_method;
+ if (!m->is_static())
+ decode_flags_result |= MethodHandles::_dmf_has_receiver;
return m;
}
}
@@ -233,8 +236,8 @@
BasicType recv_bt = char2type(sig->byte_at(1));
// Note: recv_bt might be T_ILLEGAL if byte_at(2) is ')'
assert(sig->byte_at(0) == '(', "must be method sig");
- if (recv_bt == T_OBJECT || recv_bt == T_ARRAY)
- decode_flags_result |= _dmf_has_receiver;
+// if (recv_bt == T_OBJECT || recv_bt == T_ARRAY)
+// decode_flags_result |= _dmf_has_receiver;
} else {
// non-static method
decode_flags_result |= _dmf_has_receiver;
@@ -261,14 +264,14 @@
return decode_MemberName(x, receiver_limit_result, decode_flags_result);
} else if (java_dyn_MethodHandle::is_subclass(xk)) {
return decode_MethodHandle(x, receiver_limit_result, decode_flags_result);
- } else if (xk == SystemDictionary::reflect_method_klass()) {
+ } else if (xk == SystemDictionary::reflect_Method_klass()) {
oop clazz = java_lang_reflect_Method::clazz(x);
int slot = java_lang_reflect_Method::slot(x);
klassOop k = java_lang_Class::as_klassOop(clazz);
if (k != NULL && Klass::cast(k)->oop_is_instance())
return decode_methodOop(instanceKlass::cast(k)->method_with_idnum(slot),
decode_flags_result);
- } else if (xk == SystemDictionary::reflect_constructor_klass()) {
+ } else if (xk == SystemDictionary::reflect_Constructor_klass()) {
oop clazz = java_lang_reflect_Constructor::clazz(x);
int slot = java_lang_reflect_Constructor::slot(x);
klassOop k = java_lang_Class::as_klassOop(clazz);
@@ -325,7 +328,7 @@
};
void MethodHandles::init_MemberName(oop mname_oop, oop target_oop) {
- if (target_oop->klass() == SystemDictionary::reflect_field_klass()) {
+ if (target_oop->klass() == SystemDictionary::reflect_Field_klass()) {
oop clazz = java_lang_reflect_Field::clazz(target_oop); // fd.field_holder()
int slot = java_lang_reflect_Field::slot(target_oop); // fd.index()
int mods = java_lang_reflect_Field::modifiers(target_oop);
@@ -410,7 +413,7 @@
if (defc_klassOop == NULL) return; // a primitive; no resolution possible
if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
if (!Klass::cast(defc_klassOop)->oop_is_array()) return;
- defc_klassOop = SystemDictionary::object_klass();
+ defc_klassOop = SystemDictionary::Object_klass();
}
instanceKlassHandle defc(THREAD, defc_klassOop);
defc_klassOop = NULL; // safety
@@ -746,7 +749,7 @@
return NULL; // unformed MH
}
klassOop tklass = target->klass();
- if (Klass::cast(tklass)->is_subclass_of(SystemDictionary::object_klass())) {
+ if (Klass::cast(tklass)->is_subclass_of(SystemDictionary::Object_klass())) {
return target; // target is another MH (or something else?)
}
}
@@ -818,26 +821,26 @@
for (int i = 0; ; i++) {
const char* test_name = always_null_names[i];
if (test_name == NULL) break;
- if (name->equals(test_name, (int) strlen(test_name)))
+ if (name->equals(test_name))
return true;
}
return false;
}
bool MethodHandles::class_cast_needed(klassOop src, klassOop dst) {
- if (src == dst || dst == SystemDictionary::object_klass())
+ if (src == dst || dst == SystemDictionary::Object_klass())
return false; // quickest checks
Klass* srck = Klass::cast(src);
Klass* dstk = Klass::cast(dst);
if (dstk->is_interface()) {
// interface receivers can safely be viewed as untyped,
// because interface calls always include a dynamic check
- //dstk = Klass::cast(SystemDictionary::object_klass());
+ //dstk = Klass::cast(SystemDictionary::Object_klass());
return false;
}
if (srck->is_interface()) {
// interface arguments must be viewed as untyped
- //srck = Klass::cast(SystemDictionary::object_klass());
+ //srck = Klass::cast(SystemDictionary::Object_klass());
return true;
}
if (is_always_null_type(src)) {
@@ -850,7 +853,7 @@
}
static oop object_java_mirror() {
- return Klass::cast(SystemDictionary::object_klass())->java_mirror();
+ return Klass::cast(SystemDictionary::Object_klass())->java_mirror();
}
bool MethodHandles::same_basic_type_for_arguments(BasicType src,
@@ -1446,7 +1449,7 @@
break;
}
// check subrange of Integer.value, if necessary
- if (argument == NULL || argument->klass() != SystemDictionary::int_klass()) {
+ if (argument == NULL || argument->klass() != SystemDictionary::Integer_klass()) {
err = "bound integer argument must be of type java.lang.Integer";
break;
}
@@ -1469,7 +1472,7 @@
BasicType argbox = java_lang_boxing_object::basic_type(argument);
if (argbox != ptype) {
err = check_argument_type_change(T_OBJECT, (argument == NULL
- ? SystemDictionary::object_klass()
+ ? SystemDictionary::Object_klass()
: argument->klass()),
ptype, ptype_klass(), argnum);
assert(err != NULL, "this must be an error");
@@ -1487,8 +1490,9 @@
int target_pushes = decode_MethodHandle_stack_pushes(target());
assert(this_pushes == slots_pushed + target_pushes, "BMH stack motion must be correct");
// do not blow the stack; use a Java-based adapter if this limit is exceeded
- if (slots_pushed + target_pushes > MethodHandlePushLimit)
- err = "too many bound parameters";
+ // FIXME
+ // if (slots_pushed + target_pushes > MethodHandlePushLimit)
+ // err = "too many bound parameters";
}
}
@@ -1518,6 +1522,11 @@
verify_vmslots(mh, CHECK);
}
+ // Get bound type and required slots.
+ oop ptype_oop = java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum);
+ BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
+ int slots_pushed = type2size[ptype];
+
// If (a) the target is a direct non-dispatched method handle,
// or (b) the target is a dispatched direct method handle and we
// are binding the receiver, cut out the middle-man.
@@ -1529,7 +1538,7 @@
int decode_flags = 0; klassOop receiver_limit_oop = NULL;
methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags));
if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); }
- DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - 1); // pos. of 1st arg.
+ DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg.
assert(sun_dyn_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig");
if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) {
KlassHandle receiver_limit(THREAD, receiver_limit_oop);
@@ -1554,10 +1563,6 @@
}
// Next question: Is this a ref, int, or long bound value?
- oop ptype_oop = java_dyn_MethodType::ptype(java_dyn_MethodHandle::type(target()), argnum);
- BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
- int slots_pushed = type2size[ptype];
-
MethodHandleEntry* me = NULL;
if (ptype == T_OBJECT) {
if (direct_to_method) me = MethodHandles::entry(_bound_ref_direct_mh);
@@ -2170,7 +2175,7 @@
symbolOop name = vmSymbols::toString_name(), sig = vmSymbols::void_string_signature();
JavaCallArguments args(Handle(THREAD, JNIHandles::resolve_non_null(erased_jh)));
JavaValue result(T_OBJECT);
- JavaCalls::call_virtual(&result, SystemDictionary::object_klass(), name, sig,
+ JavaCalls::call_virtual(&result, SystemDictionary::Object_klass(), name, sig,
&args, CHECK);
Handle str(THREAD, (oop)result.get_jobject());
java_lang_String::print(str, tty);
--- a/hotspot/src/share/vm/prims/nativeLookup.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/prims/nativeLookup.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -137,7 +137,7 @@
}
// Otherwise call static method findNative in ClassLoader
- KlassHandle klass (THREAD, SystemDictionary::classloader_klass());
+ KlassHandle klass (THREAD, SystemDictionary::ClassLoader_klass());
Handle name_arg = java_lang_String::create_from_str(jni_name, CHECK_NULL);
JavaValue result(T_LONG);
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -948,6 +948,7 @@
}
}
+#ifndef KERNEL
// If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
// if it's not explictly set or unset. If the user has chosen
// UseParNewGC and not explicitly set ParallelGCThreads we
@@ -1177,8 +1178,7 @@
// the value (either from the command line or ergonomics) of
// OldPLABSize. Following OldPLABSize is an ergonomics decision.
FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
- }
- else {
+ } else {
// OldPLABSize and CMSParPromoteBlocksToClaim are both set.
// CMSParPromoteBlocksToClaim is a collector-specific flag, so
// we'll let it to take precedence.
@@ -1188,7 +1188,23 @@
" CMSParPromoteBlocksToClaim will take precedence.\n");
}
}
+ if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
+ // OldPLAB sizing manually turned off: Use a larger default setting,
+ // unless it was manually specified. This is because a too-low value
+ // will slow down scavenges.
+ if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
+ FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, 50); // default value before 6631166
+ }
+ }
+ // Overwrite OldPLABSize which is the variable we will internally use everywhere.
+ FLAG_SET_ERGO(uintx, OldPLABSize, CMSParPromoteBlocksToClaim);
+ // If either of the static initialization defaults has changed, note this
+ // modification.
+ if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
+ CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
+ }
}
+#endif // KERNEL
inline uintx max_heap_for_compressed_oops() {
LP64_ONLY(return oopDesc::OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
@@ -1850,7 +1866,6 @@
FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
} else if (!strcmp(tail, ":gc")) {
FLAG_SET_CMDLINE(bool, PrintGC, true);
- FLAG_SET_CMDLINE(bool, TraceClassUnloading, true);
} else if (!strcmp(tail, ":jni")) {
FLAG_SET_CMDLINE(bool, PrintJNIResolving, true);
}
@@ -2370,22 +2385,25 @@
"ExtendedDTraceProbes flag is only applicable on Solaris\n");
return JNI_EINVAL;
#endif // ndef SOLARIS
- } else
#ifdef ASSERT
- if (match_option(option, "-XX:+FullGCALot", &tail)) {
+ } else if (match_option(option, "-XX:+FullGCALot", &tail)) {
FLAG_SET_CMDLINE(bool, FullGCALot, true);
// disable scavenge before parallel mark-compact
FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
- } else
#endif
- if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
+ } else if (match_option(option, "-XX:CMSParPromoteBlocksToClaim=", &tail)) {
julong cms_blocks_to_claim = (julong)atol(tail);
FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
jio_fprintf(defaultStream::error_stream(),
- "Please use -XX:CMSParPromoteBlocksToClaim in place of "
+ "Please use -XX:OldPLABSize in place of "
+ "-XX:CMSParPromoteBlocksToClaim in the future\n");
+ } else if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
+ julong cms_blocks_to_claim = (julong)atol(tail);
+ FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
+ jio_fprintf(defaultStream::error_stream(),
+ "Please use -XX:OldPLABSize in place of "
"-XX:ParCMSPromoteBlocksToClaim in the future\n");
- } else
- if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
+ } else if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
julong old_plab_size = 0;
ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
if (errcode != arg_in_range) {
@@ -2398,8 +2416,7 @@
jio_fprintf(defaultStream::error_stream(),
"Please use -XX:OldPLABSize in place of "
"-XX:ParallelGCOldGenAllocBufferSize in the future\n");
- } else
- if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
+ } else if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
julong young_plab_size = 0;
ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
if (errcode != arg_in_range) {
@@ -2412,8 +2429,7 @@
jio_fprintf(defaultStream::error_stream(),
"Please use -XX:YoungPLABSize in place of "
"-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
- } else
- if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
+ } else if (match_option(option, "-XX:", &tail)) { // -XX:xxxx
// Skip -XX:Flags= since that case has already been handled
if (strncmp(tail, "Flags=", strlen("Flags=")) != 0) {
if (!process_argument(tail, args->ignoreUnrecognized, origin)) {
@@ -2633,6 +2649,10 @@
if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions", &tail)) {
IgnoreUnrecognizedVMOptions = false;
}
+ if (match_option(option, "-XX:+PrintFlagsInitial", &tail)) {
+ CommandLineFlags::printFlags();
+ vm_exit(0);
+ }
}
if (IgnoreUnrecognizedVMOptions) {
@@ -2699,13 +2719,19 @@
}
ScavengeRootsInCode = 1;
}
+#ifdef COMPILER2
+ if (EnableInvokeDynamic && DoEscapeAnalysis) {
+ // TODO: We need to find rules for invokedynamic and EA. For now,
+ // simply disable EA by default.
+ if (FLAG_IS_DEFAULT(DoEscapeAnalysis)) {
+ DoEscapeAnalysis = false;
+ }
+ }
+#endif
if (PrintGCDetails) {
// Turn on -verbose:gc options as well
PrintGC = true;
- if (FLAG_IS_DEFAULT(TraceClassUnloading)) {
- TraceClassUnloading = true;
- }
}
#if defined(_LP64) && defined(COMPILER1)
@@ -2722,11 +2748,21 @@
// Set flags based on ergonomics.
set_ergonomics_flags();
+#ifdef _LP64
+ // XXX JSR 292 currently does not support compressed oops.
+ if (EnableMethodHandles && UseCompressedOops) {
+ if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
+ UseCompressedOops = false;
+ }
+ }
+#endif // _LP64
+
// Check the GC selections again.
if (!check_gc_consistency()) {
return JNI_EINVAL;
}
+#ifndef KERNEL
if (UseConcMarkSweepGC) {
// Set flags for CMS and ParNew. Check UseConcMarkSweep first
// to ensure that when both UseConcMarkSweepGC and UseParNewGC
@@ -2744,6 +2780,7 @@
set_g1_gc_flags();
}
}
+#endif // KERNEL
#ifdef SERIALGC
assert(verify_serial_gc_flags(), "SerialGC unset");
@@ -2756,9 +2793,16 @@
set_aggressive_opts_flags();
#ifdef CC_INTERP
- // Biased locking is not implemented with c++ interpreter
+ // Clear flags not supported by the C++ interpreter
+ FLAG_SET_DEFAULT(ProfileInterpreter, false);
FLAG_SET_DEFAULT(UseBiasedLocking, false);
-#endif /* CC_INTERP */
+ LP64_ONLY(FLAG_SET_DEFAULT(UseCompressedOops, false));
+#endif // CC_INTERP
+
+#ifdef ZERO
+ // Clear flags not supported by Zero
+ FLAG_SET_DEFAULT(TaggedStackInterpreter, false);
+#endif // ZERO
#ifdef COMPILER2
if (!UseBiasedLocking || EmitSync != 0) {
@@ -2766,15 +2810,18 @@
}
#endif
+ if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
+ warning("PrintAssembly is enabled; turning on DebugNonSafepoints to gain additional output");
+ DebugNonSafepoints = true;
+ }
+
if (PrintCommandLineFlags) {
CommandLineFlags::printSetFlags();
}
-#ifdef ASSERT
if (PrintFlagsFinal) {
CommandLineFlags::printFlags();
}
-#endif
return JNI_OK;
}
--- a/hotspot/src/share/vm/runtime/frame.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/frame.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -769,9 +769,9 @@
class InterpretedArgumentOopFinder: public SignatureInfo {
private:
- OopClosure* _f; // Closure to invoke
- int _offset; // TOS-relative offset, decremented with each argument
- bool _is_static; // true if the callee is a static method
+ OopClosure* _f; // Closure to invoke
+ int _offset; // TOS-relative offset, decremented with each argument
+ bool _has_receiver; // true if the callee has a receiver
frame* _fr;
void set(int size, BasicType type) {
@@ -786,9 +786,9 @@
}
public:
- InterpretedArgumentOopFinder(symbolHandle signature, bool is_static, frame* fr, OopClosure* f) : SignatureInfo(signature) {
+ InterpretedArgumentOopFinder(symbolHandle signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
// compute size of arguments
- int args_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+ int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
assert(!fr->is_interpreted_frame() ||
args_size <= fr->interpreter_frame_expression_stack_size(),
"args cannot be on stack anymore");
@@ -796,11 +796,10 @@
_f = f;
_fr = fr;
_offset = args_size;
- _is_static = is_static;
}
void oops_do() {
- if (!_is_static) {
+ if (_has_receiver) {
--_offset;
oop_offset_do();
}
@@ -912,7 +911,7 @@
int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
symbolHandle signature;
- bool is_static = false;
+ bool has_receiver = false;
// Process a callee's arguments if we are at a call site
// (i.e., if we are at an invoke bytecode)
@@ -922,7 +921,7 @@
Bytecode_invoke *call = Bytecode_invoke_at_check(m, bci);
if (call != NULL) {
signature = symbolHandle(thread, call->signature());
- is_static = call->is_invokestatic();
+ has_receiver = call->has_receiver();
if (map->include_argument_oops() &&
interpreter_frame_expression_stack_size() > 0) {
ResourceMark rm(thread); // is this right ???
@@ -936,7 +935,7 @@
// code in the interpreter calls a blocking runtime
// routine which can cause this code to be executed).
// (was bug gri 7/27/98)
- oops_interpreted_arguments_do(signature, is_static, f);
+ oops_interpreted_arguments_do(signature, has_receiver, f);
}
}
}
@@ -950,7 +949,7 @@
mask = &oopmap_mask;
#endif // ASSERT
oops_interpreted_locals_do(f, max_locals, mask);
- oops_interpreted_expressions_do(f, signature, is_static,
+ oops_interpreted_expressions_do(f, signature, has_receiver,
m->max_stack(),
max_locals, mask);
} else {
@@ -992,7 +991,7 @@
void frame::oops_interpreted_expressions_do(OopClosure *f,
symbolHandle signature,
- bool is_static,
+ bool has_receiver,
int max_stack,
int max_locals,
InterpreterOopMap *mask) {
@@ -1005,7 +1004,7 @@
// arguments in callee's locals.
int args_size = 0;
if (!signature.is_null()) {
- args_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+ args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
}
intptr_t *tos_addr = interpreter_frame_tos_at(args_size);
@@ -1038,8 +1037,8 @@
}
}
-void frame::oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f) {
- InterpretedArgumentOopFinder finder(signature, is_static, this, f);
+void frame::oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f) {
+ InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
finder.oops_do();
}
@@ -1066,8 +1065,8 @@
class CompiledArgumentOopFinder: public SignatureInfo {
protected:
OopClosure* _f;
- int _offset; // the current offset, incremented with each argument
- bool _is_static; // true if the callee is a static method
+ int _offset; // the current offset, incremented with each argument
+ bool _has_receiver; // true if the callee has a receiver
frame _fr;
RegisterMap* _reg_map;
int _arg_size;
@@ -1087,24 +1086,24 @@
}
public:
- CompiledArgumentOopFinder(symbolHandle signature, bool is_static, OopClosure* f, frame fr, const RegisterMap* reg_map)
+ CompiledArgumentOopFinder(symbolHandle signature, bool has_receiver, OopClosure* f, frame fr, const RegisterMap* reg_map)
: SignatureInfo(signature) {
// initialize CompiledArgumentOopFinder
_f = f;
_offset = 0;
- _is_static = is_static;
+ _has_receiver = has_receiver;
_fr = fr;
_reg_map = (RegisterMap*)reg_map;
- _arg_size = ArgumentSizeComputer(signature).size() + (is_static ? 0 : 1);
+ _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
int arg_size;
- _regs = SharedRuntime::find_callee_arguments(signature(), is_static, &arg_size);
+ _regs = SharedRuntime::find_callee_arguments(signature(), has_receiver, &arg_size);
assert(arg_size == _arg_size, "wrong arg size");
}
void oops_do() {
- if (!_is_static) {
+ if (_has_receiver) {
handle_oop_offset();
_offset++;
}
@@ -1112,9 +1111,9 @@
}
};
-void frame::oops_compiled_arguments_do(symbolHandle signature, bool is_static, const RegisterMap* reg_map, OopClosure* f) {
+void frame::oops_compiled_arguments_do(symbolHandle signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f) {
ResourceMark rm;
- CompiledArgumentOopFinder finder(signature, is_static, f, *this, reg_map);
+ CompiledArgumentOopFinder finder(signature, has_receiver, f, *this, reg_map);
finder.oops_do();
}
--- a/hotspot/src/share/vm/runtime/frame.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/frame.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -371,7 +371,7 @@
oop* oopmapreg_to_location(VMReg reg, const RegisterMap* regmap) const;
// Oops-do's
- void oops_compiled_arguments_do(symbolHandle signature, bool is_static, const RegisterMap* reg_map, OopClosure* f);
+ void oops_compiled_arguments_do(symbolHandle signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f);
void oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache = true);
private:
@@ -379,9 +379,9 @@
int max_locals,
InterpreterOopMap *mask);
void oops_interpreted_expressions_do(OopClosure *f, symbolHandle signature,
- bool is_static, int max_stack, int max_locals,
+ bool has_receiver, int max_stack, int max_locals,
InterpreterOopMap *mask);
- void oops_interpreted_arguments_do(symbolHandle signature, bool is_static, OopClosure* f);
+ void oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f);
// Iteration of oops
void oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache);
--- a/hotspot/src/share/vm/runtime/globals.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/globals.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -468,6 +468,8 @@
assert(Arguments::check_vm_args_consistency(), "Some flag settings conflict");
}
+#endif // PRODUCT
+
void CommandLineFlags::printFlags() {
// Print the flags sorted by name
// note: this method is called before the thread structure is in place
@@ -493,5 +495,3 @@
}
FREE_C_HEAP_ARRAY(Flag*, array);
}
-
-#endif
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -211,7 +211,7 @@
static bool wasSetOnCmdline(const char* name, bool* value);
static void printSetFlags();
- static void printFlags() PRODUCT_RETURN;
+ static void printFlags();
static void verify() PRODUCT_RETURN;
};
@@ -327,9 +327,6 @@
product(bool, UseMembar, false, \
"(Unstable) Issues membars on thread state transitions") \
\
- product(bool, PrintCommandLineFlags, false, \
- "Prints flags that appeared on the command line") \
- \
diagnostic(bool, UnlockDiagnosticVMOptions, trueInDebug, \
"Enable normal processing of flags relating to field diagnostics")\
\
@@ -1355,10 +1352,46 @@
product(uintx, ParGCDesiredObjsFromOverflowList, 20, \
"The desired number of objects to claim from the overflow list") \
\
- product(uintx, CMSParPromoteBlocksToClaim, 50, \
+ product(uintx, CMSParPromoteBlocksToClaim, 16, \
"Number of blocks to attempt to claim when refilling CMS LAB for "\
"parallel GC.") \
\
+ product(uintx, OldPLABWeight, 50, \
+ "Percentage (0-100) used to weight the current sample when" \
+ "computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \
+ \
+ product(bool, ResizeOldPLAB, true, \
+ "Dynamically resize (old gen) promotion labs") \
+ \
+ product(bool, PrintOldPLAB, false, \
+ "Print (old gen) promotion labs sizing decisions") \
+ \
+ product(uintx, CMSOldPLABMin, 16, \
+ "Min size of CMS gen promotion lab caches per worker per blksize")\
+ \
+ product(uintx, CMSOldPLABMax, 1024, \
+ "Max size of CMS gen promotion lab caches per worker per blksize")\
+ \
+ product(uintx, CMSOldPLABNumRefills, 4, \
+ "Nominal number of refills of CMS gen promotion lab cache" \
+ " per worker per block size") \
+ \
+ product(bool, CMSOldPLABResizeQuicker, false, \
+ "Whether to react on-the-fly during a scavenge to a sudden" \
+ " change in block demand rate") \
+ \
+ product(uintx, CMSOldPLABToleranceFactor, 4, \
+ "The tolerance of the phase-change detector for on-the-fly" \
+ " PLAB resizing during a scavenge") \
+ \
+ product(uintx, CMSOldPLABReactivityFactor, 2, \
+ "The gain in the feedback loop for on-the-fly PLAB resizing" \
+ " during a scavenge") \
+ \
+ product(uintx, CMSOldPLABReactivityCeiling, 10, \
+ "The clamping of the gain in the feedback loop for on-the-fly" \
+ " PLAB resizing during a scavenge") \
+ \
product(bool, AlwaysPreTouch, false, \
"It forces all freshly committed pages to be pre-touched.") \
\
@@ -1400,27 +1433,54 @@
"Percentage (0-100) by which the CMS incremental mode duty cycle" \
" is shifted to the right within the period between young GCs") \
\
- product(uintx, CMSExpAvgFactor, 25, \
- "Percentage (0-100) used to weight the current sample when " \
- "computing exponential averages for CMS statistics") \
- \
- product(uintx, CMS_FLSWeight, 50, \
- "Percentage (0-100) used to weight the current sample when " \
- "computing exponentially decating averages for CMS FLS statistics") \
- \
- product(uintx, CMS_FLSPadding, 2, \
- "The multiple of deviation from mean to use for buffering " \
+ product(uintx, CMSExpAvgFactor, 50, \
+ "Percentage (0-100) used to weight the current sample when" \
+ "computing exponential averages for CMS statistics.") \
+ \
+ product(uintx, CMS_FLSWeight, 75, \
+ "Percentage (0-100) used to weight the current sample when" \
+ "computing exponentially decating averages for CMS FLS statistics.") \
+ \
+ product(uintx, CMS_FLSPadding, 1, \
+ "The multiple of deviation from mean to use for buffering" \
"against volatility in free list demand.") \
\
product(uintx, FLSCoalescePolicy, 2, \
"CMS: Aggression level for coalescing, increasing from 0 to 4") \
\
- product(uintx, CMS_SweepWeight, 50, \
+ product(bool, FLSAlwaysCoalesceLarge, false, \
+ "CMS: Larger free blocks are always available for coalescing") \
+ \
+ product(double, FLSLargestBlockCoalesceProximity, 0.99, \
+ "CMS: the smaller the percentage the greater the coalition force")\
+ \
+ product(double, CMSSmallCoalSurplusPercent, 1.05, \
+ "CMS: the factor by which to inflate estimated demand of small" \
+ " block sizes to prevent coalescing with an adjoining block") \
+ \
+ product(double, CMSLargeCoalSurplusPercent, 0.95, \
+ "CMS: the factor by which to inflate estimated demand of large" \
+ " block sizes to prevent coalescing with an adjoining block") \
+ \
+ product(double, CMSSmallSplitSurplusPercent, 1.10, \
+ "CMS: the factor by which to inflate estimated demand of small" \
+ " block sizes to prevent splitting to supply demand for smaller" \
+ " blocks") \
+ \
+ product(double, CMSLargeSplitSurplusPercent, 1.00, \
+ "CMS: the factor by which to inflate estimated demand of large" \
+ " block sizes to prevent splitting to supply demand for smaller" \
+ " blocks") \
+ \
+ product(bool, CMSExtrapolateSweep, false, \
+ "CMS: cushion for block demand during sweep") \
+ \
+ product(uintx, CMS_SweepWeight, 75, \
"Percentage (0-100) used to weight the current sample when " \
"computing exponentially decaying average for inter-sweep " \
"duration") \
\
- product(uintx, CMS_SweepPadding, 2, \
+ product(uintx, CMS_SweepPadding, 1, \
"The multiple of deviation from mean to use for buffering " \
"against volatility in inter-sweep duration.") \
\
@@ -1459,6 +1519,13 @@
product(uintx, CMSIndexedFreeListReplenish, 4, \
"Replenish and indexed free list with this number of chunks") \
\
+ product(bool, CMSReplenishIntermediate, true, \
+ "Replenish all intermediate free-list caches") \
+ \
+ product(bool, CMSSplitIndexedFreeListBlocks, true, \
+ "When satisfying batched demand, splot blocks from the " \
+ "IndexedFreeList whose size is a multiple of requested size") \
+ \
product(bool, CMSLoopWarn, false, \
"Warn in case of excessive CMS looping") \
\
@@ -1593,6 +1660,18 @@
"Bitmap operations should process at most this many bits" \
"between yields") \
\
+ product(bool, CMSDumpAtPromotionFailure, false, \
+ "Dump useful information about the state of the CMS old " \
+ " generation upon a promotion failure.") \
+ \
+ product(bool, CMSPrintChunksInDump, false, \
+ "In a dump enabled by CMSDumpAtPromotionFailure, include " \
+ " more detailed information about the free chunks.") \
+ \
+ product(bool, CMSPrintObjectsInDump, false, \
+ "In a dump enabled by CMSDumpAtPromotionFailure, include " \
+ " more detailed information about the allocated objects.") \
+ \
diagnostic(bool, FLSVerifyAllHeapReferences, false, \
"Verify that all refs across the FLS boundary " \
" are to valid objects") \
@@ -1677,6 +1756,10 @@
"The youngest generation collection does not require " \
"a guarantee of full promotion of all live objects.") \
\
+ product(bool, PrintPromotionFailure, false, \
+ "Print additional diagnostic information following " \
+ " promotion failure") \
+ \
notproduct(bool, PromotionFailureALot, false, \
"Use promotion failure handling on every youngest generation " \
"collection") \
@@ -1967,9 +2050,6 @@
"number of times a GC thread (minus the coordinator) " \
"will sleep while yielding before giving up and resuming GC") \
\
- notproduct(bool, PrintFlagsFinal, false, \
- "Print all command line flags after argument processing") \
- \
/* gc tracing */ \
manageable(bool, PrintGC, false, \
"Print message at garbage collect") \
@@ -2269,11 +2349,20 @@
"If false, restricts profiled locations to the root method only") \
\
product(bool, PrintVMOptions, trueInDebug, \
- "print VM flag settings") \
+ "Print flags that appeared on the command line") \
\
product(bool, IgnoreUnrecognizedVMOptions, false, \
"Ignore unrecognized VM options") \
\
+ product(bool, PrintCommandLineFlags, false, \
+ "Print flags specified on command line or set by ergonomics") \
+ \
+ product(bool, PrintFlagsInitial, false, \
+ "Print all VM flags before argument processing and exit VM") \
+ \
+ product(bool, PrintFlagsFinal, false, \
+ "Print all VM flags after argument and ergonomic processing") \
+ \
diagnostic(bool, SerializeVMOutput, true, \
"Use a mutex to serialize output to tty and hotspot.log") \
\
@@ -2672,10 +2761,10 @@
notproduct(intx, MaxSubklassPrintSize, 4, \
"maximum number of subklasses to print when printing klass") \
\
- develop(intx, MaxInlineLevel, 9, \
+ product(intx, MaxInlineLevel, 9, \
"maximum number of nested calls that are inlined") \
\
- develop(intx, MaxRecursiveInlineLevel, 1, \
+ product(intx, MaxRecursiveInlineLevel, 1, \
"maximum number of nested recursive calls that are inlined") \
\
product_pd(intx, InlineSmallCode, \
@@ -2688,10 +2777,10 @@
product_pd(intx, FreqInlineSize, \
"maximum bytecode size of a frequent method to be inlined") \
\
- develop(intx, MaxTrivialSize, 6, \
+ product(intx, MaxTrivialSize, 6, \
"maximum bytecode size of a trivial method to be inlined") \
\
- develop(intx, MinInliningThreshold, 250, \
+ product(intx, MinInliningThreshold, 250, \
"min. invocation count a method needs to have to be inlined") \
\
develop(intx, AlignEntryCode, 4, \
--- a/hotspot/src/share/vm/runtime/jniHandles.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/jniHandles.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -144,7 +144,7 @@
EXCEPTION_MARK;
// We will never reach the CATCH below since Exceptions::_throw will cause
// the VM to exit if an exception is thrown during initialization
- klassOop k = SystemDictionary::object_klass();
+ klassOop k = SystemDictionary::Object_klass();
_deleted_handle = instanceKlass::cast(k)->allocate_permanent_instance(CATCH);
}
--- a/hotspot/src/share/vm/runtime/os.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/os.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -280,7 +280,7 @@
string,
CHECK);
- KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+ KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
JavaCalls::call_special(&result,
thread_group,
group,
--- a/hotspot/src/share/vm/runtime/reflection.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/reflection.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -449,7 +449,7 @@
// sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
if ( JDK_Version::is_gte_jdk14x_version()
&& UseNewReflection
- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_magic_klass())) {
+ && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
return true;
}
@@ -482,6 +482,11 @@
under_host_klass(accessee_ik, accessor))
return true;
+ // Adapter frames can access anything.
+ if (MethodHandleCompiler::klass_is_method_handle_adapter_holder(accessor))
+ // This is an internal adapter frame from the MethodHandleCompiler.
+ return true;
+
if (RelaxAccessControlCheck ||
(accessor_ik->major_version() < JAVA_1_5_VERSION &&
accessee_ik->major_version() < JAVA_1_5_VERSION)) {
@@ -541,7 +546,7 @@
// sun/reflect/MagicAccessorImpl subclasses to succeed trivially.
if ( JDK_Version::is_gte_jdk14x_version()
&& UseNewReflection
- && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_magic_klass())) {
+ && Klass::cast(current_class)->is_subclass_of(SystemDictionary::reflect_MagicAccessorImpl_klass())) {
return true;
}
@@ -631,7 +636,7 @@
objArrayHandle Reflection::get_parameter_types(methodHandle method, int parameter_count, oop* return_type, TRAPS) {
// Allocate array holding parameter types (java.lang.Class instances)
- objArrayOop m = oopFactory::new_objArray(SystemDictionary::class_klass(), parameter_count, CHECK_(objArrayHandle()));
+ objArrayOop m = oopFactory::new_objArray(SystemDictionary::Class_klass(), parameter_count, CHECK_(objArrayHandle()));
objArrayHandle mirrors (THREAD, m);
int index = 0;
// Collect parameter types
@@ -1308,7 +1313,7 @@
if (Klass::cast(klass)->oop_is_array() && which == MEMBER_DECLARED) return NULL;
if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
- klass = SystemDictionary::object_klass();
+ klass = SystemDictionary::Object_klass();
}
instanceKlassHandle h_k(THREAD, klass);
@@ -1375,13 +1380,13 @@
// Exclude primitive types
if (java_lang_Class::is_primitive(mirror) ||
(Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array() && (which == MEMBER_DECLARED))) {
- klassOop klass = SystemDictionary::reflect_method_klass();
+ klassOop klass = SystemDictionary::reflect_Method_klass();
return oopFactory::new_objArray(klass, 0, CHECK_NULL); // Return empty array
}
klassOop klass = java_lang_Class::as_klassOop(mirror);
if (Klass::cast(java_lang_Class::as_klassOop(mirror))->oop_is_array()) {
- klass = SystemDictionary::object_klass();
+ klass = SystemDictionary::Object_klass();
}
instanceKlassHandle h_k(THREAD, klass);
@@ -1411,7 +1416,7 @@
}
// Allocate result
- klassOop klass = SystemDictionary::reflect_method_klass();
+ klassOop klass = SystemDictionary::reflect_Method_klass();
objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
objArrayHandle h_result (THREAD, r);
@@ -1462,7 +1467,7 @@
}
}
// Allocate result
- klassOop klass = SystemDictionary::reflect_method_klass();
+ klassOop klass = SystemDictionary::reflect_Method_klass();
objArrayOop r = oopFactory::new_objArray(klass, count, CHECK_NULL);
objArrayHandle h_result (THREAD, r);
@@ -1523,7 +1528,7 @@
bool prim = java_lang_Class::is_primitive(mirror);
Klass* k = prim ? NULL : Klass::cast(java_lang_Class::as_klassOop(mirror));
if (prim || k->is_interface() || k->oop_is_array()) {
- return oopFactory::new_objArray(SystemDictionary::reflect_constructor_klass(), 0, CHECK_NULL); // Return empty array
+ return oopFactory::new_objArray(SystemDictionary::reflect_Constructor_klass(), 0, CHECK_NULL); // Return empty array
}
// Must be instanceKlass at this point
--- a/hotspot/src/share/vm/runtime/reflectionUtils.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/reflectionUtils.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -63,15 +63,15 @@
void FilteredFieldsMap::initialize() {
int offset;
offset = java_lang_Throwable::get_backtrace_offset();
- _filtered_fields->append(new FilteredField(SystemDictionary::throwable_klass(), offset));
+ _filtered_fields->append(new FilteredField(SystemDictionary::Throwable_klass(), offset));
// The latest version of vm may be used with old jdk.
if (JDK_Version::is_gte_jdk16x_version()) {
// The following class fields do not exist in
// previous version of jdk.
offset = sun_reflect_ConstantPool::cp_oop_offset();
- _filtered_fields->append(new FilteredField(SystemDictionary::reflect_constant_pool_klass(), offset));
+ _filtered_fields->append(new FilteredField(SystemDictionary::reflect_ConstantPool_klass(), offset));
offset = sun_reflect_UnsafeStaticFieldAccessorImpl::base_offset();
- _filtered_fields->append(new FilteredField(SystemDictionary::reflect_unsafe_static_field_accessor_impl_klass(), offset));
+ _filtered_fields->append(new FilteredField(SystemDictionary::reflect_UnsafeStaticFieldAccessorImpl_klass(), offset));
}
}
--- a/hotspot/src/share/vm/runtime/sharedRuntime.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -802,7 +802,7 @@
#ifdef ASSERT
// Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
- if (bc != Bytecodes::_invokestatic) {
+ if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
assert(receiver.not_null(), "should have thrown exception");
KlassHandle receiver_klass (THREAD, receiver->klass());
klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
@@ -860,7 +860,7 @@
if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
int retry_count = 0;
while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
- callee_method->method_holder() != SystemDictionary::object_klass()) {
+ callee_method->method_holder() != SystemDictionary::Object_klass()) {
// If has a pending exception then there is no need to re-try to
// resolve this method.
// If the method has been redefined, we need to try again.
@@ -1027,7 +1027,16 @@
frame stub_frame = thread->last_frame();
assert(stub_frame.is_runtime_frame(), "sanity check");
frame caller_frame = stub_frame.sender(&reg_map);
- if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
+
+ // MethodHandle invokes don't have a CompiledIC and should always
+ // simply redispatch to the callee_target.
+ address sender_pc = caller_frame.pc();
+ CodeBlob* sender_cb = caller_frame.cb();
+ nmethod* sender_nm = sender_cb->as_nmethod_or_null();
+
+ if (caller_frame.is_interpreted_frame() ||
+ caller_frame.is_entry_frame() ||
+ (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc))) {
methodOop callee = thread->callee_target();
guarantee(callee != NULL && callee->is_method(), "bad handshake");
thread->set_vm_result(callee);
@@ -1529,7 +1538,7 @@
oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
oopDesc* required) {
if (required == NULL) return NULL;
- if (required->klass() == SystemDictionary::class_klass())
+ if (required->klass() == SystemDictionary::Class_klass())
return required;
if (required->is_klass())
return Klass::cast(klassOop(required))->java_mirror();
@@ -2136,7 +2145,7 @@
return regs.first();
}
-VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
+VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool has_receiver, int* arg_size) {
// This method is returning a data structure allocating as a
// ResourceObject, so do not put any ResourceMarks in here.
char *s = sig->as_C_string();
@@ -2148,7 +2157,7 @@
BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
int cnt = 0;
- if (!is_static) {
+ if (has_receiver) {
sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
}
--- a/hotspot/src/share/vm/runtime/sharedRuntime.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/sharedRuntime.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -357,7 +357,7 @@
// Convert a sig into a calling convention register layout
// and find interesting things about it.
- static VMRegPair* find_callee_arguments(symbolOop sig, bool is_static, int *arg_size);
+ static VMRegPair* find_callee_arguments(symbolOop sig, bool has_receiver, int *arg_size);
static VMReg name_for_receiver();
// "Top of Stack" slots that may be unused by the calling convention but must
--- a/hotspot/src/share/vm/runtime/statSampler.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/statSampler.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -177,7 +177,7 @@
// public static String getProperty(String key, String def);
JavaCalls::call_static(&result,
- KlassHandle(THREAD, SystemDictionary::system_klass()),
+ KlassHandle(THREAD, SystemDictionary::System_klass()),
vmSymbolHandles::getProperty_name(),
vmSymbolHandles::string_string_signature(),
key_str,
--- a/hotspot/src/share/vm/runtime/thread.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/thread.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -973,7 +973,7 @@
return;
}
- KlassHandle group(this, SystemDictionary::threadGroup_klass());
+ KlassHandle group(this, SystemDictionary::ThreadGroup_klass());
Handle threadObj(this, this->threadObj());
JavaCalls::call_special(&result,
@@ -1468,7 +1468,7 @@
// so call ThreadGroup.uncaughtException()
KlassHandle recvrKlass(THREAD, threadObj->klass());
CallInfo callinfo;
- KlassHandle thread_klass(THREAD, SystemDictionary::thread_klass());
+ KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
LinkResolver::resolve_virtual_call(callinfo, threadObj, recvrKlass, thread_klass,
vmSymbolHandles::dispatchUncaughtException_name(),
vmSymbolHandles::throwable_void_signature(),
@@ -1484,7 +1484,7 @@
uncaught_exception,
THREAD);
} else {
- KlassHandle thread_group(THREAD, SystemDictionary::threadGroup_klass());
+ KlassHandle thread_group(THREAD, SystemDictionary::ThreadGroup_klass());
JavaValue result(T_VOID);
JavaCalls::call_virtual(&result,
group, thread_group,
@@ -1505,7 +1505,7 @@
while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
EXCEPTION_MARK;
JavaValue result(T_VOID);
- KlassHandle thread_klass(THREAD, SystemDictionary::thread_klass());
+ KlassHandle thread_klass(THREAD, SystemDictionary::Thread_klass());
JavaCalls::call_virtual(&result,
threadObj, thread_klass,
vmSymbolHandles::exit_method_name(),
@@ -1743,7 +1743,7 @@
// Check for pending async. exception
if (_pending_async_exception != NULL) {
// Only overwrite an already pending exception, if it is not a threadDeath.
- if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::threaddeath_klass())) {
+ if (!has_pending_exception() || !pending_exception()->is_a(SystemDictionary::ThreadDeath_klass())) {
// We cannot call Exceptions::_throw(...) here because we cannot block
set_pending_exception(_pending_async_exception, __FILE__, __LINE__);
@@ -1852,14 +1852,14 @@
if (is_Compiler_thread()) return;
// This is a change from JDK 1.1, but JDK 1.2 will also do it:
- if (java_throwable->is_a(SystemDictionary::threaddeath_klass())) {
+ if (java_throwable->is_a(SystemDictionary::ThreadDeath_klass())) {
java_lang_Thread::set_stillborn(threadObj());
}
{
// Actually throw the Throwable against the target Thread - however
// only if there is no thread death exception installed already.
- if (_pending_async_exception == NULL || !_pending_async_exception->is_a(SystemDictionary::threaddeath_klass())) {
+ if (_pending_async_exception == NULL || !_pending_async_exception->is_a(SystemDictionary::ThreadDeath_klass())) {
// If the topmost frame is a runtime stub, then we are calling into
// OptoRuntime from compiled code. Some runtime stubs (new, monitor_exit..)
// must deoptimize the caller before continuing, as the compiled exception handler table
@@ -3095,6 +3095,12 @@
warning("java.lang.ArithmeticException has not been initialized");
warning("java.lang.StackOverflowError has not been initialized");
}
+
+ if (EnableInvokeDynamic) {
+ // JSR 292: An initialized java.dyn.InvokeDynamic is required in
+ // the compiler.
+ initialize_class(vmSymbolHandles::java_dyn_InvokeDynamic(), CHECK_0);
+ }
}
// See : bugid 4211085.
--- a/hotspot/src/share/vm/runtime/thread.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/thread.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -772,6 +772,7 @@
volatile address _exception_pc; // PC where exception happened
volatile address _exception_handler_pc; // PC for handler of exception
volatile int _exception_stack_size; // Size of frame where exception happened
+ volatile int _is_method_handle_exception; // True if the current exception PC is at a MethodHandle call.
// support for compilation
bool _is_compiling; // is true if a compilation is active inthis thread (one compilation per thread possible)
@@ -1107,11 +1108,13 @@
int exception_stack_size() const { return _exception_stack_size; }
address exception_pc() const { return _exception_pc; }
address exception_handler_pc() const { return _exception_handler_pc; }
+ int is_method_handle_exception() const { return _is_method_handle_exception; }
void set_exception_oop(oop o) { _exception_oop = o; }
void set_exception_pc(address a) { _exception_pc = a; }
void set_exception_handler_pc(address a) { _exception_handler_pc = a; }
void set_exception_stack_size(int size) { _exception_stack_size = size; }
+ void set_is_method_handle_exception(int value) { _is_method_handle_exception = value; }
// Stack overflow support
inline size_t stack_available(address cur_sp);
@@ -1185,6 +1188,7 @@
static ByteSize exception_pc_offset() { return byte_offset_of(JavaThread, _exception_pc ); }
static ByteSize exception_handler_pc_offset() { return byte_offset_of(JavaThread, _exception_handler_pc); }
static ByteSize exception_stack_size_offset() { return byte_offset_of(JavaThread, _exception_stack_size); }
+ static ByteSize is_method_handle_exception_offset() { return byte_offset_of(JavaThread, _is_method_handle_exception); }
static ByteSize stack_guard_state_offset() { return byte_offset_of(JavaThread, _stack_guard_state ); }
static ByteSize suspend_flags_offset() { return byte_offset_of(JavaThread, _suspend_flags ); }
--- a/hotspot/src/share/vm/runtime/vframe.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/vframe.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1,5 +1,5 @@
/*
- * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,7 @@
static void print_locked_object_class_name(outputStream* st, Handle obj, const char* lock_state) {
if (obj.not_null()) {
st->print("\t- %s <" INTPTR_FORMAT "> ", lock_state, (address)obj());
- if (obj->klass() == SystemDictionary::class_klass()) {
+ if (obj->klass() == SystemDictionary::Class_klass()) {
klassOop target_klass = java_lang_Class::as_klassOop(obj());
st->print_cr("(a java.lang.Class for %s)", instanceKlass::cast(target_klass)->external_name());
} else {
@@ -430,8 +430,10 @@
// This is Method.invoke() -- skip it
} else if (use_new_reflection &&
Klass::cast(method()->method_holder())
- ->is_subclass_of(SystemDictionary::reflect_method_accessor_klass())) {
+ ->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass())) {
// This is an auxilary frame -- skip it
+ } else if (method()->is_method_handle_adapter()) {
+ // This is an internal adapter frame from the MethodHandleCompiler -- skip it
} else {
// This is non-excluded frame, we need to count it against the depth
if (depth-- <= 0) {
@@ -490,8 +492,8 @@
void vframeStreamCommon::skip_reflection_related_frames() {
while (!at_end() &&
(JDK_Version::is_gte_jdk14x_version() && UseNewReflection &&
- (Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_method_accessor_klass()) ||
- Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_constructor_accessor_klass())))) {
+ (Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_MethodAccessorImpl_klass()) ||
+ Klass::cast(method()->method_holder())->is_subclass_of(SystemDictionary::reflect_ConstructorAccessorImpl_klass())))) {
next();
}
}
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -455,40 +455,38 @@
static_field(SystemDictionary, _shared_dictionary, Dictionary*) \
static_field(SystemDictionary, _system_loader_lock_obj, oop) \
static_field(SystemDictionary, _loader_constraints, LoaderConstraintTable*) \
- static_field(SystemDictionary, WK_KLASS(object_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(string_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(class_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(cloneable_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(classloader_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(serializable_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(system_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(throwable_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(threaddeath_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(error_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(exception_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(runtime_exception_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(classNotFoundException_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(noClassDefFoundError_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(linkageError_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Object_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(String_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Class_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Cloneable_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(ClassLoader_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Serializable_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(System_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Throwable_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(ThreadDeath_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Error_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Exception_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(RuntimeException_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(ClassNotFoundException_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(NoClassDefFoundError_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(LinkageError_klass), klassOop) \
static_field(SystemDictionary, WK_KLASS(ClassCastException_klass), klassOop) \
static_field(SystemDictionary, WK_KLASS(ArrayStoreException_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(virtualMachineError_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(VirtualMachineError_klass), klassOop) \
static_field(SystemDictionary, WK_KLASS(OutOfMemoryError_klass), klassOop) \
static_field(SystemDictionary, WK_KLASS(StackOverflowError_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(protectionDomain_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(ProtectionDomain_klass), klassOop) \
static_field(SystemDictionary, WK_KLASS(AccessControlContext_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(reference_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(soft_reference_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(weak_reference_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(final_reference_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(phantom_reference_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(finalizer_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(thread_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(threadGroup_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(properties_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(stringBuffer_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(vector_klass), klassOop) \
- static_field(SystemDictionary, WK_KLASS(hashtable_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Reference_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(SoftReference_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(WeakReference_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(FinalReference_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(PhantomReference_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Finalizer_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Thread_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(ThreadGroup_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(Properties_klass), klassOop) \
+ static_field(SystemDictionary, WK_KLASS(StringBuffer_klass), klassOop) \
static_field(SystemDictionary, _box_klasses[0], klassOop) \
static_field(SystemDictionary, _java_system_loader, oop) \
\
--- a/hotspot/src/share/vm/services/attachListener.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/services/attachListener.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -437,7 +437,7 @@
string,
CHECK);
- KlassHandle group(THREAD, SystemDictionary::threadGroup_klass());
+ KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
JavaCalls::call_special(&result,
thread_group,
group,
--- a/hotspot/src/share/vm/services/heapDumper.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/services/heapDumper.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -1274,7 +1274,7 @@
if (o->is_klass()) return;
// skip classes as these emitted as HPROF_GC_CLASS_DUMP records
- if (o->klass() == SystemDictionary::class_klass()) {
+ if (o->klass() == SystemDictionary::Class_klass()) {
if (!java_lang_Class::is_primitive(o)) {
return;
}
--- a/hotspot/src/share/vm/services/lowMemoryDetector.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/services/lowMemoryDetector.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -32,7 +32,7 @@
void LowMemoryDetector::initialize() {
EXCEPTION_MARK;
- instanceKlassHandle klass (THREAD, SystemDictionary::thread_klass());
+ instanceKlassHandle klass (THREAD, SystemDictionary::Thread_klass());
instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
const char thread_name[] = "Low Memory Detector";
--- a/hotspot/src/share/vm/services/management.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/services/management.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -491,7 +491,7 @@
int num_flags = Arguments::num_jvm_flags();
int num_args = Arguments::num_jvm_args();
- instanceKlassHandle ik (THREAD, SystemDictionary::string_klass());
+ instanceKlassHandle ik (THREAD, SystemDictionary::String_klass());
objArrayOop r = oopFactory::new_objArray(ik(), num_args + num_flags, CHECK_NULL);
objArrayHandle result_h(THREAD, r);
@@ -1321,7 +1321,7 @@
LoadedClassesEnumerator lce(THREAD); // Pass current Thread as parameter
int num_classes = lce.num_loaded_classes();
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::class_klass(), num_classes, CHECK_0);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), num_classes, CHECK_0);
objArrayHandle classes_ah(THREAD, r);
for (int i = 0; i < num_classes; i++) {
@@ -1481,7 +1481,7 @@
// last flag entry is always NULL, so subtract 1
int nFlags = (int) Flag::numFlags - 1;
// allocate a temp array
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::string_klass(),
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::String_klass(),
nFlags, CHECK_0);
objArrayHandle flags_ah(THREAD, r);
int num_entries = 0;
@@ -1497,7 +1497,7 @@
if (num_entries < nFlags) {
// Return array of right length
- objArrayOop res = oopFactory::new_objArray(SystemDictionary::string_klass(), num_entries, CHECK_0);
+ objArrayOop res = oopFactory::new_objArray(SystemDictionary::String_klass(), num_entries, CHECK_0);
for(int i = 0; i < num_entries; i++) {
res->obj_at_put(i, flags_ah->obj_at(i));
}
@@ -1593,7 +1593,7 @@
objArrayHandle names_ah(THREAD, ta);
// Make sure we have a String array
klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
- if (element_klass != SystemDictionary::string_klass()) {
+ if (element_klass != SystemDictionary::String_klass()) {
THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
"Array element type is not String class", 0);
}
@@ -1747,7 +1747,7 @@
// Make sure we have a String array
klassOop element_klass = objArrayKlass::cast(names_ah->klass())->element_klass();
- if (element_klass != SystemDictionary::string_klass()) {
+ if (element_klass != SystemDictionary::String_klass()) {
THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
"Array element type is not String class", 0);
}
@@ -1782,7 +1782,7 @@
num_threads += cycle->num_threads();
}
- objArrayOop r = oopFactory::new_objArray(SystemDictionary::thread_klass(), num_threads, CHECK_NH);
+ objArrayOop r = oopFactory::new_objArray(SystemDictionary::Thread_klass(), num_threads, CHECK_NH);
objArrayHandle threads_ah(THREAD, r);
int index = 0;
--- a/hotspot/src/share/vm/services/serviceUtil.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/services/serviceUtil.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -45,7 +45,7 @@
// instance
if (o->is_instance()) {
// instance objects are visible
- if (o->klass() != SystemDictionary::class_klass()) {
+ if (o->klass() != SystemDictionary::Class_klass()) {
return true;
}
if (java_lang_Class::is_primitive(o)) {
--- a/hotspot/src/share/vm/services/threadService.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/services/threadService.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -540,7 +540,7 @@
}
Handle ThreadStackTrace::allocate_fill_stack_trace_element_array(TRAPS) {
- klassOop k = SystemDictionary::stackTraceElement_klass();
+ klassOop k = SystemDictionary::StackTraceElement_klass();
assert(k != NULL, "must be loaded in 1.4+");
instanceKlassHandle ik(THREAD, k);
--- a/hotspot/src/share/vm/utilities/constantTag.hpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/utilities/constantTag.hpp Fri Jan 15 14:25:44 2010 -0800
@@ -36,7 +36,8 @@
JVM_CONSTANT_UnresolvedString = 102, // Temporary tag until actual use
JVM_CONSTANT_StringIndex = 103, // Temporary tag while constructing constant pool
JVM_CONSTANT_UnresolvedClassInError = 104, // Error tag due to resolution error
- JVM_CONSTANT_InternalMax = 104 // Last implementation tag
+ JVM_CONSTANT_Object = 105, // Required for BoundMethodHandle arguments.
+ JVM_CONSTANT_InternalMax = 105 // Last implementation tag
};
@@ -70,6 +71,8 @@
bool is_unresolved_string() const { return _tag == JVM_CONSTANT_UnresolvedString; }
bool is_string_index() const { return _tag == JVM_CONSTANT_StringIndex; }
+ bool is_object() const { return _tag == JVM_CONSTANT_Object; }
+
bool is_klass_reference() const { return is_klass_index() || is_unresolved_klass(); }
bool is_klass_or_reference() const{ return is_klass() || is_klass_reference(); }
bool is_field_or_method() const { return is_field() || is_method() || is_interface_method(); }
--- a/hotspot/src/share/vm/utilities/exceptions.cpp Thu Jan 14 15:48:26 2010 -0800
+++ b/hotspot/src/share/vm/utilities/exceptions.cpp Fri Jan 15 14:25:44 2010 -0800
@@ -122,7 +122,7 @@
// Check for special boot-strapping/vm-thread handling
if (special_exception(thread, file, line, h_exception)) return;
- assert(h_exception->is_a(SystemDictionary::throwable_klass()), "exception is not a subclass of java/lang/Throwable");
+ assert(h_exception->is_a(SystemDictionary::Throwable_klass()), "exception is not a subclass of java/lang/Throwable");
// set the pending exception
thread->set_pending_exception(h_exception(), file, line);
@@ -255,7 +255,7 @@
// Future: object initializer should take a cause argument
if (h_cause() != NULL) {
- assert(h_cause->is_a(SystemDictionary::throwable_klass()),
+ assert(h_cause->is_a(SystemDictionary::Throwable_klass()),
"exception cause is not a subclass of java/lang/Throwable");
JavaValue result1(T_OBJECT);
JavaCallArguments args1;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6909839/Test6909839.java Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2010 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 6909839
+ * @summary missing unsigned compare cases for some cmoves in sparc.ad
+ *
+ * @run main/othervm -XX:+AggressiveOpts -Xbatch Test6909839
+ */
+
+public class Test6909839 {
+ public static void main(String[] args) {
+ testi();
+ testi();
+ testi();
+ testui();
+ testui();
+ testui();
+ testdi();
+ testdi();
+ testdi();
+ testfi();
+ testfi();
+ testfi();
+
+ testl();
+ testl();
+ testl();
+ testul();
+ testul();
+ testul();
+ testdl();
+ testdl();
+ testdl();
+ testfl();
+ testfl();
+ testfl();
+
+ testf();
+ testf();
+ testf();
+ testuf();
+ testuf();
+ testuf();
+ testdf();
+ testdf();
+ testdf();
+ testff();
+ testff();
+ testff();
+
+ testd();
+ testd();
+ testd();
+ testud();
+ testud();
+ testud();
+ testdd();
+ testdd();
+ testdd();
+ testfd();
+ testfd();
+ testfd();
+
+ testp();
+ testp();
+ testp();
+ testup();
+ testup();
+ testup();
+ testdp();
+ testdp();
+ testdp();
+ testfp();
+ testfp();
+ testfp();
+ }
+
+ static void testui() {
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += ((v >= 1 && v < 3) ? 1 : 2);
+ }
+ System.out.println(total);
+ }
+
+ static void testdi() {
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 1.0) ? 1 : 2;
+ }
+ System.out.println(total);
+ }
+
+ static void testfi() {
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 1.0f) ? 1 : 2;
+ }
+ System.out.println(total);
+ }
+
+ static void testi() {
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ total += (i % 4 != 0) ? 1 : 2;
+ }
+ System.out.println(total);
+ }
+
+ static void testul() {
+ long total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += ((v >= 1 && v < 3) ? 1L : 2L);
+ }
+ System.out.println(total);
+ }
+
+ static void testdl() {
+ long total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 1.0) ? 1L : 2L;
+ }
+ System.out.println(total);
+ }
+
+ static void testfl() {
+ long total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 1.0f) ? 1L : 2L;
+ }
+ System.out.println(total);
+ }
+
+ static void testl() {
+ long total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ total += (i % 4 != 0) ? 1L : 2L;
+ }
+ System.out.println(total);
+ }
+
+ static void testuf() {
+ float total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += ((v >= 1 && v < 3) ? 1.0f : 2.0f);
+ }
+ System.out.println(total);
+ }
+
+ static void testdf() {
+ float total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 0.0) ? 1.0f : 2.0f;
+ }
+ System.out.println(total);
+ }
+
+ static void testff() {
+ float total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 0.0f) ? 1.0f : 2.0f;
+ }
+ System.out.println(total);
+ }
+
+ static void testf() {
+ float total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ total += (i % 4 != 0) ? 1.0f : 2.0f;
+ }
+ System.out.println(total);
+ }
+
+ static void testud() {
+ double total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += ((v >= 1 && v < 3) ? 1.0d : 2.0d);
+ }
+ System.out.println(total);
+ }
+
+ static void testdd() {
+ double total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 1.0) ? 1.0d : 2.0d;
+ }
+ System.out.println(total);
+ }
+
+ static void testfd() {
+ double total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += (v > 1.0f) ? 1.0d : 2.0d;
+ }
+ System.out.println(total);
+ }
+
+ static void testd() {
+ double total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ total += (i % 4 != 0) ? 1.0d : 2.0d;
+ }
+ System.out.println(total);
+ }
+
+ static void testp() {
+ Object a = new Object();
+ Object b = new Object();
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ total += ((i % 4 != 0) ? a : b).hashCode();
+ }
+ System.out.println(total);
+ }
+
+ static void testup() {
+ Object a = new Object();
+ Object b = new Object();
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += ((v >= 1 && v < 3) ? a : b).hashCode();
+ }
+ System.out.println(total);
+ }
+
+ static void testdp() {
+ Object a = new Object();
+ Object b = new Object();
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += ((v > 1.0) ? a : b).hashCode();
+ }
+ System.out.println(total);
+ }
+ static void testfp() {
+ Object a = new Object();
+ Object b = new Object();
+ int total = 0;
+ for (int i = 0 ; i < 10000; i++) {
+ int v = i % 4;
+ total += ((v > 1.0f) ? a : b).hashCode();
+ }
+ System.out.println(total);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6910484/Test.java Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2009 SAP. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6910484
+ * @summary incorrect integer optimization (losing an and-op in a given example)
+ *
+ * @run main/othervm -Xbatch Test
+ */
+
+public class Test {
+
+ public static void main(String[] args) {
+ long iteration = 0;
+ for(int i = 0; i <11000; i++) {
+ iteration++;
+ int result = test(255);
+ if (result != 112) {
+ System.out.println("expected 112, but got " + result + " after iteration " + iteration);
+ System.exit(97);
+ }
+ }
+ }
+
+ private static int test(int x) {
+ return (x & -32) / 2;
+ }
+
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/6912517/Test.java Fri Jan 15 14:25:44 2010 -0800
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2009 D.E. Shaw. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ */
+
+/**
+ * @test
+ * @bug 6912517
+ * @summary JIT bug compiles out (and stops running) code that needs to be run. Causes NPE.
+ *
+ * @run main/othervm -Xbatch -XX:CompileThreshold=100 -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops Test
+ */
+
+/**
+ * Highlights a bug with the JIT compiler.
+ * @author Matt Bruce m b r u c e __\at/__ g m a i l DOT c o m
+ */
+public class Test implements Runnable
+{
+ private final Thread myThread;
+ private Thread myInitialThread;
+ private boolean myShouldCheckThreads;
+
+ /**
+ * Sets up the running thread, and starts it.
+ */
+ public Test(int id)
+ {
+ myThread = new Thread(this);
+ myThread.setName("Runner: " + id);
+ myThread.start();
+ myShouldCheckThreads = false;
+ }
+
+ /**
+ * @param shouldCheckThreads the shouldCheckThreads to set
+ */
+ public void setShouldCheckThreads(boolean shouldCheckThreads)
+ {
+ myShouldCheckThreads = shouldCheckThreads;
+ }
+
+ /**
+ * Starts up the two threads with enough delay between them for JIT to
+ * kick in.
+ * @param args
+ * @throws InterruptedException
+ */
+ public static void main(String[] args) throws InterruptedException
+ {
+ // let this run for a bit, so the "run" below is JITTed.
+ for (int id = 0; id < 20; id++) {
+ System.out.println("Starting thread: " + id);
+ Test bug = new Test(id);
+ bug.setShouldCheckThreads(true);
+ Thread.sleep(2500);
+ }
+ }
+
+ /**
+ * @see java.lang.Runnable#run()
+ */
+ public void run()
+ {
+ long runNumber = 0;
+ while (true) {
+ // run hot for a little while, give JIT time to kick in to this loop.
+ // then run less hot.
+ if (runNumber > 15000) {
+ try {
+ Thread.sleep(5);
+ }
+ catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ }
+ runNumber++;
+ ensureProperCallingThread();
+ }
+ }
+
+ private void ensureProperCallingThread()
+ {
+ // this should never be null. but with the JIT bug, it will be.
+ // JIT BUG IS HERE ==>>>>>
+ if (myShouldCheckThreads) {
+ if (myInitialThread == null) {
+ myInitialThread = Thread.currentThread();
+ }
+ else if (myInitialThread != Thread.currentThread()) {
+ System.out.println("Not working: " + myInitialThread.getName());
+ }
+ }
+ }
+}