--- a/hotspot/make/hotspot_version Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/make/hotspot_version Fri Oct 05 13:28:16 2012 -0700
@@ -35,7 +35,7 @@
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=03
+HS_BUILD_NUMBER=04
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
--- a/hotspot/src/cpu/sparc/vm/sparc.ad Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/sparc.ad Fri Oct 05 13:28:16 2012 -0700
@@ -1870,6 +1870,11 @@
return Op_RegD;
}
+const int Matcher::vector_shift_count_ideal_reg(int size) {
+ fatal("vector shift is not supported");
+ return Node::NotAMachineReg;
+}
+
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
assert(is_java_primitive(bt), "only primitive type vectors");
--- a/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -2936,6 +2936,7 @@
// Push the appendix as a trailing parameter.
// This must be done before we get the receiver,
// since the parameter_size includes it.
+ assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
__ load_resolved_reference_at_index(temp, index);
__ verify_oop(temp);
__ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
@@ -3235,15 +3236,15 @@
}
const Register Rret = Lscratch;
- const Register G4_mtype = G4_scratch; // f1
+ const Register G4_mtype = G4_scratch;
const Register O0_recv = O0;
const Register Rscratch = G3_scratch;
prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
__ null_check(O0_recv);
- // G4: MethodType object (from cpool->resolved_references[])
- // G5: MH.linkToCallSite method (from f2)
+ // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
+ // G5: MH.invokeExact_MT method (from f2)
// Note: G4_mtype is already pushed (if necessary) by prepare_invoke
@@ -3275,8 +3276,8 @@
prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
- // G4: CallSite object (from cpool->resolved_references[])
- // G5: MH.linkToCallSite method (from f1)
+ // G4: CallSite object (from cpool->resolved_references[f1])
+ // G5: MH.linkToCallSite method (from f2)
// Note: G4_callsite is already pushed by prepare_invoke
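The assert added above (and repeated in the x86 template interpreters below) pins down the resolved-references layout this changeset begins to rely on: the appendix lives at the cache entry's index plus ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset, so the plain load_resolved_reference_at_index(temp, index) is only correct while that offset is zero. A minimal sketch of the guarded indexing, using a hypothetical helper name (the real code is the MacroAssembler load above):

    // Hypothetical helper; illustrates the indexing the new assert protects.
    oop appendix_at(objArrayOop resolved_references, int cache_index) {
      const int offset = ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset;
      assert(offset == 0, "appendix expected at index+0");  // mirrors the templateTable assert
      return resolved_references->obj_at(cache_index + offset);
    }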
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -2139,7 +2139,7 @@
const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset());
- size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
+ size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
resolve_cache_and_index(byte_no, cache, index, index_size);
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
@@ -2876,6 +2876,7 @@
// since the parameter_size includes it.
__ push(rbx);
__ mov(rbx, index);
+ assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
__ load_resolved_reference_at_index(index, rbx);
__ pop(rbx);
__ push(index); // push appendix (MethodType, CallSite, etc.)
@@ -3093,8 +3094,8 @@
void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- const Register rbx_method = rbx; // (from f2)
- const Register rax_mtype = rax; // (from f1)
+ const Register rbx_method = rbx;
+ const Register rax_mtype = rax;
const Register rcx_recv = rcx;
const Register rdx_flags = rdx;
@@ -3104,13 +3105,14 @@
return;
}
- prepare_invoke(byte_no,
- rbx_method, rax_mtype, // get f2 Method*, f1 MethodType
- rcx_recv);
+ prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
__ verify_method_ptr(rbx_method);
__ verify_oop(rcx_recv);
__ null_check(rcx_recv);
+ // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
+ // rbx: MH.invokeExact_MT method (from f2)
+
// Note: rax_mtype is already pushed (if necessary) by prepare_invoke
// FIXME: profile the LambdaForm also
@@ -3140,7 +3142,7 @@
prepare_invoke(byte_no, rbx_method, rax_callsite);
- // rax: CallSite object (from cpool->resolved_references[])
+ // rax: CallSite object (from cpool->resolved_references[f1])
// rbx: MH.linkToCallSite method (from f2)
// Note: rax_callsite is already pushed by prepare_invoke
--- a/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -2184,7 +2184,7 @@
const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::f2_offset());
- size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
+ size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
resolve_cache_and_index(byte_no, cache, index, index_size);
__ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
@@ -2926,6 +2926,7 @@
// since the parameter_size includes it.
__ push(rbx);
__ mov(rbx, index);
+ assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
__ load_resolved_reference_at_index(index, rbx);
__ pop(rbx);
__ push(index); // push appendix (MethodType, CallSite, etc.)
@@ -3144,8 +3145,8 @@
void TemplateTable::invokehandle(int byte_no) {
transition(vtos, vtos);
assert(byte_no == f1_byte, "use this argument");
- const Register rbx_method = rbx; // f2
- const Register rax_mtype = rax; // f1
+ const Register rbx_method = rbx;
+ const Register rax_mtype = rax;
const Register rcx_recv = rcx;
const Register rdx_flags = rdx;
@@ -3155,13 +3156,14 @@
return;
}
- prepare_invoke(byte_no,
- rbx_method, rax_mtype, // get f2 Method*, f1 MethodType
- rcx_recv);
+ prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
__ verify_method_ptr(rbx_method);
__ verify_oop(rcx_recv);
__ null_check(rcx_recv);
+ // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
+ // rbx: MH.invokeExact_MT method (from f2)
+
// Note: rax_mtype is already pushed (if necessary) by prepare_invoke
// FIXME: profile the LambdaForm also
@@ -3191,7 +3193,7 @@
prepare_invoke(byte_no, rbx_method, rax_callsite);
- // rax: CallSite object (from cpool->resolved_references[])
+ // rax: CallSite object (from cpool->resolved_references[f1])
// rbx: MH.linkToCallSite method (from f2)
// Note: rax_callsite is already pushed by prepare_invoke
--- a/hotspot/src/cpu/x86/vm/x86.ad Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/cpu/x86/vm/x86.ad Fri Oct 05 13:28:16 2012 -0700
@@ -571,6 +571,11 @@
return 0;
}
+// Only lowest bits of xmm reg are used for vector shift count.
+const int Matcher::vector_shift_count_ideal_reg(int size) {
+ return Op_VecS;
+}
+
// x86 supports misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
return !AlignVector; // can be changed by flag
@@ -3758,10 +3763,24 @@
ins_pipe( pipe_slow );
%}
+// ------------------------------ Shift ---------------------------------------
+
+// Left and right shift count vectors are the same on x86
+// (only lowest bits of xmm reg are used for count).
+instruct vshiftcnt(vecS dst, rRegI cnt) %{
+ match(Set dst (LShiftCntV cnt));
+ match(Set dst (RShiftCntV cnt));
+ format %{ "movd $dst,$cnt\t! load shift count" %}
+ ins_encode %{
+ __ movdl($dst$$XMMRegister, $cnt$$Register);
+ %}
+ ins_pipe( pipe_slow );
+%}
+
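The new vshiftcnt instruct pairs with the regF-to-vecS operand changes below: a variable shift count is materialized once into the low bits of an XMM register (via movdl) and can then feed any of the packed-shift instructs, since psllw/pslld/psllq and friends read the count from an XMM register. A hand-written sketch of the resulting sequence in terms of the MacroAssembler calls named in the ins_encode blocks (an illustration under assumed register names, not matcher output):

    // Shift four packed ints left by a variable count.
    static void sketch_vsll4I_var(MacroAssembler* masm, XMMRegister vec,
                                  XMMRegister cnt_xmm, Register cnt) {
      masm->movdl(cnt_xmm, cnt);   // vshiftcnt: count into low bits of an xmm reg
      masm->pslld(vec, cnt_xmm);   // vsll4I: each 32-bit lane shifted by the count
    }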
// ------------------------------ LeftShift -----------------------------------
// Shorts/Chars vector left shift
-instruct vsll2S(vecS dst, regF shift) %{
+instruct vsll2S(vecS dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (LShiftVS dst shift));
format %{ "psllw $dst,$shift\t! left shift packed2S" %}
@@ -3781,7 +3800,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll2S_reg(vecS dst, vecS src, regF shift) %{
+instruct vsll2S_reg(vecS dst, vecS src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (LShiftVS src shift));
format %{ "vpsllw $dst,$src,$shift\t! left shift packed2S" %}
@@ -3803,7 +3822,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll4S(vecD dst, regF shift) %{
+instruct vsll4S(vecD dst, vecS shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (LShiftVS dst shift));
format %{ "psllw $dst,$shift\t! left shift packed4S" %}
@@ -3823,7 +3842,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll4S_reg(vecD dst, vecD src, regF shift) %{
+instruct vsll4S_reg(vecD dst, vecD src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (LShiftVS src shift));
format %{ "vpsllw $dst,$src,$shift\t! left shift packed4S" %}
@@ -3845,7 +3864,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll8S(vecX dst, regF shift) %{
+instruct vsll8S(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 8);
match(Set dst (LShiftVS dst shift));
format %{ "psllw $dst,$shift\t! left shift packed8S" %}
@@ -3865,7 +3884,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll8S_reg(vecX dst, vecX src, regF shift) %{
+instruct vsll8S_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (LShiftVS src shift));
format %{ "vpsllw $dst,$src,$shift\t! left shift packed8S" %}
@@ -3887,7 +3906,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll16S_reg(vecY dst, vecY src, regF shift) %{
+instruct vsll16S_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
match(Set dst (LShiftVS src shift));
format %{ "vpsllw $dst,$src,$shift\t! left shift packed16S" %}
@@ -3910,7 +3929,7 @@
%}
// Integers vector left shift
-instruct vsll2I(vecD dst, regF shift) %{
+instruct vsll2I(vecD dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (LShiftVI dst shift));
format %{ "pslld $dst,$shift\t! left shift packed2I" %}
@@ -3930,7 +3949,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll2I_reg(vecD dst, vecD src, regF shift) %{
+instruct vsll2I_reg(vecD dst, vecD src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (LShiftVI src shift));
format %{ "vpslld $dst,$src,$shift\t! left shift packed2I" %}
@@ -3952,7 +3971,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll4I(vecX dst, regF shift) %{
+instruct vsll4I(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (LShiftVI dst shift));
format %{ "pslld $dst,$shift\t! left shift packed4I" %}
@@ -3972,7 +3991,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll4I_reg(vecX dst, vecX src, regF shift) %{
+instruct vsll4I_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (LShiftVI src shift));
format %{ "vpslld $dst,$src,$shift\t! left shift packed4I" %}
@@ -3994,7 +4013,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll8I_reg(vecY dst, vecY src, regF shift) %{
+instruct vsll8I_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
match(Set dst (LShiftVI src shift));
format %{ "vpslld $dst,$src,$shift\t! left shift packed8I" %}
@@ -4017,7 +4036,7 @@
%}
// Longs vector left shift
-instruct vsll2L(vecX dst, regF shift) %{
+instruct vsll2L(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (LShiftVL dst shift));
format %{ "psllq $dst,$shift\t! left shift packed2L" %}
@@ -4037,7 +4056,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll2L_reg(vecX dst, vecX src, regF shift) %{
+instruct vsll2L_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (LShiftVL src shift));
format %{ "vpsllq $dst,$src,$shift\t! left shift packed2L" %}
@@ -4059,7 +4078,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsll4L_reg(vecY dst, vecY src, regF shift) %{
+instruct vsll4L_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
match(Set dst (LShiftVL src shift));
format %{ "vpsllq $dst,$src,$shift\t! left shift packed4L" %}
@@ -4088,7 +4107,7 @@
// sign extension before a shift.
// Integers vector logical right shift
-instruct vsrl2I(vecD dst, regF shift) %{
+instruct vsrl2I(vecD dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (URShiftVI dst shift));
format %{ "psrld $dst,$shift\t! logical right shift packed2I" %}
@@ -4108,7 +4127,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsrl2I_reg(vecD dst, vecD src, regF shift) %{
+instruct vsrl2I_reg(vecD dst, vecD src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (URShiftVI src shift));
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed2I" %}
@@ -4130,7 +4149,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsrl4I(vecX dst, regF shift) %{
+instruct vsrl4I(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (URShiftVI dst shift));
format %{ "psrld $dst,$shift\t! logical right shift packed4I" %}
@@ -4150,7 +4169,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsrl4I_reg(vecX dst, vecX src, regF shift) %{
+instruct vsrl4I_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (URShiftVI src shift));
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed4I" %}
@@ -4172,7 +4191,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsrl8I_reg(vecY dst, vecY src, regF shift) %{
+instruct vsrl8I_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
match(Set dst (URShiftVI src shift));
format %{ "vpsrld $dst,$src,$shift\t! logical right shift packed8I" %}
@@ -4195,7 +4214,7 @@
%}
// Longs vector logical right shift
-instruct vsrl2L(vecX dst, regF shift) %{
+instruct vsrl2L(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (URShiftVL dst shift));
format %{ "psrlq $dst,$shift\t! logical right shift packed2L" %}
@@ -4215,7 +4234,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsrl2L_reg(vecX dst, vecX src, regF shift) %{
+instruct vsrl2L_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (URShiftVL src shift));
format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed2L" %}
@@ -4237,7 +4256,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsrl4L_reg(vecY dst, vecY src, regF shift) %{
+instruct vsrl4L_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 4);
match(Set dst (URShiftVL src shift));
format %{ "vpsrlq $dst,$src,$shift\t! logical right shift packed4L" %}
@@ -4262,7 +4281,7 @@
// ------------------- ArithmeticRightShift -----------------------------------
// Shorts/Chars vector arithmetic right shift
-instruct vsra2S(vecS dst, regF shift) %{
+instruct vsra2S(vecS dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (RShiftVS dst shift));
format %{ "psraw $dst,$shift\t! arithmetic right shift packed2S" %}
@@ -4282,7 +4301,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra2S_reg(vecS dst, vecS src, regF shift) %{
+instruct vsra2S_reg(vecS dst, vecS src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (RShiftVS src shift));
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed2S" %}
@@ -4304,7 +4323,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra4S(vecD dst, regF shift) %{
+instruct vsra4S(vecD dst, vecS shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (RShiftVS dst shift));
format %{ "psraw $dst,$shift\t! arithmetic right shift packed4S" %}
@@ -4324,7 +4343,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra4S_reg(vecD dst, vecD src, regF shift) %{
+instruct vsra4S_reg(vecD dst, vecD src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (RShiftVS src shift));
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed4S" %}
@@ -4346,7 +4365,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra8S(vecX dst, regF shift) %{
+instruct vsra8S(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 8);
match(Set dst (RShiftVS dst shift));
format %{ "psraw $dst,$shift\t! arithmetic right shift packed8S" %}
@@ -4366,7 +4385,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra8S_reg(vecX dst, vecX src, regF shift) %{
+instruct vsra8S_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 8);
match(Set dst (RShiftVS src shift));
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed8S" %}
@@ -4388,7 +4407,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra16S_reg(vecY dst, vecY src, regF shift) %{
+instruct vsra16S_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 16);
match(Set dst (RShiftVS src shift));
format %{ "vpsraw $dst,$src,$shift\t! arithmetic right shift packed16S" %}
@@ -4411,7 +4430,7 @@
%}
// Integers vector arithmetic right shift
-instruct vsra2I(vecD dst, regF shift) %{
+instruct vsra2I(vecD dst, vecS shift) %{
predicate(n->as_Vector()->length() == 2);
match(Set dst (RShiftVI dst shift));
format %{ "psrad $dst,$shift\t! arithmetic right shift packed2I" %}
@@ -4431,7 +4450,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra2I_reg(vecD dst, vecD src, regF shift) %{
+instruct vsra2I_reg(vecD dst, vecD src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 2);
match(Set dst (RShiftVI src shift));
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed2I" %}
@@ -4453,7 +4472,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra4I(vecX dst, regF shift) %{
+instruct vsra4I(vecX dst, vecS shift) %{
predicate(n->as_Vector()->length() == 4);
match(Set dst (RShiftVI dst shift));
format %{ "psrad $dst,$shift\t! arithmetic right shift packed4I" %}
@@ -4473,7 +4492,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra4I_reg(vecX dst, vecX src, regF shift) %{
+instruct vsra4I_reg(vecX dst, vecX src, vecS shift) %{
predicate(UseAVX > 0 && n->as_Vector()->length() == 4);
match(Set dst (RShiftVI src shift));
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed4I" %}
@@ -4495,7 +4514,7 @@
ins_pipe( pipe_slow );
%}
-instruct vsra8I_reg(vecY dst, vecY src, regF shift) %{
+instruct vsra8I_reg(vecY dst, vecY src, vecS shift) %{
predicate(UseAVX > 1 && n->as_Vector()->length() == 8);
match(Set dst (RShiftVI src shift));
format %{ "vpsrad $dst,$src,$shift\t! arithmetic right shift packed8I" %}
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -82,12 +82,6 @@
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
// put OS-includes here
# include <sys/types.h>
--- a/hotspot/src/os/linux/vm/os_linux.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os/linux/vm/os_linux.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -82,12 +82,6 @@
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
// put OS-includes here
# include <sys/types.h>
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -70,12 +70,6 @@
# include "assembler_sparc.inline.hpp"
# include "nativeInst_sparc.hpp"
#endif
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
// put OS-includes here
# include <dlfcn.h>
--- a/hotspot/src/os/windows/vm/os_windows.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os/windows/vm/os_windows.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -69,12 +69,6 @@
# include "assembler_x86.inline.hpp"
# include "nativeInst_x86.hpp"
#endif
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
#ifdef _DEBUG
#include <crtdbg.h>
--- a/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -52,12 +52,6 @@
#include "thread_bsd.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
// put OS-includes here
# include <sys/types.h>
--- a/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/bsd_zero/vm/os_bsd_zero.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -58,12 +58,6 @@
#include "thread_bsd.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
address os::current_stack_pointer() {
address dummy = (address) &dummy;
--- a/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -52,13 +52,6 @@
#include "thread_linux.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
-
// Linux/Sparc has rather obscure naming of registers in sigcontext
// different between 32 and 64 bits
--- a/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -52,12 +52,6 @@
#include "thread_linux.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
// put OS-includes here
# include <sys/types.h>
--- a/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/linux_zero/vm/os_linux_zero.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -53,12 +53,6 @@
#include "thread_linux.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
address os::current_stack_pointer() {
address dummy = (address) &dummy;
--- a/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -52,13 +52,6 @@
#include "thread_solaris.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
-
# include <signal.h> // needed first to avoid name collision for "std" with SC 5.0
--- a/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -52,12 +52,6 @@
#include "thread_solaris.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
// put OS-includes here
# include <sys/types.h>
--- a/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -52,12 +52,6 @@
#include "thread_windows.inline.hpp"
#include "utilities/events.hpp"
#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
# include "unwind_windows_x86.hpp"
#undef REG_SP
--- a/hotspot/src/share/vm/adlc/formssel.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/adlc/formssel.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -4049,6 +4049,7 @@
"MulVS","MulVI","MulVF","MulVD",
"DivVF","DivVD",
"AndV" ,"XorV" ,"OrV",
+ "LShiftCntV","RShiftCntV",
"LShiftVB","LShiftVS","LShiftVI","LShiftVL",
"RShiftVB","RShiftVS","RShiftVI","RShiftVL",
"URShiftVB","URShiftVS","URShiftVI","URShiftVL",
--- a/hotspot/src/share/vm/ci/ciClassList.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciClassList.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -49,6 +49,7 @@
class ciCallSite;
class ciMemberName;
class ciMethodHandle;
+class ciMethodType;
class ciArray;
class ciObjArray;
class ciTypeArray;
@@ -99,6 +100,7 @@
friend class ciMethod; \
friend class ciMethodData; \
friend class ciMethodHandle; \
+friend class ciMethodType; \
friend class ciReceiverTypeData; \
friend class ciSymbol; \
friend class ciArray; \
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/ci/ciMethodType.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CI_CIMETHODTYPE_HPP
+#define SHARE_VM_CI_CIMETHODTYPE_HPP
+
+#include "ci/ciInstance.hpp"
+#include "ci/ciUtilities.hpp"
+#include "classfile/javaClasses.hpp"
+
+// ciMethodType
+//
+// The class represents a java.lang.invoke.MethodType object.
+class ciMethodType : public ciInstance {
+private:
+ ciType* class_to_citype(oop klass_oop) const {
+ if (java_lang_Class::is_primitive(klass_oop)) {
+ BasicType bt = java_lang_Class::primitive_type(klass_oop);
+ return ciType::make(bt);
+ } else {
+ Klass* k = java_lang_Class::as_Klass(klass_oop);
+ return CURRENT_ENV->get_klass(k);
+ }
+ }
+
+public:
+ ciMethodType(instanceHandle h_i) : ciInstance(h_i) {}
+
+ // What kind of ciObject is this?
+ bool is_method_type() const { return true; }
+
+ ciType* rtype() const {
+ GUARDED_VM_ENTRY(
+ oop rtype = java_lang_invoke_MethodType::rtype(get_oop());
+ return class_to_citype(rtype);
+ )
+ }
+
+ int ptype_count() const {
+ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_count(get_oop());)
+ }
+
+ int ptype_slot_count() const {
+ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_slot_count(get_oop());)
+ }
+
+ ciType* ptype_at(int index) const {
+ GUARDED_VM_ENTRY(
+ oop ptype = java_lang_invoke_MethodType::ptype(get_oop(), index);
+ return class_to_citype(ptype);
+ )
+ }
+};
+
+#endif // SHARE_VM_CI_CIMETHODTYPE_HPP
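ciMethodType gives the compiler interface direct access to the resolved return and parameter types of a java.lang.invoke.MethodType. A minimal sketch of how a client walks it (hypothetical helper, assuming ciType::print_name_on as used elsewhere in ci; the ciSignature constructor added later in this changeset does essentially the same traversal):

    static void print_method_type(ciMethodType* mt, outputStream* st) {
      st->print("(");
      for (int i = 0; i < mt->ptype_count(); i++) {
        mt->ptype_at(i)->print_name_on(st);  // parameter types, in order
      }
      st->print(")");
      mt->rtype()->print_name_on(st);        // return type last
    }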
--- a/hotspot/src/share/vm/ci/ciObject.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciObject.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -123,6 +123,7 @@
virtual bool is_instance() { return false; }
virtual bool is_member_name() const { return false; }
virtual bool is_method_handle() const { return false; }
+ virtual bool is_method_type() const { return false; }
virtual bool is_array() { return false; }
virtual bool is_obj_array() { return false; }
virtual bool is_type_array() { return false; }
@@ -142,35 +143,39 @@
}
// Subclass casting with assertions.
- ciNullObject* as_null_object() {
+ ciNullObject* as_null_object() {
assert(is_null_object(), "bad cast");
return (ciNullObject*)this;
}
- ciCallSite* as_call_site() {
+ ciCallSite* as_call_site() {
assert(is_call_site(), "bad cast");
- return (ciCallSite*) this;
+ return (ciCallSite*)this;
}
- ciInstance* as_instance() {
+ ciInstance* as_instance() {
assert(is_instance(), "bad cast");
return (ciInstance*)this;
}
- ciMemberName* as_member_name() {
+ ciMemberName* as_member_name() {
assert(is_member_name(), "bad cast");
return (ciMemberName*)this;
}
- ciMethodHandle* as_method_handle() {
+ ciMethodHandle* as_method_handle() {
assert(is_method_handle(), "bad cast");
- return (ciMethodHandle*) this;
+ return (ciMethodHandle*)this;
}
- ciArray* as_array() {
+ ciMethodType* as_method_type() {
+ assert(is_method_type(), "bad cast");
+ return (ciMethodType*)this;
+ }
+ ciArray* as_array() {
assert(is_array(), "bad cast");
return (ciArray*)this;
}
- ciObjArray* as_obj_array() {
+ ciObjArray* as_obj_array() {
assert(is_obj_array(), "bad cast");
return (ciObjArray*)this;
}
- ciTypeArray* as_type_array() {
+ ciTypeArray* as_type_array() {
assert(is_type_array(), "bad cast");
return (ciTypeArray*)this;
}
--- a/hotspot/src/share/vm/ci/ciObjectFactory.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciObjectFactory.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -30,6 +30,7 @@
#include "ci/ciMethod.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciMethodHandle.hpp"
+#include "ci/ciMethodType.hpp"
#include "ci/ciNullObject.hpp"
#include "ci/ciObjArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
@@ -237,23 +238,23 @@
assert(key == NULL || Universe::heap()->is_in_reserved(key), "must be");
- NonPermObject* &bucket = find_non_perm(key);
- if (bucket != NULL) {
- return bucket->object();
- }
+ NonPermObject* &bucket = find_non_perm(key);
+ if (bucket != NULL) {
+ return bucket->object();
+ }
- // The ciObject does not yet exist. Create it and insert it
- // into the cache.
- Handle keyHandle(key);
- ciObject* new_object = create_new_object(keyHandle());
- assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
- init_ident_of(new_object);
+ // The ciObject does not yet exist. Create it and insert it
+ // into the cache.
+ Handle keyHandle(key);
+ ciObject* new_object = create_new_object(keyHandle());
+ assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
+ init_ident_of(new_object);
assert(Universe::heap()->is_in_reserved(new_object->get_oop()), "must be");
- // Not a perm-space object.
- insert_non_perm(bucket, keyHandle(), new_object);
- return new_object;
- }
+ // Not a perm-space object.
+ insert_non_perm(bucket, keyHandle(), new_object);
+ return new_object;
+}
// ------------------------------------------------------------------
// ciObjectFactory::get
@@ -324,6 +325,8 @@
return new (arena()) ciMemberName(h_i);
else if (java_lang_invoke_MethodHandle::is_instance(o))
return new (arena()) ciMethodHandle(h_i);
+ else if (java_lang_invoke_MethodType::is_instance(o))
+ return new (arena()) ciMethodType(h_i);
else
return new (arena()) ciInstance(h_i);
} else if (o->is_objArray()) {
--- a/hotspot/src/share/vm/ci/ciSignature.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciSignature.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "ci/ciMethodType.hpp"
#include "ci/ciSignature.hpp"
#include "ci/ciUtilities.hpp"
#include "memory/allocation.inline.hpp"
@@ -80,6 +81,24 @@
}
// ------------------------------------------------------------------
+// ciSignature::ciSignature
+ciSignature::ciSignature(ciKlass* accessing_klass, ciSymbol* symbol, ciMethodType* method_type) :
+ _symbol(symbol),
+ _accessing_klass(accessing_klass),
+ _size( method_type->ptype_slot_count()),
+ _count(method_type->ptype_count())
+{
+ ASSERT_IN_VM;
+ EXCEPTION_CONTEXT;
+ Arena* arena = CURRENT_ENV->arena();
+ _types = new (arena) GrowableArray<ciType*>(arena, _count + 1, 0, NULL);
+ for (int i = 0; i < _count; i++) {
+ _types->append(method_type->ptype_at(i));
+ }
+ _types->append(method_type->rtype());
+}
+
+// ------------------------------------------------------------------
// ciSignature::return_type
//
// What is the return type of this signature?
--- a/hotspot/src/share/vm/ci/ciSignature.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciSignature.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -47,6 +47,7 @@
friend class ciObjectFactory;
ciSignature(ciKlass* accessing_klass, constantPoolHandle cpool, ciSymbol* signature);
+ ciSignature(ciKlass* accessing_klass, ciSymbol* signature, ciMethodType* method_type);
void get_all_klasses();
--- a/hotspot/src/share/vm/ci/ciStreams.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciStreams.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -364,12 +364,15 @@
constantPoolHandle cpool(_method->get_Method()->constants());
ciMethod* m = env->get_method_by_index(cpool, get_method_index(), cur_bc(), _holder);
will_link = m->is_loaded();
- // Get declared method signature and return it.
- if (has_optional_appendix()) {
- const int sig_index = get_method_signature_index();
- Symbol* sig_sym = cpool->symbol_at(sig_index);
- ciKlass* pool_holder = env->get_klass(cpool->pool_holder());
- (*declared_signature_result) = new (env->arena()) ciSignature(pool_holder, cpool, env->get_symbol(sig_sym));
+
+ // Use the MethodType stored in the CP cache to create a signature
+ // with correct types (with respect to class loaders).
+ if (has_method_type()) {
+ ciSymbol* sig_sym = env->get_symbol(cpool->symbol_at(get_method_signature_index()));
+ ciKlass* pool_holder = env->get_klass(cpool->pool_holder());
+ ciMethodType* method_type = get_method_type();
+ ciSignature* declared_signature = new (env->arena()) ciSignature(pool_holder, sig_sym, method_type);
+ (*declared_signature_result) = declared_signature;
} else {
(*declared_signature_result) = m->signature();
}
@@ -400,6 +403,31 @@
}
// ------------------------------------------------------------------
+// ciBytecodeStream::has_method_type
+//
+// Returns true if there is a MethodType argument stored in the
+// constant pool cache at the current bci.
+bool ciBytecodeStream::has_method_type() {
+ GUARDED_VM_ENTRY(
+ constantPoolHandle cpool(_method->get_Method()->constants());
+ return ConstantPool::has_method_type_at_if_loaded(cpool, get_method_index());
+ )
+}
+
+// ------------------------------------------------------------------
+// ciBytecodeStream::get_method_type
+//
+// Return the MethodType stored in the constant pool cache at
+// the current bci.
+ciMethodType* ciBytecodeStream::get_method_type() {
+ GUARDED_VM_ENTRY(
+ constantPoolHandle cpool(_method->get_Method()->constants());
+ oop method_type_oop = ConstantPool::method_type_at_if_loaded(cpool, get_method_index());
+ return CURRENT_ENV->get_object(method_type_oop)->as_method_type();
+ )
+}
+
+// ------------------------------------------------------------------
// ciBytecodeStream::get_declared_method_holder
//
// Get the declared holder of the currently referenced method.
--- a/hotspot/src/share/vm/ci/ciStreams.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciStreams.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -257,12 +257,14 @@
int get_field_holder_index();
int get_field_signature_index();
- ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result);
- bool has_appendix();
- ciObject* get_appendix();
- ciKlass* get_declared_method_holder();
- int get_method_holder_index();
- int get_method_signature_index();
+ ciMethod* get_method(bool& will_link, ciSignature* *declared_signature_result);
+ bool has_appendix();
+ ciObject* get_appendix();
+ bool has_method_type();
+ ciMethodType* get_method_type();
+ ciKlass* get_declared_method_holder();
+ int get_method_holder_index();
+ int get_method_signature_index();
// Get the resolved references arrays from the constant pool
ciObjArray* get_resolved_references();
--- a/hotspot/src/share/vm/ci/ciTypeFlow.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/ci/ciTypeFlow.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -2194,6 +2194,10 @@
if (head->backedge_copy_count() != 0)
continue;
+ // Don't clone the head of an OSR loop, so that the types in the start block stay correct.
+ if (is_osr_flow() && head->start() == start_bci())
+ continue;
+
// check _no_ shared head below us
Loop* ch;
for (ch = lp->child(); ch != NULL && ch->head() != head; ch = ch->sibling());
--- a/hotspot/src/share/vm/classfile/systemDictionary.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -2429,7 +2429,8 @@
methodHandle SystemDictionary::find_method_handle_invoker(Symbol* name,
Symbol* signature,
KlassHandle accessing_klass,
- Handle* appendix_result,
+ Handle *appendix_result,
+ Handle *method_type_result,
TRAPS) {
methodHandle empty;
assert(EnableInvokeDynamic, "");
@@ -2461,6 +2462,7 @@
vmSymbols::linkMethod_signature(),
&args, CHECK_(empty));
Handle mname(THREAD, (oop) result.get_jobject());
+ (*method_type_result) = method_type;
return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD);
}
@@ -2607,7 +2609,8 @@
Handle bootstrap_specifier,
Symbol* name,
Symbol* type,
- Handle* appendix_result,
+ Handle *appendix_result,
+ Handle *method_type_result,
TRAPS) {
methodHandle empty;
Handle bsm, info;
@@ -2650,6 +2653,7 @@
vmSymbols::linkCallSite_signature(),
&args, CHECK_(empty));
Handle mname(THREAD, (oop) result.get_jobject());
+ (*method_type_result) = method_type;
return unpack_method_and_appendix(mname, appendix_box, appendix_result, THREAD);
}
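Both linker upcalls now return the MethodType through an extra out-parameter, next to the adapter method and the appendix. A hedged sketch of a resolution-time caller after this change (function and variable names assumed; the real call sites are in constant-pool cache resolution):

    static methodHandle resolve_mh_invoker(Symbol* name, Symbol* signature,
                                           KlassHandle accessing_klass, TRAPS) {
      Handle appendix;
      Handle method_type;
      methodHandle invoker = SystemDictionary::find_method_handle_invoker(
          name, signature, accessing_klass, &appendix, &method_type,
          CHECK_(methodHandle()));
      // method_type can now be cached alongside the appendix in
      // resolved_references, which is what ciBytecodeStream::get_method_type()
      // reads back on the compiler side.
      return invoker;
    }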
--- a/hotspot/src/share/vm/classfile/systemDictionary.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/classfile/systemDictionary.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -497,6 +497,7 @@
Symbol* signature,
KlassHandle accessing_klass,
Handle *appendix_result,
+ Handle *method_type_result,
TRAPS);
// for a given signature, find the internal MethodHandle method (linkTo* or invokeBasic)
// (does not ask Java, since this is a low-level intrinsic defined by the JVM)
@@ -523,6 +524,7 @@
Symbol* name,
Symbol* type,
Handle *appendix_result,
+ Handle *method_type_result,
TRAPS);
// Utility for printing loader "name" as part of tracing constraints
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -1195,9 +1195,9 @@
set_promo_size(desired_promo_size);
}
-int CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
+uint CMSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
bool is_survivor_overflow,
- int tenuring_threshold,
+ uint tenuring_threshold,
size_t survivor_limit) {
assert(survivor_limit >= generation_alignment(),
"survivor_limit too small");
@@ -1315,7 +1315,7 @@
gclog_or_tty->print( " avg_promoted_padded_avg: %f"
" avg_pretenured_padded_avg: %f"
- " tenuring_thresh: %d"
+ " tenuring_thresh: %u"
" target_size: " SIZE_FORMAT
" survivor_limit: " SIZE_FORMAT,
gch->gc_stats(1)->avg_promoted()->padded_average(),
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -440,9 +440,9 @@
size_t max_eden_size);
// Calculates new survivor space size; returns a new tenuring threshold
// value. Stores new survivor size in _survivor_size.
- virtual int compute_survivor_space_size_and_threshold(
+ virtual uint compute_survivor_space_size_and_threshold(
bool is_survivor_overflow,
- int tenuring_threshold,
+ uint tenuring_threshold,
size_t survivor_limit);
virtual void compute_tenured_generation_free_space(size_t cur_tenured_free,
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -1188,29 +1188,14 @@
// liveness counting data.
class CMCountDataClosureBase: public HeapRegionClosure {
protected:
+ G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
+ CardTableModRefBS* _ct_bs;
+
BitMap* _region_bm;
BitMap* _card_bm;
- void set_card_bitmap_range(BitMap::idx_t start_idx, BitMap::idx_t last_idx) {
- assert(start_idx <= last_idx, "sanity");
-
- // Set the inclusive bit range [start_idx, last_idx].
- // For small ranges (up to 8 cards) use a simple loop; otherwise
- // use par_at_put_range.
- if ((last_idx - start_idx) < 8) {
- for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
- _card_bm->par_set_bit(i);
- }
- } else {
- assert(last_idx < _card_bm->size(), "sanity");
- // Note BitMap::par_at_put_range() is exclusive.
- BitMap::idx_t max_idx = MAX2(last_idx+1, _card_bm->size());
- _card_bm->par_at_put_range(start_idx, max_idx, true);
- }
- }
-
- // It takes a region that's not empty (i.e., it has at least one
+ // Takes a region that's not empty (i.e., it has at least one
// live object in it) and sets its corresponding bit on the region
// bitmap to 1. If the region is "starts humongous" it will also set
// to 1 the bits on the region bitmap that correspond to its
@@ -1231,9 +1216,11 @@
}
public:
- CMCountDataClosureBase(ConcurrentMark *cm,
+ CMCountDataClosureBase(G1CollectedHeap* g1h,
BitMap* region_bm, BitMap* card_bm):
- _cm(cm), _region_bm(region_bm), _card_bm(card_bm) { }
+ _g1h(g1h), _cm(g1h->concurrent_mark()),
+ _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
+ _region_bm(region_bm), _card_bm(card_bm) { }
};
// Closure that calculates the # live objects per region. Used
@@ -1243,9 +1230,9 @@
size_t _region_marked_bytes;
public:
- CalcLiveObjectsClosure(CMBitMapRO *bm, ConcurrentMark *cm,
+ CalcLiveObjectsClosure(CMBitMapRO *bm, G1CollectedHeap* g1h,
BitMap* region_bm, BitMap* card_bm) :
- CMCountDataClosureBase(cm, region_bm, card_bm),
+ CMCountDataClosureBase(g1h, region_bm, card_bm),
_bm(bm), _region_marked_bytes(0) { }
bool doHeapRegion(HeapRegion* hr) {
@@ -1261,44 +1248,63 @@
return false;
}
- HeapWord* nextTop = hr->next_top_at_mark_start();
- HeapWord* start = hr->bottom();
-
- assert(start <= hr->end() && start <= nextTop && nextTop <= hr->end(),
+ HeapWord* ntams = hr->next_top_at_mark_start();
+ HeapWord* start = hr->bottom();
+
+ assert(start <= hr->end() && start <= ntams && ntams <= hr->end(),
err_msg("Preconditions not met - "
- "start: "PTR_FORMAT", nextTop: "PTR_FORMAT", end: "PTR_FORMAT,
- start, nextTop, hr->end()));
+ "start: "PTR_FORMAT", ntams: "PTR_FORMAT", end: "PTR_FORMAT,
+ start, ntams, hr->end()));
// Find the first marked object at or after "start".
- start = _bm->getNextMarkedWordAddress(start, nextTop);
+ start = _bm->getNextMarkedWordAddress(start, ntams);
size_t marked_bytes = 0;
- while (start < nextTop) {
+ while (start < ntams) {
oop obj = oop(start);
int obj_sz = obj->size();
- HeapWord* obj_last = start + obj_sz - 1;
+ HeapWord* obj_end = start + obj_sz;
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
- BitMap::idx_t last_idx = _cm->card_bitmap_index_for(obj_last);
-
- // Set the bits in the card BM for this object (inclusive).
- set_card_bitmap_range(start_idx, last_idx);
+ BitMap::idx_t end_idx = _cm->card_bitmap_index_for(obj_end);
+
+ // Note: if we're looking at the last region in the heap - obj_end
+ // could actually be just beyond the end of the heap; end_idx
+ // will then correspond to a (non-existent) card that is also
+ // just beyond the heap.
+ if (_g1h->is_in_g1_reserved(obj_end) && !_ct_bs->is_card_aligned(obj_end)) {
+ // end of object is not card aligned - increment to cover
+ // all the cards spanned by the object
+ end_idx += 1;
+ }
+
+ // Set the bits in the card BM for the cards spanned by this object.
+ _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
// Add the size of this object to the number of marked bytes.
marked_bytes += (size_t)obj_sz * HeapWordSize;
// Find the next marked object after this one.
- start = _bm->getNextMarkedWordAddress(obj_last + 1, nextTop);
+ start = _bm->getNextMarkedWordAddress(obj_end, ntams);
}
// Mark the allocated-since-marking portion...
HeapWord* top = hr->top();
- if (nextTop < top) {
- BitMap::idx_t start_idx = _cm->card_bitmap_index_for(nextTop);
- BitMap::idx_t last_idx = _cm->card_bitmap_index_for(top - 1);
-
- set_card_bitmap_range(start_idx, last_idx);
+ if (ntams < top) {
+ BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
+ BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
+
+ // Note: if we're looking at the last region in the heap - top
+ // could actually be just beyond the end of the heap; end_idx
+ // will then correspond to a (non-existent) card that is also
+ // just beyond the heap.
+ if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
+ // end of object is not card aligned - increment to cover
+ // all the cards spanned by the object
+ end_idx += 1;
+ }
+ _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
// This definitely means the region has live objects.
set_bit_for_region(hr);
@@ -1325,6 +1331,7 @@
// regions during the STW cleanup pause.
class VerifyLiveObjectDataHRClosure: public HeapRegionClosure {
+ G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
CalcLiveObjectsClosure _calc_cl;
BitMap* _region_bm; // Region BM to be verified
@@ -1337,14 +1344,14 @@
int _failures;
public:
- VerifyLiveObjectDataHRClosure(ConcurrentMark* cm,
+ VerifyLiveObjectDataHRClosure(G1CollectedHeap* g1h,
BitMap* region_bm,
BitMap* card_bm,
BitMap* exp_region_bm,
BitMap* exp_card_bm,
bool verbose) :
- _cm(cm),
- _calc_cl(_cm->nextMarkBitMap(), _cm, exp_region_bm, exp_card_bm),
+ _g1h(g1h), _cm(g1h->concurrent_mark()),
+ _calc_cl(_cm->nextMarkBitMap(), g1h, exp_region_bm, exp_card_bm),
_region_bm(region_bm), _card_bm(card_bm), _verbose(verbose),
_exp_region_bm(exp_region_bm), _exp_card_bm(exp_card_bm),
_failures(0) { }
@@ -1491,7 +1498,7 @@
void work(uint worker_id) {
assert(worker_id < _n_workers, "invariant");
- VerifyLiveObjectDataHRClosure verify_cl(_cm,
+ VerifyLiveObjectDataHRClosure verify_cl(_g1h,
_actual_region_bm, _actual_card_bm,
_expected_region_bm,
_expected_card_bm,
@@ -1521,10 +1528,10 @@
class FinalCountDataUpdateClosure: public CMCountDataClosureBase {
public:
- FinalCountDataUpdateClosure(ConcurrentMark* cm,
+ FinalCountDataUpdateClosure(G1CollectedHeap* g1h,
BitMap* region_bm,
BitMap* card_bm) :
- CMCountDataClosureBase(cm, region_bm, card_bm) { }
+ CMCountDataClosureBase(g1h, region_bm, card_bm) { }
bool doHeapRegion(HeapRegion* hr) {
@@ -1548,24 +1555,29 @@
if (ntams < top) {
// This definitely means the region has live objects.
set_bit_for_region(hr);
- }
-
- // Now set the bits for [ntams, top]
- BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
- // set_card_bitmap_range() expects the last_idx to be with
- // the range of the bit map (see assertion in set_card_bitmap_range()),
- // so limit it to that range with this application of MIN2.
- BitMap::idx_t last_idx = MIN2(_cm->card_bitmap_index_for(top),
- _card_bm->size()-1);
- if (start_idx < _card_bm->size()) {
- set_card_bitmap_range(start_idx, last_idx);
- } else {
- // To reach here start_idx must be beyond the end of
- // the bit map and last_idx must have been limited by
- // the MIN2().
- assert(start_idx == last_idx + 1,
- err_msg("Not beyond end start_idx " SIZE_FORMAT " last_idx "
- SIZE_FORMAT, start_idx, last_idx));
+
+ // Now set the bits in the card bitmap for [ntams, top)
+ BitMap::idx_t start_idx = _cm->card_bitmap_index_for(ntams);
+ BitMap::idx_t end_idx = _cm->card_bitmap_index_for(top);
+
+ // Note: if we're looking at the last region in the heap - top
+ // could actually be just beyond the end of the heap; end_idx
+ // will then correspond to a (non-existent) card that is also
+ // just beyond the heap.
+ if (_g1h->is_in_g1_reserved(top) && !_ct_bs->is_card_aligned(top)) {
+ // end of object is not card aligned - increment to cover
+ // all the cards spanned by the object
+ end_idx += 1;
+ }
+
+ assert(end_idx <= _card_bm->size(),
+ err_msg("oob: end_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
+ end_idx, _card_bm->size()));
+ assert(start_idx < _card_bm->size(),
+ err_msg("oob: start_idx= "SIZE_FORMAT", bitmap size= "SIZE_FORMAT,
+ start_idx, _card_bm->size()));
+
+ _cm->set_card_bitmap_range(_card_bm, start_idx, end_idx, true /* is_par */);
}
// Set the bit for the region if it contains live data
@@ -1606,7 +1618,7 @@
void work(uint worker_id) {
assert(worker_id < _n_workers, "invariant");
- FinalCountDataUpdateClosure final_update_cl(_cm,
+ FinalCountDataUpdateClosure final_update_cl(_g1h,
_actual_region_bm,
_actual_card_bm);
@@ -2846,20 +2858,19 @@
// Aggregate the counting data that was constructed concurrently
// with marking.
class AggregateCountDataHRClosure: public HeapRegionClosure {
+ G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
+ CardTableModRefBS* _ct_bs;
BitMap* _cm_card_bm;
size_t _max_task_num;
public:
- AggregateCountDataHRClosure(ConcurrentMark *cm,
+ AggregateCountDataHRClosure(G1CollectedHeap* g1h,
BitMap* cm_card_bm,
size_t max_task_num) :
- _cm(cm), _cm_card_bm(cm_card_bm),
- _max_task_num(max_task_num) { }
-
- bool is_card_aligned(HeapWord* p) {
- return ((uintptr_t(p) & (CardTableModRefBS::card_size - 1)) == 0);
- }
+ _g1h(g1h), _cm(g1h->concurrent_mark()),
+ _ct_bs((CardTableModRefBS*) (g1h->barrier_set())),
+ _cm_card_bm(cm_card_bm), _max_task_num(max_task_num) { }
bool doHeapRegion(HeapRegion* hr) {
if (hr->continuesHumongous()) {
@@ -2890,16 +2901,23 @@
return false;
}
- assert(is_card_aligned(start), "sanity");
- assert(is_card_aligned(end), "sanity");
+ // 'start' should be in the heap.
+ assert(_g1h->is_in_g1_reserved(start) && _ct_bs->is_card_aligned(start), "sanity");
+ // 'end' *may* be just beyond the end of the heap (if hr is the last region)
+ assert(!_g1h->is_in_g1_reserved(end) || _ct_bs->is_card_aligned(end), "sanity");
BitMap::idx_t start_idx = _cm->card_bitmap_index_for(start);
BitMap::idx_t limit_idx = _cm->card_bitmap_index_for(limit);
BitMap::idx_t end_idx = _cm->card_bitmap_index_for(end);
- // If ntams is not card aligned then we bump the index for
- // limit so that we get the card spanning ntams.
- if (!is_card_aligned(limit)) {
+ // If ntams is not card aligned then we bump the card bitmap index
+ // for limit so that we get all the cards spanned by
+ // the object ending at ntams.
+ // Note: if this is the last region in the heap then ntams
+ // could actually be just beyond the end of the heap;
+ // limit_idx will then correspond to a (non-existent) card
+ // that is also outside the heap.
+ if (_g1h->is_in_g1_reserved(limit) && !_ct_bs->is_card_aligned(limit)) {
limit_idx += 1;
}
@@ -2928,7 +2946,7 @@
// BitMap::get_next_one_offset() can handle the case when
// its left_offset parameter is greater than its right_offset
- // parameter. If does, however, have an early exit if
+ // parameter. It does, however, have an early exit if
// left_offset == right_offset. So let's limit the value
// passed in for left offset here.
BitMap::idx_t next_idx = MIN2(scan_idx + 1, limit_idx);
@@ -2964,7 +2982,7 @@
_active_workers(n_workers) { }
void work(uint worker_id) {
- AggregateCountDataHRClosure cl(_cm, _cm_card_bm, _max_task_num);
+ AggregateCountDataHRClosure cl(_g1h, _cm_card_bm, _max_task_num);
if (G1CollectedHeap::use_parallel_gc_threads()) {
_g1h->heap_region_par_iterate_chunked(&cl, worker_id,
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -806,7 +806,14 @@
return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
}
- // Counting data structure accessors
+ // Liveness counting
+
+ // Utility routine to set an exclusive range of cards on the given
+ // card liveness bitmap
+ inline void set_card_bitmap_range(BitMap* card_bm,
+ BitMap::idx_t start_idx,
+ BitMap::idx_t end_idx,
+ bool is_par);
// Returns the card number of the bottom of the G1 heap.
// Used in biasing indices into accounting card bitmaps.
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -28,6 +28,42 @@
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+// Utility routine to set an exclusive range of cards on the given
+// card liveness bitmap
+inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
+ BitMap::idx_t start_idx,
+ BitMap::idx_t end_idx,
+ bool is_par) {
+
+ // Set the exclusive bit range [start_idx, end_idx).
+ assert((end_idx - start_idx) > 0, "at least one card");
+ assert(end_idx <= card_bm->size(), "sanity");
+
+ // Silently clip the end index
+ end_idx = MIN2(end_idx, card_bm->size());
+
+ // For small ranges use a simple loop; otherwise use set_range or
+ // par_at_put_range (if parallel). The range is made up of the
+ // cards that are spanned by an object/mem region, so 8 cards will
+ // allow object sizes up to 4K to be handled using the loop.
+ if ((end_idx - start_idx) <= 8) {
+ for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
+ if (is_par) {
+ card_bm->par_set_bit(i);
+ } else {
+ card_bm->set_bit(i);
+ }
+ }
+ } else {
+ // Note BitMap::par_at_put_range() and BitMap::set_range() are exclusive.
+ if (is_par) {
+ card_bm->par_at_put_range(start_idx, end_idx, true);
+ } else {
+ card_bm->set_range(start_idx, end_idx);
+ }
+ }
+}
+
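A worked example of the exclusive convention (values assumed): an object spanning card indices 5, 6 and 7 is passed as the range [5, 8); the width of 3 is below the 8-card cutoff, so the simple per-bit loop runs:

    cm->set_card_bitmap_range(card_bm, 5, 8, true /* is_par */);  // sets bits 5, 6, 7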
// Returns the index in the liveness accounting card bitmap
// for the given address
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
@@ -35,7 +71,6 @@
// by the card shift -- address 0 corresponds to card number 0. One
// must subtract the card num of the bottom of the heap to obtain a
// card table index.
-
intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
return card_num - heap_bottom_card_num();
}
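To make the biasing concrete, a worked example assuming the usual 512-byte cards (CardTableModRefBS::card_shift == 9) and a heap bottom of 0x80000000: for addr == 0x80000E00, the card number is 0x80000E00 >> 9 == 0x400007, heap_bottom_card_num() is 0x80000000 >> 9 == 0x400000, so the liveness bitmap index is 7.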
@@ -46,8 +81,10 @@
size_t* marked_bytes_array,
BitMap* task_card_bm) {
G1CollectedHeap* g1h = _g1h;
+ CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
+
HeapWord* start = mr.start();
- HeapWord* last = mr.last();
+ HeapWord* end = mr.end();
size_t region_size_bytes = mr.byte_size();
uint index = hr->hrs_index();
@@ -61,24 +98,21 @@
marked_bytes_array[index] += region_size_bytes;
BitMap::idx_t start_idx = card_bitmap_index_for(start);
- BitMap::idx_t last_idx = card_bitmap_index_for(last);
+ BitMap::idx_t end_idx = card_bitmap_index_for(end);
- // The card bitmap is task/worker specific => no need to use 'par' routines.
- // Set bits in the inclusive bit range [start_idx, last_idx].
- //
- // For small ranges use a simple loop; otherwise use set_range
- // The range are the cards that are spanned by the object/region
- // so 8 cards will allow objects/regions up to 4K to be handled
- // using the loop.
- if ((last_idx - start_idx) <= 8) {
- for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
- task_card_bm->set_bit(i);
- }
- } else {
- assert(last_idx < task_card_bm->size(), "sanity");
- // Note: BitMap::set_range() is exclusive.
- task_card_bm->set_range(start_idx, last_idx+1);
+ // Note: if we're looking at the last region in the heap - end
+ // could actually be just beyond the end of the heap; end_idx
+ // will then correspond to a (non-existent) card that is also
+ // just beyond the heap.
+ if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
+ // end of region is not card aligned - increment to cover
+ // all the cards spanned by the region.
+ end_idx += 1;
}
+ // The card bitmap is task/worker specific => no need to use
+ // the 'par' BitMap routines.
+ // Set bits in the exclusive bit range [start_idx, end_idx).
+ set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
}
// Counts the given memory region in the task/worker counting
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -4151,7 +4151,7 @@
}
}
-void G1CollectedHeap::release_gc_alloc_regions() {
+void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
_survivor_gc_alloc_region.release();
// If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't
@@ -4161,8 +4161,8 @@
_retained_old_gc_alloc_region = _old_gc_alloc_region.release();
if (ResizePLAB) {
- _survivor_plab_stats.adjust_desired_plab_sz();
- _old_plab_stats.adjust_desired_plab_sz();
+ _survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
+ _old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
}
}
@@ -5427,7 +5427,7 @@
};
// Weak Reference processing during an evacuation pause (part 1).
-void G1CollectedHeap::process_discovered_references() {
+void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
double ref_proc_start = os::elapsedTime();
ReferenceProcessor* rp = _ref_processor_stw;
@@ -5454,15 +5454,14 @@
// referents points to another object which is also referenced by an
// object discovered by the STW ref processor.
- uint active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
- workers()->active_workers() : 1);
-
assert(!G1CollectedHeap::use_parallel_gc_threads() ||
- active_workers == workers()->active_workers(),
- "Need to reset active_workers");
-
- set_par_threads(active_workers);
- G1ParPreserveCMReferentsTask keep_cm_referents(this, active_workers, _task_queues);
+ no_of_gc_workers == workers()->active_workers(),
+ "Need to reset active GC workers");
+
+ set_par_threads(no_of_gc_workers);
+ G1ParPreserveCMReferentsTask keep_cm_referents(this,
+ no_of_gc_workers,
+ _task_queues);
if (G1CollectedHeap::use_parallel_gc_threads()) {
workers()->run_task(&keep_cm_referents);
@@ -5528,10 +5527,10 @@
NULL);
} else {
// Parallel reference processing
- assert(rp->num_q() == active_workers, "sanity");
- assert(active_workers <= rp->max_num_q(), "sanity");
-
- G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+ assert(rp->num_q() == no_of_gc_workers, "sanity");
+ assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
+
+ G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
}
@@ -5546,7 +5545,7 @@
}
// Weak Reference processing during an evacuation pause (part 2).
-void G1CollectedHeap::enqueue_discovered_references() {
+void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
double ref_enq_start = os::elapsedTime();
ReferenceProcessor* rp = _ref_processor_stw;
@@ -5560,13 +5559,12 @@
} else {
// Parallel reference enqueuing
- uint active_workers = (ParallelGCThreads > 0 ? workers()->active_workers() : 1);
- assert(active_workers == workers()->active_workers(),
- "Need to reset active_workers");
- assert(rp->num_q() == active_workers, "sanity");
- assert(active_workers <= rp->max_num_q(), "sanity");
-
- G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
+ assert(no_of_gc_workers == workers()->active_workers(),
+ "Need to reset active workers");
+ assert(rp->num_q() == no_of_gc_workers, "sanity");
+ assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
+
+ G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
rp->enqueue_discovered_references(&par_task_executor);
}
@@ -5658,7 +5656,7 @@
// as we may have to copy some 'reachable' referent
// objects (and their reachable sub-graphs) that were
// not copied during the pause.
- process_discovered_references();
+ process_discovered_references(n_workers);
// Weak root processing.
// Note: when JSR 292 is enabled and code blobs can contain
@@ -5670,7 +5668,7 @@
JNIHandles::weak_oops_do(&is_alive, &keep_alive);
}
- release_gc_alloc_regions();
+ release_gc_alloc_regions(n_workers);
g1_rem_set()->cleanup_after_oops_into_collection_set_do();
concurrent_g1_refine()->clear_hot_cache();
@@ -5694,7 +5692,7 @@
// will log these updates (and dirty their associated
// cards). We need these updates logged to update any
// RSets.
- enqueue_discovered_references();
+ enqueue_discovered_references(n_workers);
if (G1DeferredRSUpdate) {
RedirtyLoggedCardTableEntryFastClosure redirty;
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -326,7 +326,7 @@
void init_gc_alloc_regions();
// It releases the GC alloc regions at the end of a GC.
- void release_gc_alloc_regions();
+ void release_gc_alloc_regions(uint no_of_gc_workers);
// It does any cleanup that needs to be done on the GC alloc regions
// before a Full GC.
@@ -652,11 +652,11 @@
// Process any reference objects discovered during
// an incremental evacuation pause.
- void process_discovered_references();
+ void process_discovered_references(uint no_of_gc_workers);
// Enqueue any remaining discovered references
// after processing.
- void enqueue_discovered_references();
+ void enqueue_discovered_references(uint no_of_gc_workers);
public:
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -840,8 +840,8 @@
//
// Current tenuring threshold, set to 0 if the collector reaches the
- // maximum amount of suvivors regions.
- int _tenuring_threshold;
+ // maximum number of survivor regions.
+ uint _tenuring_threshold;
// The limit on the number of regions allocated for survivors.
uint _max_survivor_regions;
@@ -851,7 +851,7 @@
size_t _survivor_bytes_before_gc;
size_t _capacity_before_gc;
- // The amount of survor regions after a collection.
+ // The number of survivor regions after a collection.
uint _recorded_survivor_regions;
// List of survivor regions.
HeapRegion* _recorded_survivor_head;
@@ -862,7 +862,7 @@
public:
inline GCAllocPurpose
- evacuation_destination(HeapRegion* src_region, int age, size_t word_sz) {
+ evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
if (age < _tenuring_threshold && src_region->is_young()) {
return GCAllocForSurvived;
} else {
--- a/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -1037,7 +1037,7 @@
adjust_desired_tenuring_threshold();
if (ResizePLAB) {
- plab_stats()->adjust_desired_plab_sz();
+ plab_stats()->adjust_desired_plab_sz(n_workers);
}
if (PrintGC && !PrintGCDetails) {
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -121,103 +121,12 @@
// We get passed the space_top value to prevent us from traversing into
// the old_gen promotion labs, which cannot be safely parsed.
-void CardTableExtension::scavenge_contents(ObjectStartArray* start_array,
- MutableSpace* sp,
- HeapWord* space_top,
- PSPromotionManager* pm)
-{
- assert(start_array != NULL && sp != NULL && pm != NULL, "Sanity");
- assert(start_array->covered_region().contains(sp->used_region()),
- "ObjectStartArray does not cover space");
- if (sp->not_empty()) {
- oop* sp_top = (oop*)space_top;
- oop* prev_top = NULL;
- jbyte* current_card = byte_for(sp->bottom());
- jbyte* end_card = byte_for(sp_top - 1); // sp_top is exclusive
- // scan card marking array
- while (current_card <= end_card) {
- jbyte value = *current_card;
- // skip clean cards
- if (card_is_clean(value)) {
- current_card++;
- } else {
- // we found a non-clean card
- jbyte* first_nonclean_card = current_card++;
- oop* bottom = (oop*)addr_for(first_nonclean_card);
- // find object starting on card
- oop* bottom_obj = (oop*)start_array->object_start((HeapWord*)bottom);
- // bottom_obj = (oop*)start_array->object_start((HeapWord*)bottom);
- assert(bottom_obj <= bottom, "just checking");
- // make sure we don't scan oops we already looked at
- if (bottom < prev_top) bottom = prev_top;
- // figure out when to stop scanning
- jbyte* first_clean_card;
- oop* top;
- bool restart_scanning;
- do {
- restart_scanning = false;
- // find a clean card
- while (current_card <= end_card) {
- value = *current_card;
- if (card_is_clean(value)) break;
- current_card++;
- }
- // check if we reached the end, if so we are done
- if (current_card >= end_card) {
- first_clean_card = end_card + 1;
- current_card++;
- top = sp_top;
- } else {
- // we have a clean card, find object starting on that card
- first_clean_card = current_card++;
- top = (oop*)addr_for(first_clean_card);
- oop* top_obj = (oop*)start_array->object_start((HeapWord*)top);
- // top_obj = (oop*)start_array->object_start((HeapWord*)top);
- assert(top_obj <= top, "just checking");
- if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
- // an arrayOop is starting on the clean card - since we do exact store
- // checks for objArrays we are done
- } else {
- // otherwise, it is possible that the object starting on the clean card
- // spans the entire card, and that the store happened on a later card.
- // figure out where the object ends
- top = top_obj + oop(top_obj)->size();
- jbyte* top_card = CardTableModRefBS::byte_for(top - 1); // top is exclusive
- if (top_card > first_clean_card) {
- // object ends a different card
- current_card = top_card + 1;
- if (card_is_clean(*top_card)) {
- // the ending card is clean, we are done
- first_clean_card = top_card;
- } else {
- // the ending card is not clean, continue scanning at start of do-while
- restart_scanning = true;
- }
- } else {
- // object ends on the clean card, we are done.
- assert(first_clean_card == top_card, "just checking");
- }
- }
- }
- } while (restart_scanning);
- // we know which cards to scan, now clear them
- while (first_nonclean_card < first_clean_card) {
- *first_nonclean_card++ = clean_card;
- }
- // scan oops in objects
- do {
- oop(bottom_obj)->push_contents(pm);
- bottom_obj += oop(bottom_obj)->size();
- assert(bottom_obj <= sp_top, "just checking");
- } while (bottom_obj < top);
- pm->drain_stacks_cond_depth();
- // remember top oop* scanned
- prev_top = top;
- }
- }
- }
-}
+// Do not call this method if the space is empty.
+// It is a waste to start tasks and get here only to
+// do no work. If this method needs to be called
+// when the space is empty, fix the calculation of
+// end_card to allow sp_top == sp->bottom().
void CardTableExtension::scavenge_contents_parallel(ObjectStartArray* start_array,
MutableSpace* sp,
@@ -228,10 +137,11 @@
int ssize = 128; // Naked constant! Work unit = 64k.
int dirty_card_count = 0;
+ // It is a waste to get here if empty.
+ assert(sp->bottom() < sp->top(), "Should not be called if empty");
oop* sp_top = (oop*)space_top;
- oop* sp_last = sp->bottom() == space_top ? sp_top : sp_top - 1;
jbyte* start_card = byte_for(sp->bottom());
- jbyte* end_card = byte_for(sp_last) + 1;
+ jbyte* end_card = byte_for(sp_top - 1) + 1;
oop* last_scanned = NULL; // Prevent scanning objects more than once
// The width of the stripe ssize*stripe_total must be
// consistent with the number of stripes so that the complete slice
@@ -255,6 +165,16 @@
HeapWord* slice_start = addr_for(worker_start_card);
HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));
+#ifdef ASSERT
+ if (GCWorkerDelayMillis > 0) {
+ // Delay 1 worker so that it proceeds after all the work
+ // has been completed.
+ if (stripe_number < 2) {
+ os::sleep(Thread::current(), GCWorkerDelayMillis, false);
+ }
+ }
+#endif
+
// If there are no objects starting within the chunk, skip it.
if (!start_array->object_starts_in_range(slice_start, slice_end)) {
continue;
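
scavenge_contents_parallel carves the card range into ssize-card stripes and each worker takes every stripe_total-th stripe, so ssize * stripe_total is the width of one complete slice. A small sketch of that striping scheme (the worker count, stripe size, and card count are illustrative):

#include <cstdio>

// Sketch: worker `which` visits every stripe_total-th chunk of
// `ssize` cards, starting at its own stripe number.
static void scan_stripes(int which, int stripe_total, int ssize, int num_cards) {
  for (int start = which * ssize; start < num_cards; start += stripe_total * ssize) {
    int end = start + ssize;
    if (end > num_cards) end = num_cards;
    printf("worker %d scans cards [%d, %d)\n", which, start, end);
  }
}

int main() {
  // 2 workers, 128-card stripes (the "64k work unit" above), 512 cards total.
  for (int w = 0; w < 2; w++) {
    scan_stripes(w, 2, 128, 512);
  }
  return 0;
}
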
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -60,11 +60,6 @@
// BarrierSet::Name kind() { return BarrierSet::CardTableExtension; }
// Scavenge support
- void scavenge_contents(ObjectStartArray* start_array,
- MutableSpace* sp,
- HeapWord* space_top,
- PSPromotionManager* pm);
-
void scavenge_contents_parallel(ObjectStartArray* start_array,
MutableSpace* sp,
HeapWord* space_top,
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -941,9 +941,9 @@
return promo_heap_delta;
}
-int PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
+uint PSAdaptiveSizePolicy::compute_survivor_space_size_and_threshold(
bool is_survivor_overflow,
- int tenuring_threshold,
+ uint tenuring_threshold,
size_t survivor_limit) {
assert(survivor_limit >= _intra_generation_alignment,
"survivor_limit too small");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -353,9 +353,9 @@
// Calculates new survivor space size; returns a new tenuring threshold
// value. Stores new survivor size in _survivor_size.
- int compute_survivor_space_size_and_threshold(bool is_survivor_overflow,
- int tenuring_threshold,
- size_t survivor_limit);
+ uint compute_survivor_space_size_and_threshold(bool is_survivor_overflow,
+ uint tenuring_threshold,
+ size_t survivor_limit);
// Return the maximum size of a survivor space if the young generation were of
// size gen_size.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -85,7 +85,7 @@
if (!promote_immediately) {
// Find the objects age, MT safe.
- int age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+ uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
test_mark->displaced_mark_helper()->age() : test_mark->age();
// Try allocating obj in to-space (unless too old)
@@ -136,6 +136,13 @@
HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
if(lab_base != NULL) {
+#ifdef ASSERT
+ // Delay the initialization of the promotion lab (plab).
+ // This exposes uninitialized plabs to card table processing.
+ if (GCWorkerDelayMillis > 0) {
+ os::sleep(Thread::current(), GCWorkerDelayMillis, false);
+ }
+#endif
_old_lab.initialize(MemRegion(lab_base, OldPLABSize));
// Try the old lab allocation again.
new_obj = (oop) _old_lab.allocate(new_obj_size);
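
The ASSERT-only block above injects a delay (controlled by GCWorkerDelayMillis) between claiming and initializing the old-gen PLAB, widening the window in which card-table processing can observe an uninitialized lab. A minimal sketch of the same debug-only delay pattern; the standard-library sleep is a stand-in for os::sleep:

#include <chrono>
#include <thread>

// Sketch: a debug-only fault-injection delay, compiled out in product
// builds, used to widen race windows during GC testing.
#ifdef ASSERT
static long GCWorkerDelayMillis = 10; // normally a VM flag
#endif

static void initialize_plab_with_test_delay() {
#ifdef ASSERT
  if (GCWorkerDelayMillis > 0) {
    // Expose the not-yet-initialized PLAB to other workers for longer.
    std::this_thread::sleep_for(std::chrono::milliseconds(GCWorkerDelayMillis));
  }
#endif
  // ... initialize the promotion lab here ...
}

int main() {
  initialize_plab_with_test_delay();
  return 0;
}
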
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -59,7 +59,7 @@
ReferenceProcessor* PSScavenge::_ref_processor = NULL;
CardTableExtension* PSScavenge::_card_table = NULL;
bool PSScavenge::_survivor_overflow = false;
-int PSScavenge::_tenuring_threshold = 0;
+uint PSScavenge::_tenuring_threshold = 0;
HeapWord* PSScavenge::_young_generation_boundary = NULL;
elapsedTimer PSScavenge::_accumulated_time;
Stack<markOop, mtGC> PSScavenge::_preserved_mark_stack;
@@ -395,9 +395,13 @@
GCTaskQueue* q = GCTaskQueue::create();
- uint stripe_total = active_workers;
- for(uint i=0; i < stripe_total; i++) {
- q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
+ if (!old_gen->object_space()->is_empty()) {
+ // There are only old-to-young pointers if there are objects
+ // in the old gen.
+ uint stripe_total = active_workers;
+ for(uint i=0; i < stripe_total; i++) {
+ q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
+ }
}
q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
@@ -525,7 +529,7 @@
if (PrintTenuringDistribution) {
gclog_or_tty->cr();
- gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
+ gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %u (max %u)",
size_policy->calculated_survivor_size_in_bytes(),
_tenuring_threshold, MaxTenuringThreshold);
}
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -66,14 +66,14 @@
static PSIsAliveClosure _is_alive_closure; // Closure used for reference processing
static CardTableExtension* _card_table; // We cache the card table for fast access.
static bool _survivor_overflow; // Overflow this collection
- static int _tenuring_threshold; // tenuring threshold for next scavenge
+ static uint _tenuring_threshold; // tenuring threshold for next scavenge
static elapsedTimer _accumulated_time; // total time spent on scavenge
static HeapWord* _young_generation_boundary; // The lowest address possible for the young_gen.
// This is used to decide if an oop should be scavenged,
// cards should be marked, etc.
static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
static Stack<oop, mtGC> _preserved_oop_stack; // List of oops that need their mark restored.
- static CollectorCounters* _counters; // collector performance counters
+ static CollectorCounters* _counters; // collector performance counters
static bool _promotion_failed;
static void clean_up_failed_promotion();
@@ -88,7 +88,7 @@
public:
// Accessors
- static int tenuring_threshold() { return _tenuring_threshold; }
+ static uint tenuring_threshold() { return _tenuring_threshold; }
static elapsedTimer* accumulated_time() { return &_accumulated_time; }
static bool promotion_failed() { return _promotion_failed; }
static int consecutive_skipped_scavenges()
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -165,35 +165,13 @@
}
//
-// SerialOldToYoungRootsTask
-//
-
-void SerialOldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
- assert(_gen != NULL, "Sanity");
- assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
-
- {
- PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
-
- assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
- CardTableExtension* card_table = (CardTableExtension *)Universe::heap()->barrier_set();
- // FIX ME! Assert that card_table is the type we believe it to be.
-
- card_table->scavenge_contents(_gen->start_array(),
- _gen->object_space(),
- _gen_top,
- pm);
-
- // Do the real work
- pm->drain_stacks(false);
- }
-}
-
-//
// OldToYoungRootsTask
//
void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
+ // There are no old-to-young pointers if the old gen is empty.
+ assert(!_gen->object_space()->is_empty(),
+ "Should not be called if there is no work");
assert(_gen != NULL, "Sanity");
assert(_gen->object_space()->contains(_gen_top) || _gen_top == _gen->object_space()->top(), "Sanity");
assert(_stripe_number < ParallelGCThreads, "Sanity");
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -113,25 +113,6 @@
};
//
-// SerialOldToYoungRootsTask
-//
-// This task is used to scan for roots in the perm gen
-
-class SerialOldToYoungRootsTask : public GCTask {
- private:
- PSOldGen* _gen;
- HeapWord* _gen_top;
-
- public:
- SerialOldToYoungRootsTask(PSOldGen *gen, HeapWord* gen_top) :
- _gen(gen), _gen_top(gen_top) { }
-
- char* name() { return (char *)"serial-old-to-young-roots-task"; }
-
- virtual void do_it(GCTaskManager* manager, uint which);
-};
-
-//
// OldToYoungRootsTask
//
// This task is used to scan old to young roots in parallel
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -642,7 +642,7 @@
bool AdaptiveSizePolicy::print_adaptive_size_policy_on(
outputStream* st,
- int tenuring_threshold_arg) const {
+ uint tenuring_threshold_arg) const {
if (!AdaptiveSizePolicy::print_adaptive_size_policy_on(st)) {
return false;
}
@@ -663,7 +663,7 @@
assert(!tenuring_threshold_change(), "(no change was attempted)");
}
if (tenuring_threshold_changed) {
- st->print_cr("%d", tenuring_threshold_arg);
+ st->print_cr("%u", tenuring_threshold_arg);
}
return true;
}
--- a/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/adaptiveSizePolicy.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -489,8 +489,8 @@
// Printing support
virtual bool print_adaptive_size_policy_on(outputStream* st) const;
- bool print_adaptive_size_policy_on(outputStream* st, int
- tenuring_threshold) const;
+ bool print_adaptive_size_policy_on(outputStream* st,
+ uint tenuring_threshold) const;
};
// Class that can be used to print information about the
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -78,10 +78,10 @@
}
}
-int ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
+uint ageTable::compute_tenuring_threshold(size_t survivor_capacity) {
size_t desired_survivor_size = (size_t)((((double) survivor_capacity)*TargetSurvivorRatio)/100);
size_t total = 0;
- int age = 1;
+ uint age = 1;
assert(sizes[0] == 0, "no objects with age zero should be recorded");
while (age < table_size) {
total += sizes[age];
@@ -90,13 +90,13 @@
if (total > desired_survivor_size) break;
age++;
}
- int result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
+ uint result = age < MaxTenuringThreshold ? age : MaxTenuringThreshold;
if (PrintTenuringDistribution || UsePerfData) {
if (PrintTenuringDistribution) {
gclog_or_tty->cr();
- gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %d)",
+ gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %u (max %u)",
desired_survivor_size*oopSize, result, MaxTenuringThreshold);
}
@@ -106,7 +106,7 @@
total += sizes[age];
if (sizes[age] > 0) {
if (PrintTenuringDistribution) {
- gclog_or_tty->print_cr("- age %3d: %10ld bytes, %10ld total",
+ gclog_or_tty->print_cr("- age %3u: %10ld bytes, %10ld total",
age, sizes[age]*oopSize, total*oopSize);
}
}
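
compute_tenuring_threshold accumulates per-age byte counts until they exceed desired_survivor_size = survivor_capacity * TargetSurvivorRatio / 100, then clamps the resulting age to MaxTenuringThreshold. A self-contained sketch of the same computation with made-up table values:

#include <cstdio>

// Sketch of the threshold computation; table values are illustrative.
static const unsigned table_size = 16;

static unsigned compute_tenuring_threshold(const size_t sizes[table_size],
                                           size_t survivor_capacity,
                                           double target_survivor_ratio,
                                           unsigned max_tenuring_threshold) {
  size_t desired_survivor_size =
      (size_t)(((double)survivor_capacity * target_survivor_ratio) / 100.0);
  size_t total = 0;
  unsigned age = 1; // no objects with age zero are recorded
  while (age < table_size) {
    total += sizes[age];
    // If including this age pushes us past the target, tenure from here.
    if (total > desired_survivor_size) break;
    age++;
  }
  return age < max_tenuring_threshold ? age : max_tenuring_threshold;
}

int main() {
  size_t sizes[table_size] = {0, 300, 300, 300, 300}; // words per age
  unsigned t = compute_tenuring_threshold(sizes, 2000, 50.0, 15);
  printf("new threshold %u\n", t); // ages 1..3 fit in 1000 words => 4
  return 0;
}
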
--- a/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/ageTable.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -55,7 +55,7 @@
// add entry
void add(oop p, size_t oop_size) {
- int age = p->age();
+ uint age = p->age();
assert(age > 0 && age < table_size, "invalid age of object");
sizes[age] += oop_size;
}
@@ -66,7 +66,7 @@
void merge_par(ageTable* subTable);
// calculate new tenuring threshold based on age information
- int compute_tenuring_threshold(size_t survivor_capacity);
+ uint compute_tenuring_threshold(size_t survivor_capacity);
private:
PerfVariable* _perf_sizes[table_size];
--- a/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcAdaptivePolicyCounters.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -188,7 +188,7 @@
inline void update_survivor_overflowed(bool survivor_overflowed) {
_survivor_overflowed_counter->set_value(survivor_overflowed);
}
- inline void update_tenuring_threshold(int threshold) {
+ inline void update_tenuring_threshold(uint threshold) {
tenuring_threshold()->set_value(threshold);
}
inline void update_increment_tenuring_threshold_for_gc_cost() {
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -87,7 +87,7 @@
// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
-void PLABStats::adjust_desired_plab_sz() {
+void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
assert(ResizePLAB, "Not set");
if (_allocated == 0) {
assert(_unused == 0,
@@ -107,7 +107,7 @@
target_refills = 1;
}
_used = _allocated - _wasted - _unused;
- size_t plab_sz = _used/(target_refills*ParallelGCThreads);
+ size_t plab_sz = _used/(target_refills*no_of_gc_workers);
if (PrintPLAB) gclog_or_tty->print(" (plab_sz = %d ", plab_sz);
// Take historical weighted average
_filter.sample(plab_sz);
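
With this change the desired PLAB size is derived from the number of GC workers that actually ran rather than the static ParallelGCThreads. A worked sketch of the formula with illustrative figures:

#include <cstdio>

// Sketch: desired PLAB size from one scavenge's allocation statistics.
// All figures are in words and purely illustrative.
int main() {
  size_t allocated = 100000; // words handed out to PLABs
  size_t wasted    = 2000;   // alignment/retirement waste
  size_t unused    = 8000;   // left over in PLABs at the end
  size_t used      = allocated - wasted - unused; // 90000

  unsigned target_refills = 10;  // desired refills per worker per scavenge
  unsigned gc_workers     = 9;   // workers that actually ran (the new parameter)

  size_t plab_sz = used / (target_refills * gc_workers); // 1000 words
  printf("plab_sz = %zu\n", plab_sz);
  return 0;
}
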
--- a/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/parGCAllocBuffer.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -204,7 +204,8 @@
return _desired_plab_sz;
}
- void adjust_desired_plab_sz(); // filter computation, latches output to
+ void adjust_desired_plab_sz(uint no_of_gc_workers);
+ // filter computation, latches output to
// _desired_plab_sz, clears sensor accumulators
void add_allocated(size_t v) {
--- a/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/gc_implementation/shared/vmGCOperations.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -198,8 +198,6 @@
CollectedHeap* heap = Universe::heap();
GCCauseSetter gccs(heap, _gc_cause);
- bool do_cms_concurrent = false;
-
// Check again if the space is available. Another thread
// may have similarly failed a metadata allocation and induced
// a GC that freed space for the allocation.
@@ -208,23 +206,25 @@
}
if (_result == NULL) {
- if (!UseConcMarkSweepGC) {
- // Don't clear the soft refs the first time.
+ if (UseConcMarkSweepGC) {
+ if (CMSClassUnloadingEnabled) {
+ MetaspaceGC::set_should_concurrent_collect(true);
+ }
+ // For CMS expand since the collection is going to be concurrent.
+ _result =
+ _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
+ }
+ if (_result == NULL) {
+ // Don't clear the soft refs. This GC is for reclaiming metadata
+ // and is unrelated to the fullness of the Java heap, which should
+ // be the criterion for clearing SoftReferences.
+ if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
+ gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
+ }
heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
_result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- // Don't do this for now
- // This seems too costly to do a second full GC
- // Let the metaspace grow instead
- // if (_result == NULL) {
- // // If allocation fails again, clear soft refs
- // heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
- // _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- // }
- } else {
- MetaspaceGC::set_should_concurrent_collect(true);
- do_cms_concurrent = true;
}
- if (_result == NULL) {
+ if (_result == NULL && !UseConcMarkSweepGC /* CMS already tried */) {
// If still failing, allow the Metaspace to expand.
// See delta_capacity_until_GC() for explanation of the
// amount of the expansion.
@@ -233,18 +233,10 @@
_result =
_loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
- if (do_cms_concurrent && _result == NULL) {
- // Rather than fail with a metaspace out-of-memory, do a full
- // GC for CMS.
- heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
- _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
- }
- if (_result == NULL) {
- if (PrintGCDetails) {
- gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
- SIZE_FORMAT, _size);
- }
- }
+ }
+ if (Verbose && PrintGCDetails && _result == NULL) {
+ gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
+ SIZE_FORMAT, _size);
}
}
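
The restructured path above tries, in order: a plain allocation, a CMS concurrent expand-and-allocate, a full GC that deliberately keeps soft references followed by a retry, and finally (non-CMS only) an expansion. A condensed, runnable sketch of that control flow; the helper functions are stand-ins, not real VM APIs:

#include <cstdio>

// Sketch of the new fallback order; the helpers below are stand-ins
// for the real HotSpot calls.
static void* allocate()                   { return nullptr; } // pretend it fails
static void* expand_and_allocate()        { return nullptr; }
static void  request_concurrent_collect() { puts("schedule concurrent GC"); }
static void  collect_for_metadata()       { puts("full GC (soft refs kept)"); }

static void* satisfy_metadata_allocation(bool use_cms) {
  void* result = allocate();          // another thread may have freed space
  if (result == nullptr && use_cms) {
    request_concurrent_collect();     // let CMS unload classes concurrently
    result = expand_and_allocate();   // expand now; the collection is concurrent
  }
  if (result == nullptr) {
    collect_for_metadata();           // full GC that deliberately keeps soft refs
    result = allocate();
  }
  if (result == nullptr && !use_cms) { // CMS already tried expanding above
    result = expand_and_allocate();
  }
  return result;                      // may still be null; caller reports failure
}

int main() {
  satisfy_metadata_allocation(true);
  return 0;
}
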
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -737,6 +737,7 @@
pool,
info.resolved_method(),
info.resolved_appendix(),
+ info.resolved_method_type(),
pool->resolved_references());
}
IRT_END
@@ -765,6 +766,7 @@
pool,
info.resolved_method(),
info.resolved_appendix(),
+ info.resolved_method_type(),
pool->resolved_references());
}
IRT_END
--- a/hotspot/src/share/vm/interpreter/linkResolver.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -99,7 +99,7 @@
assert(!resolved_method->is_compiled_lambda_form(), "these must be handled via an invokehandle call");
}
-void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, TRAPS) {
+void CallInfo::set_handle(methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS) {
if (resolved_method.is_null()) {
THROW_MSG(vmSymbols::java_lang_InternalError(), "resolved method is null");
}
@@ -110,7 +110,8 @@
int vtable_index = Method::nonvirtual_vtable_index;
assert(resolved_method->vtable_index() == vtable_index, "");
set_common(resolved_klass, resolved_klass, resolved_method, resolved_method, vtable_index, CHECK);
- _resolved_appendix = resolved_appendix;
+ _resolved_appendix = resolved_appendix;
+ _resolved_method_type = resolved_method_type;
}
void CallInfo::set_common(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS) {
@@ -221,7 +222,8 @@
void LinkResolver::lookup_polymorphic_method(methodHandle& result,
KlassHandle klass, Symbol* name, Symbol* full_signature,
KlassHandle current_klass,
- Handle* appendix_result_or_null,
+ Handle *appendix_result_or_null,
+ Handle *method_type_result,
TRAPS) {
vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name);
if (TraceMethodHandles) {
@@ -275,10 +277,12 @@
}
Handle appendix;
+ Handle method_type;
result = SystemDictionary::find_method_handle_invoker(name,
full_signature,
current_klass,
&appendix,
+ &method_type,
CHECK);
if (TraceMethodHandles) {
tty->print("lookup_polymorphic_method => (via Java) ");
@@ -307,6 +311,7 @@
assert(appendix_result_or_null != NULL, "");
(*appendix_result_or_null) = appendix;
+ (*method_type_result) = method_type;
return;
}
}
@@ -419,7 +424,7 @@
if (resolved_method.is_null()) {
// JSR 292: see if this is an implicitly generated method MethodHandle.linkToVirtual(*...), etc
lookup_polymorphic_method(resolved_method, resolved_klass, method_name, method_signature,
- current_klass, (Handle*)NULL, THREAD);
+ current_klass, (Handle*)NULL, (Handle*)NULL, THREAD);
if (HAS_PENDING_EXCEPTION) {
nested_exception = Handle(THREAD, PENDING_EXCEPTION);
CLEAR_PENDING_EXCEPTION;
@@ -1207,11 +1212,12 @@
assert(resolved_klass() == SystemDictionary::MethodHandle_klass(), "");
assert(MethodHandles::is_signature_polymorphic_name(method_name), "");
methodHandle resolved_method;
- Handle resolved_appendix;
+ Handle resolved_appendix;
+ Handle resolved_method_type;
lookup_polymorphic_method(resolved_method, resolved_klass,
method_name, method_signature,
- current_klass, &resolved_appendix, CHECK);
- result.set_handle(resolved_method, resolved_appendix, CHECK);
+ current_klass, &resolved_appendix, &resolved_method_type, CHECK);
+ result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK);
}
@@ -1219,7 +1225,7 @@
assert(EnableInvokeDynamic, "");
pool->set_invokedynamic(); // mark header to flag active call sites
- //resolve_pool(<resolved_klass>, method_name, method_signature, current_klass, pool, index, CHECK);
+ //resolve_pool(<resolved_klass>, method_name, method_signature, current_klass, pool, index, CHECK);
Symbol* method_name = pool->name_ref_at(index);
Symbol* method_signature = pool->signature_ref_at(index);
KlassHandle current_klass = KlassHandle(THREAD, pool->pool_holder());
@@ -1236,9 +1242,10 @@
bootstrap_specifier = Handle(THREAD, bsm_info);
}
if (!cpce->is_f1_null()) {
- methodHandle method(THREAD, cpce->f1_as_method());
- Handle appendix(THREAD, cpce->appendix_if_resolved(pool));
- result.set_handle(method, appendix, CHECK);
+ methodHandle method( THREAD, cpce->f1_as_method());
+ Handle appendix( THREAD, cpce->appendix_if_resolved(pool));
+ Handle method_type(THREAD, cpce->method_type_if_resolved(pool));
+ result.set_handle(method, appendix, method_type, CHECK);
return;
}
@@ -1260,11 +1267,13 @@
// JSR 292: this must resolve to an implicitly generated method MH.linkToCallSite(*...)
// The appendix argument is likely to be a freshly-created CallSite.
Handle resolved_appendix;
+ Handle resolved_method_type;
methodHandle resolved_method =
SystemDictionary::find_dynamic_call_site_invoker(current_klass,
bootstrap_specifier,
method_name, method_signature,
&resolved_appendix,
+ &resolved_method_type,
THREAD);
if (HAS_PENDING_EXCEPTION) {
if (TraceMethodHandles) {
@@ -1284,7 +1293,7 @@
CLEAR_PENDING_EXCEPTION;
THROW_CAUSE(vmSymbols::java_lang_BootstrapMethodError(), nested_exception)
}
- result.set_handle(resolved_method, resolved_appendix, CHECK);
+ result.set_handle(resolved_method, resolved_appendix, resolved_method_type, CHECK);
}
//------------------------------------------------------------------------------------------------------------------------
--- a/hotspot/src/share/vm/interpreter/linkResolver.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/interpreter/linkResolver.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -76,12 +76,13 @@
methodHandle _selected_method; // dynamic (actual) target method
int _vtable_index; // vtable index of selected method
Handle _resolved_appendix; // extra argument in constant pool (if CPCE::has_appendix)
+ Handle _resolved_method_type; // MethodType (for invokedynamic and invokehandle call sites)
- void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS);
- void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS);
- void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
- void set_handle( methodHandle resolved_method, Handle resolved_appendix, TRAPS);
- void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index, TRAPS);
+ void set_static( KlassHandle resolved_klass, methodHandle resolved_method , TRAPS);
+ void set_interface(KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method , TRAPS);
+ void set_virtual( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS);
+ void set_handle( methodHandle resolved_method, Handle resolved_appendix, Handle resolved_method_type, TRAPS);
+ void set_common( KlassHandle resolved_klass, KlassHandle selected_klass, methodHandle resolved_method, methodHandle selected_method, int vtable_index , TRAPS);
friend class LinkResolver;
@@ -91,6 +92,7 @@
methodHandle resolved_method() const { return _resolved_method; }
methodHandle selected_method() const { return _selected_method; }
Handle resolved_appendix() const { return _resolved_appendix; }
+ Handle resolved_method_type() const { return _resolved_method_type; }
BasicType result_type() const { return selected_method()->result_type(); }
bool has_vtable_index() const { return _vtable_index >= 0; }
@@ -113,7 +115,7 @@
static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
- KlassHandle current_klass, Handle* appendix_result_or_null, TRAPS);
+ KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
--- a/hotspot/src/share/vm/interpreter/rewriter.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -179,7 +179,7 @@
MethodHandles::is_signature_polymorphic_name(SystemDictionary::MethodHandle_klass(),
_pool->name_ref_at(cp_index))) {
// we may need a resolved_refs entry for the appendix
- add_invokedynamic_resolved_references_entry(cp_index, cache_index);
+ add_invokedynamic_resolved_references_entries(cp_index, cache_index);
status = +1;
} else {
status = -1;
@@ -211,7 +211,7 @@
if (!reverse) {
int cp_index = Bytes::get_Java_u2(p);
int cache_index = add_invokedynamic_cp_cache_entry(cp_index);
- add_invokedynamic_resolved_references_entry(cp_index, cache_index);
+ add_invokedynamic_resolved_references_entries(cp_index, cache_index);
// Replace the trailing four bytes with a CPC index for the dynamic
// call site. Unlike other CPC entries, there is one per bytecode,
// not just one per distinct CP entry. In other words, the
--- a/hotspot/src/share/vm/interpreter/rewriter.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/interpreter/rewriter.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -113,12 +113,19 @@
return ref_index;
}
- // add a new entry to the resolved_references map (for invokedynamic only)
- int add_invokedynamic_resolved_references_entry(int cp_index, int cache_index) {
+ // add new entries to the resolved_references map (for invokedynamic and invokehandle only)
+ int add_invokedynamic_resolved_references_entries(int cp_index, int cache_index) {
assert(_resolved_reference_limit >= 0, "must add indy refs after first iteration");
- int ref_index = _resolved_references_map.append(cp_index); // many-to-one
- assert(ref_index >= _resolved_reference_limit, "");
- _invokedynamic_references_map.at_put_grow(ref_index, cache_index, -1);
+ int ref_index = -1;
+ for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
+ const int index = _resolved_references_map.append(cp_index); // many-to-one
+ assert(index >= _resolved_reference_limit, "");
+ if (entry == 0) {
+ ref_index = index;
+ }
+ assert((index - entry) == ref_index, "entries must be consecutive");
+ _invokedynamic_references_map.at_put_grow(index, cache_index, -1);
+ }
return ref_index;
}
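
Each invokedynamic/invokehandle call site now reserves _indy_resolved_references_entries consecutive slots in the resolved-references map, with the appendix at offset 0 and the MethodType at offset 1. A minimal sketch of the append-consecutive pattern over a plain vector:

#include <vector>
#include <cassert>

// Sketch: reserve N consecutive slots per call site and return the
// index of the first one, mirroring the appendix/MethodType layout.
enum { appendix_offset = 0, method_type_offset = 1, entries_per_indy = 2 };

static int add_indy_entries(std::vector<int>& refs_map, int cp_index) {
  int ref_index = -1;
  for (int entry = 0; entry < entries_per_indy; entry++) {
    int index = (int)refs_map.size();
    refs_map.push_back(cp_index); // many-to-one: both slots cite cp_index
    if (entry == 0) ref_index = index;
    assert(index - entry == ref_index && "entries must be consecutive");
  }
  return ref_index;
}

int main() {
  std::vector<int> refs_map;
  int base = add_indy_entries(refs_map, /*cp_index=*/42);
  assert(refs_map[base + appendix_offset] == 42);
  assert(refs_map[base + method_type_offset] == 42);
  return 0;
}
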
@@ -127,15 +134,6 @@
return cp_index;
}
- // invokedynamic support - append the cpCache entry (encoded) in object map.
- // The resolved_references_map should still be in ascending order
- // The resolved_references has the invokedynamic call site objects appended after
- // the objects that are resolved in the constant pool.
- int add_callsite_entry(int main_cpc_entry) {
- int ref_index = _resolved_references_map.append(main_cpc_entry);
- return ref_index;
- }
-
// Access the contents of _cp_cache_map to determine CP cache layout.
int cp_cache_entry_pool_index(int cache_index) {
int cp_index = _cp_cache_map[cache_index];
--- a/hotspot/src/share/vm/memory/defNewGeneration.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/memory/defNewGeneration.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -43,7 +43,7 @@
protected:
Generation* _next_gen;
- int _tenuring_threshold; // Tenuring threshold for next collection.
+ uint _tenuring_threshold; // Tenuring threshold for next collection.
ageTable _age_table;
// Size of object to pretenure in words; command line provides bytes
size_t _pretenure_size_threshold_words;
@@ -325,7 +325,7 @@
bool parallel = false);
oop copy_to_survivor_space(oop old);
- int tenuring_threshold() { return _tenuring_threshold; }
+ uint tenuring_threshold() { return _tenuring_threshold; }
// Performance Counter support
void update_counters();
--- a/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/memory/threadLocalAllocBuffer.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -35,7 +35,7 @@
// the threads for allocation.
// It is thread-private at any time, but may be multiplexed over
// time across multiple threads. The park()/unpark() pair is
-// used to make it avaiable for such multiplexing.
+// used to make it available for such multiplexing.
class ThreadLocalAllocBuffer: public CHeapObj<mtThread> {
friend class VMStructs;
private:
--- a/hotspot/src/share/vm/oops/constantPool.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -445,7 +445,6 @@
return e->has_appendix();
}
-
oop ConstantPool::appendix_at_if_loaded(constantPoolHandle cpool, int which) {
if (cpool->cache() == NULL) return NULL; // nothing to load yet
int cache_index = decode_cpcache_index(which, true);
@@ -454,6 +453,21 @@
}
+bool ConstantPool::has_method_type_at_if_loaded(constantPoolHandle cpool, int which) {
+ if (cpool->cache() == NULL) return false; // nothing to load yet
+ int cache_index = decode_cpcache_index(which, true);
+ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
+ return e->has_method_type();
+}
+
+oop ConstantPool::method_type_at_if_loaded(constantPoolHandle cpool, int which) {
+ if (cpool->cache() == NULL) return NULL; // nothing to load yet
+ int cache_index = decode_cpcache_index(which, true);
+ ConstantPoolCacheEntry* e = cpool->cache()->entry_at(cache_index);
+ return e->method_type_if_resolved(cpool);
+}
+
+
Symbol* ConstantPool::impl_name_ref_at(int which, bool uncached) {
int name_index = name_ref_index_at(impl_name_and_type_ref_index_at(which, uncached));
return symbol_at(name_index);
--- a/hotspot/src/share/vm/oops/constantPool.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/constantPool.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -694,13 +694,15 @@
friend class SystemDictionary;
// Used by compiler to prevent classloading.
- static Method* method_at_if_loaded (constantPoolHandle this_oop, int which);
- static bool has_appendix_at_if_loaded (constantPoolHandle this_oop, int which);
- static oop appendix_at_if_loaded (constantPoolHandle this_oop, int which);
- static Klass* klass_at_if_loaded (constantPoolHandle this_oop, int which);
- static Klass* klass_ref_at_if_loaded (constantPoolHandle this_oop, int which);
+ static Method* method_at_if_loaded (constantPoolHandle this_oop, int which);
+ static bool has_appendix_at_if_loaded (constantPoolHandle this_oop, int which);
+ static oop appendix_at_if_loaded (constantPoolHandle this_oop, int which);
+ static bool has_method_type_at_if_loaded (constantPoolHandle this_oop, int which);
+ static oop method_type_at_if_loaded (constantPoolHandle this_oop, int which);
+ static Klass* klass_at_if_loaded (constantPoolHandle this_oop, int which);
+ static Klass* klass_ref_at_if_loaded (constantPoolHandle this_oop, int which);
// Same as above - but does LinkResolving.
- static Klass* klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
+ static Klass* klass_ref_at_if_loaded_check(constantPoolHandle this_oop, int which, TRAPS);
// Routines currently used for annotations (only called by jvm.cpp) but which might be used in the
// future by other Java code. These take constant pool indices rather than
--- a/hotspot/src/share/vm/oops/cpCache.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/cpCache.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -244,21 +244,23 @@
void ConstantPoolCacheEntry::set_method_handle(constantPoolHandle cpool,
- methodHandle adapter, Handle appendix,
+ methodHandle adapter,
+ Handle appendix, Handle method_type,
objArrayHandle resolved_references) {
- set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix, resolved_references);
+ set_method_handle_common(cpool, Bytecodes::_invokehandle, adapter, appendix, method_type, resolved_references);
}
void ConstantPoolCacheEntry::set_dynamic_call(constantPoolHandle cpool,
- methodHandle adapter, Handle appendix,
+ methodHandle adapter,
+ Handle appendix, Handle method_type,
objArrayHandle resolved_references) {
- set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix, resolved_references);
+ set_method_handle_common(cpool, Bytecodes::_invokedynamic, adapter, appendix, method_type, resolved_references);
}
void ConstantPoolCacheEntry::set_method_handle_common(constantPoolHandle cpool,
Bytecodes::Code invoke_code,
methodHandle adapter,
- Handle appendix,
+ Handle appendix, Handle method_type,
objArrayHandle resolved_references) {
// NOTE: This CPCE can be the subject of data races.
// There are three words to update: flags, refs[f2], f1 (in that order).
@@ -274,18 +276,21 @@
return;
}
- bool has_appendix = appendix.not_null();
+ const bool has_appendix = appendix.not_null();
+ const bool has_method_type = method_type.not_null();
// Write the flags.
set_method_flags(as_TosState(adapter->result_type()),
- ((has_appendix ? 1 : 0) << has_appendix_shift) |
- ( 1 << is_final_shift),
+ ((has_appendix ? 1 : 0) << has_appendix_shift ) |
+ ((has_method_type ? 1 : 0) << has_method_type_shift) |
+ ( 1 << is_final_shift ),
adapter->size_of_parameters());
if (TraceInvokeDynamic) {
- tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method="PTR_FORMAT" ",
+ tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
invoke_code,
- (intptr_t)appendix(), (has_appendix ? "" : " (unused)"),
+ (intptr_t)appendix(), (has_appendix ? "" : " (unused)"),
+ (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
(intptr_t)adapter());
adapter->print();
if (has_appendix) appendix()->print();
@@ -310,17 +315,26 @@
// This allows us to create fewer method oops, while keeping type safety.
//
+ // Store appendix, if any.
if (has_appendix) {
- int ref_index = f2_as_index();
- assert(ref_index >= 0 && ref_index < resolved_references->length(), "oob");
- assert(resolved_references->obj_at(ref_index) == NULL, "init just once");
- resolved_references->obj_at_put(ref_index, appendix());
+ const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
+ assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
+ assert(resolved_references->obj_at(appendix_index) == NULL, "init just once");
+ resolved_references->obj_at_put(appendix_index, appendix());
+ }
+
+ // Store MethodType, if any.
+ if (has_method_type) {
+ const int method_type_index = f2_as_index() + _indy_resolved_references_method_type_offset;
+ assert(method_type_index >= 0 && method_type_index < resolved_references->length(), "oob");
+ assert(resolved_references->obj_at(method_type_index) == NULL, "init just once");
+ resolved_references->obj_at_put(method_type_index, method_type());
}
release_set_f1(adapter()); // This must be the last one to set (see NOTE above)!
- // The interpreter assembly code does not check byte_2,
- // but it is used by is_resolved, method_if_resolved, etc.
+ // The interpreter assembly code does not check byte_2,
+ // but it is used by is_resolved, method_if_resolved, etc.
set_bytecode_1(invoke_code);
NOT_PRODUCT(verify(tty));
if (TraceInvokeDynamic) {
@@ -376,7 +390,16 @@
oop ConstantPoolCacheEntry::appendix_if_resolved(constantPoolHandle cpool) {
if (is_f1_null() || !has_appendix())
return NULL;
- int ref_index = f2_as_index();
+ const int ref_index = f2_as_index() + _indy_resolved_references_appendix_offset;
+ objArrayOop resolved_references = cpool->resolved_references();
+ return resolved_references->obj_at(ref_index);
+}
+
+
+oop ConstantPoolCacheEntry::method_type_if_resolved(constantPoolHandle cpool) {
+ if (is_f1_null() || !has_method_type())
+ return NULL;
+ const int ref_index = f2_as_index() + _indy_resolved_references_method_type_offset;
objArrayOop resolved_references = cpool->resolved_references();
return resolved_references->obj_at(ref_index);
}
@@ -513,13 +536,23 @@
for (int i = 0; i < length(); i++) {
ConstantPoolCacheEntry* e = entry_at(i);
int original_index = inverse_index_map[i];
- e->initialize_entry(original_index);
+ e->initialize_entry(original_index);
assert(entry_at(i) == e, "sanity");
- }
+ }
for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
- int cpci = invokedynamic_references_map[ref];
- if (cpci >= 0)
+ const int cpci = invokedynamic_references_map[ref];
+ if (cpci >= 0) {
+#ifdef ASSERT
+ // invokedynamic and invokehandle have more entries; check if they
+ // all point to the same constant pool cache entry.
+ for (int entry = 1; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
+ const int cpci_next = invokedynamic_references_map[ref + entry];
+ assert(cpci == cpci_next, err_msg_res("%d == %d", cpci, cpci_next));
+ }
+#endif
entry_at(cpci)->initialize_resolved_reference_index(ref);
+ ref += ConstantPoolCacheEntry::_indy_resolved_references_entries - 1; // skip extra entries
+ }
}
}
--- a/hotspot/src/share/vm/oops/cpCache.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/cpCache.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -42,10 +42,10 @@
// _indices [ b2 | b1 | index ] index = constant_pool_index
// _f1 [ entry specific ] metadata ptr (method or klass)
// _f2 [ entry specific ] vtable or res_ref index, or vfinal method ptr
-// _flags [tos|0|F=1|0|0|f|v|0 |00000|field_index] (for field entries)
-// bit length [ 4 |1| 1 |1|1|1|1|1 |--5--|----16-----]
-// _flags [tos|0|F=0|A|I|f|0|vf|00000|00000|psize] (for method entries)
-// bit length [ 4 |1| 1 |1|1|1|1|1 |--5--|--8--|--8--]
+// _flags [tos|0|F=1|0|0|0|f|v|0 |0000|field_index] (for field entries)
+// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|----16-----]
+// _flags [tos|0|F=0|M|A|I|f|0|vf|0000|00000|psize] (for method entries)
+// bit length [ 4 |1| 1 |1|1|1|1|1|1 |-4--|--8--|--8--]
// --------------------------------
//
@@ -166,11 +166,12 @@
tos_state_shift = BitsPerInt - tos_state_bits, // see verify_tos_state_shift below
// misc. option bits; can be any bit position in [16..27]
is_field_entry_shift = 26, // (F) is it a field or a method?
- has_appendix_shift = 25, // (A) does the call site have an appendix argument?
- is_forced_virtual_shift = 24, // (I) is the interface reference forced to virtual mode?
- is_final_shift = 23, // (f) is the field or method final?
- is_volatile_shift = 22, // (v) is the field volatile?
- is_vfinal_shift = 21, // (vf) did the call resolve to a final method?
+ has_method_type_shift = 25, // (M) does the call site have a MethodType?
+ has_appendix_shift = 24, // (A) does the call site have an appendix argument?
+ is_forced_virtual_shift = 23, // (I) is the interface reference forced to virtual mode?
+ is_final_shift = 22, // (f) is the field or method final?
+ is_volatile_shift = 21, // (v) is the field volatile?
+ is_vfinal_shift = 20, // (vf) did the call resolve to a final method?
// low order bits give field index (for FieldInfo) or method parameter size:
field_index_bits = 16,
field_index_mask = right_n_bits(field_index_bits),
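
The relayout above inserts the (M) bit at has_method_type_shift == 25 and moves the remaining option bits down one position each. A small sketch of how such one-bit flags are packed and tested, reusing the shift values from this hunk:

#include <cassert>

// Sketch: pack/test one-bit flags at the shifts defined above.
enum {
  has_method_type_shift   = 25, // (M)
  has_appendix_shift      = 24, // (A)
  is_forced_virtual_shift = 23, // (I)
  is_final_shift          = 22  // (f)
};

static int make_flags(bool has_method_type, bool has_appendix, bool is_final) {
  return ((has_method_type ? 1 : 0) << has_method_type_shift) |
         ((has_appendix    ? 1 : 0) << has_appendix_shift   ) |
         ((is_final        ? 1 : 0) << is_final_shift       );
}

int main() {
  int flags = make_flags(true, true, true);
  assert((flags & (1 << has_method_type_shift))   != 0);
  assert((flags & (1 << has_appendix_shift))      != 0);
  assert((flags & (1 << is_forced_virtual_shift)) == 0);
  return 0;
}
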
@@ -223,14 +224,16 @@
void set_method_handle(
constantPoolHandle cpool, // holding constant pool (required for locking)
methodHandle method, // adapter for invokeExact, etc.
- Handle appendix, // stored in refs[f2]; could be a java.lang.invoke.MethodType
+ Handle appendix, // stored in refs[f2+0]; could be a java.lang.invoke.MethodType
+ Handle method_type, // stored in refs[f2+1]; is a java.lang.invoke.MethodType
objArrayHandle resolved_references
);
void set_dynamic_call(
constantPoolHandle cpool, // holding constant pool (required for locking)
methodHandle method, // adapter for this call site
- Handle appendix, // stored in refs[f2]; could be a java.lang.invoke.CallSite
+ Handle appendix, // stored in refs[f2+0]; could be a java.lang.invoke.CallSite
+ Handle method_type, // stored in refs[f2+1]; is a java.lang.invoke.MethodType
objArrayHandle resolved_references
);
@@ -253,12 +256,24 @@
constantPoolHandle cpool, // holding constant pool (required for locking)
Bytecodes::Code invoke_code, // _invokehandle or _invokedynamic
methodHandle adapter, // invoker method (f1)
- Handle appendix, // appendix such as CallSite, MethodType, etc. (refs[f2])
+ Handle appendix, // appendix such as CallSite, MethodType, etc. (refs[f2+0])
+ Handle method_type, // MethodType (refs[f2+1])
objArrayHandle resolved_references
);
- Method* method_if_resolved(constantPoolHandle cpool);
- oop appendix_if_resolved(constantPoolHandle cpool);
+ // invokedynamic and invokehandle call sites have two entries in the
+ // resolved references array:
+ // appendix (at index+0)
+ // MethodType (at index+1)
+ enum {
+ _indy_resolved_references_appendix_offset = 0,
+ _indy_resolved_references_method_type_offset = 1,
+ _indy_resolved_references_entries
+ };
+
+ Method* method_if_resolved(constantPoolHandle cpool);
+ oop appendix_if_resolved(constantPoolHandle cpool);
+ oop method_type_if_resolved(constantPoolHandle cpool);
void set_parameter_size(int value);
@@ -270,11 +285,11 @@
case Bytecodes::_getfield : // fall through
case Bytecodes::_invokespecial : // fall through
case Bytecodes::_invokestatic : // fall through
+ case Bytecodes::_invokehandle : // fall through
+ case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokeinterface : return 1;
case Bytecodes::_putstatic : // fall through
case Bytecodes::_putfield : // fall through
- case Bytecodes::_invokehandle : // fall through
- case Bytecodes::_invokedynamic : // fall through
case Bytecodes::_invokevirtual : return 2;
default : break;
}
@@ -307,6 +322,7 @@
bool is_forced_virtual() const { return (_flags & (1 << is_forced_virtual_shift)) != 0; }
bool is_vfinal() const { return (_flags & (1 << is_vfinal_shift)) != 0; }
bool has_appendix() const { return (_flags & (1 << has_appendix_shift)) != 0; }
+ bool has_method_type() const { return (_flags & (1 << has_method_type_shift)) != 0; }
bool is_method_entry() const { return (_flags & (1 << is_field_entry_shift)) == 0; }
bool is_field_entry() const { return (_flags & (1 << is_field_entry_shift)) != 0; }
bool is_byte() const { return flag_state() == btos; }
--- a/hotspot/src/share/vm/oops/markOop.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/markOop.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -318,7 +318,7 @@
intptr_t tmp = (intptr_t) monitor;
return (markOop) (tmp | monitor_value);
}
- static markOop encode(JavaThread* thread, int age, int bias_epoch) {
+ static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
intptr_t tmp = (intptr_t) thread;
assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
assert(age <= max_age, "age too large");
@@ -333,10 +333,10 @@
markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }
- int age() const { return mask_bits(value() >> age_shift, age_mask); }
- markOop set_age(int v) const {
+ uint age() const { return mask_bits(value() >> age_shift, age_mask); }
+ markOop set_age(uint v) const {
assert((v & ~age_mask) == 0, "shouldn't overflow age field");
- return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
+ return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
}
markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
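
set_age clears the in-place age field and ORs in the new value; casting v through uintptr_t keeps the arithmetic unsigned so the shift cannot sign-extend. A standalone sketch of the field update, assuming HotSpot's usual layout of 4 age bits starting at bit 3 (illustrative constants, not the real markOop header):

#include <cstdint>
#include <cassert>

// Sketch: read/write a small age field inside a word, with the usual
// HotSpot layout assumed (4 age bits starting at bit 3).
static const int       age_shift         = 3;
static const uintptr_t age_mask          = 0xF;
static const uintptr_t age_mask_in_place = age_mask << age_shift;

static unsigned age_of(uintptr_t mark) {
  return (unsigned)((mark >> age_shift) & age_mask);
}

static uintptr_t set_age(uintptr_t mark, unsigned v) {
  assert((v & ~age_mask) == 0 && "shouldn't overflow age field");
  // Unsigned arithmetic throughout, so the shift cannot sign-extend.
  return (mark & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift);
}

int main() {
  uintptr_t mark = 0x05;           // some unrelated low bits set
  mark = set_age(mark, 7);
  assert(age_of(mark) == 7);
  assert((mark & ~age_mask_in_place) == 0x05); // other bits untouched
  return 0;
}
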
--- a/hotspot/src/share/vm/oops/objArrayKlass.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/objArrayKlass.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -646,7 +646,7 @@
}
int remaining = oa->length() - print_len;
if (remaining > 0) {
- tty->print_cr(" - <%d more elements, increase MaxElementPrintSize to print>", remaining);
+ st->print_cr(" - <%d more elements, increase MaxElementPrintSize to print>", remaining);
}
}
--- a/hotspot/src/share/vm/oops/oop.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/oop.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -327,7 +327,7 @@
oop forwardee() const;
// Age of object during scavenge
- int age() const;
+ uint age() const;
void incr_age();
// Adjust all pointers in this object to point at it's forwarded location and
--- a/hotspot/src/share/vm/oops/oop.inline.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/oop.inline.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -693,7 +693,7 @@
}
// The following method needs to be MT safe.
-inline int oopDesc::age() const {
+inline uint oopDesc::age() const {
assert(!is_forwarded(), "Attempt to read age from forwarded mark");
if (has_displaced_mark()) {
return displaced_mark()->age();
--- a/hotspot/src/share/vm/oops/typeArrayKlass.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/oops/typeArrayKlass.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -381,7 +381,7 @@
}
int remaining = ta->length() - print_len;
if (remaining > 0) {
- tty->print_cr(" - <%d more elements, increase MaxElementPrintSize to print>", remaining);
+ st->print_cr(" - <%d more elements, increase MaxElementPrintSize to print>", remaining);
}
}
--- a/hotspot/src/share/vm/opto/classes.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/opto/classes.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -268,6 +268,8 @@
macro(MulVD)
macro(DivVF)
macro(DivVD)
+macro(LShiftCntV)
+macro(RShiftCntV)
macro(LShiftVB)
macro(LShiftVS)
macro(LShiftVI)
--- a/hotspot/src/share/vm/opto/matcher.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/opto/matcher.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -261,6 +261,7 @@
// Vector ideal reg
static const int vector_ideal_reg(int len);
+ static const int vector_shift_count_ideal_reg(int len);
// CPU supports misaligned vectors store/load.
static const bool misaligned_vectors_ok();
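
Every platform matcher now has to answer this query. A back end that keeps
the shift count in the low element of a vector register would return that
register class; ports without vector shifts can report a fatal error
instead. A hedged sketch of the supporting case follows -- the Op_VecS
register class is an assumption here, not quoted from this changeset.

    // Sketch of one possible platform implementation: the shift count for
    // integer vector shifts lives in a small vector register, whatever the
    // length of the vectors being shifted (Op_VecS is assumed here).
    const int Matcher::vector_shift_count_ideal_reg(int size) {
      return Op_VecS;
    }
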
--- a/hotspot/src/share/vm/opto/parse1.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/opto/parse1.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -372,7 +372,8 @@
// in the CFG, which typeflow had previously ignored.
// E.g., Object x = coldAtFirst() && notReached() ? "str" : new Integer(123).
// This x will be typed as Integer if notReached is not yet linked.
- uncommon_trap(Deoptimization::Reason_unreached,
+ // It could also happen due to a problem in ciTypeFlow analysis.
+ uncommon_trap(Deoptimization::Reason_constraint,
Deoptimization::Action_reinterpret);
set_map(types_are_good);
}
--- a/hotspot/src/share/vm/opto/superword.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/opto/superword.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -1436,10 +1436,9 @@
return opd; // input is matching vector
}
if ((opd_idx == 2) && VectorNode::is_shift(p0)) {
- // No vector is needed for shift count.
- // Vector instructions do not mask shift count, do it here.
Compile* C = _phase->C;
Node* cnt = opd;
+ // Vector instructions do not mask shift count, do it here.
juint mask = (p0->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1);
const TypeInt* t = opd->find_int_type();
if (t != NULL && t->is_con()) {
@@ -1456,8 +1455,8 @@
_phase->set_ctrl(cnt, _phase->get_ctrl(opd));
}
assert(opd->bottom_type()->isa_int(), "int type only");
- // Move non constant shift count into XMM register.
- cnt = new (C) MoveI2FNode(cnt);
+ // Move non-constant shift count into vector register.
+ cnt = VectorNode::shift_count(C, p0, cnt, vlen, velt_basic_type(p0));
}
if (cnt != opd) {
_igvn.register_new_node_with_optimizer(cnt);
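
The mask above exists because Java scalar shifts use only the low
log2(bits) bits of the count (JLS 15.19), whereas typical SIMD shift
instructions consume the whole count, and an oversized count flushes the
lane to zero. A self-contained sketch of the semantic gap the AndI mask
closes (plain C++, same BitsPerInt - 1 constant as above):

    #include <cstdio>

    int java_ishl(int x, int s) {
      // Java semantics: only the low 5 bits of an int shift count are used.
      return x << (s & (32 - 1));
    }

    int simd_like_shl(int x, int s) {
      // Typical SIMD semantics: a count outside [0, 31] shifts everything out.
      return (s < 0 || s >= 32) ? 0 : x << s;
    }

    int main() {
      // For s == 33 Java computes 1 << 1 == 2, but an unmasked SIMD shift
      // yields 0 -- hence SuperWord masks the count before vectorizing.
      printf("java: %d, simd: %d\n", java_ishl(1, 33), simd_like_shl(1, 33));
      return 0;
    }
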
--- a/hotspot/src/share/vm/opto/vectornode.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/opto/vectornode.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -243,6 +243,8 @@
VectorNode* VectorNode::make(Compile* C, int opc, Node* n1, Node* n2, uint vlen, BasicType bt) {
const TypeVect* vt = TypeVect::make(bt, vlen);
int vopc = VectorNode::opcode(opc, bt);
+ // This method should not be called for unimplemented vectors.
+ guarantee(vopc > 0, err_msg_res("Vector for '%s' is not implemented", NodeClassNames[opc]));
switch (vopc) {
case Op_AddVB: return new (C) AddVBNode(n1, n2, vt);
@@ -286,7 +288,7 @@
case Op_OrV: return new (C) OrVNode (n1, n2, vt);
case Op_XorV: return new (C) XorVNode(n1, n2, vt);
}
- ShouldNotReachHere();
+ fatal(err_msg_res("Missed vector creation for '%s'", NodeClassNames[vopc]));
return NULL;
}
@@ -312,7 +314,25 @@
case T_DOUBLE:
return new (C) ReplicateDNode(s, vt);
}
- ShouldNotReachHere();
+ fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
+ return NULL;
+}
+
+VectorNode* VectorNode::shift_count(Compile* C, Node* shift, Node* cnt, uint vlen, BasicType bt) {
+ assert(VectorNode::is_shift(shift) && !cnt->is_Con(), "only variable shift count");
+ // Match shift count type with shift vector type.
+ const TypeVect* vt = TypeVect::make(bt, vlen);
+ switch (shift->Opcode()) {
+ case Op_LShiftI:
+ case Op_LShiftL:
+ return new (C) LShiftCntVNode(cnt, vt);
+ case Op_RShiftI:
+ case Op_RShiftL:
+ case Op_URShiftI:
+ case Op_URShiftL:
+ return new (C) RShiftCntVNode(cnt, vt);
+ }
+ fatal(err_msg_res("Missed vector creation for '%s'", NodeClassNames[shift->Opcode()]));
return NULL;
}
@@ -335,7 +355,7 @@
case T_DOUBLE:
return new (C) PackDNode(s, vt);
}
- ShouldNotReachHere();
+ fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
return NULL;
}
@@ -371,7 +391,7 @@
case T_DOUBLE:
return new (C) Pack2DNode(n1, n2, TypeVect::make(T_DOUBLE, 2));
}
- ShouldNotReachHere();
+ fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
}
return NULL;
}
@@ -381,7 +401,6 @@
Node* adr, const TypePtr* atyp, uint vlen, BasicType bt) {
const TypeVect* vt = TypeVect::make(bt, vlen);
return new (C) LoadVectorNode(ctl, mem, adr, atyp, vt);
- return NULL;
}
// Return the vector version of a scalar store node.
@@ -413,7 +432,7 @@
case T_DOUBLE:
return new (C) ExtractDNode(v, pos);
}
- ShouldNotReachHere();
+ fatal(err_msg_res("Type '%s' is not supported for vectors", type2name(bt)));
return NULL;
}
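
Putting the superword.cpp and vectornode.cpp pieces together, the intended
call sequence for a vectorized variable shift is: mask the scalar count,
wrap it in a shift-count node, then build the shift through the checked
factory. A sketch of that shape (C, _igvn, p0, opd, and vlen follow the
SuperWord code above; vec_opd stands in for the matching vector input --
this is an abbreviation, not a verbatim excerpt):

    // Vectorizing x << s over a pack p0 of int left-shifts.
    juint mask = BitsPerInt - 1;                              // 31 for ints
    Node* cnt = new (C) AndINode(opd, _igvn.intcon(mask));    // mask count
    _igvn.register_new_node_with_optimizer(cnt);
    cnt = VectorNode::shift_count(C, p0, cnt, vlen, T_INT);   // LShiftCntV
    _igvn.register_new_node_with_optimizer(cnt);
    // The shift itself then goes through the guarded factory:
    VectorNode* vshift = VectorNode::make(C, Op_LShiftI, vec_opd, cnt, vlen, T_INT);
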
--- a/hotspot/src/share/vm/opto/vectornode.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/opto/vectornode.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -29,7 +29,7 @@
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
-//------------------------------VectorNode--------------------------------------
+//------------------------------VectorNode-------------------------------------
// Vector Operation
class VectorNode : public TypeNode {
public:
@@ -53,7 +53,7 @@
virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(vect_type()->length_in_bytes()); }
static VectorNode* scalar2vector(Compile* C, Node* s, uint vlen, const Type* opd_t);
-
+ static VectorNode* shift_count(Compile* C, Node* shift, Node* cnt, uint vlen, BasicType bt);
static VectorNode* make(Compile* C, int opc, Node* n1, Node* n2, uint vlen, BasicType bt);
static int opcode(int opc, BasicType bt);
@@ -64,9 +64,9 @@
static void vector_operands(Node* n, uint* start, uint* end);
};
-//===========================Vector=ALU=Operations====================================
+//===========================Vector=ALU=Operations=============================
-//------------------------------AddVBNode---------------------------------------
+//------------------------------AddVBNode--------------------------------------
// Vector add byte
class AddVBNode : public VectorNode {
public:
@@ -74,7 +74,7 @@
virtual int Opcode() const;
};
-//------------------------------AddVSNode---------------------------------------
+//------------------------------AddVSNode--------------------------------------
// Vector add char/short
class AddVSNode : public VectorNode {
public:
@@ -82,7 +82,7 @@
virtual int Opcode() const;
};
-//------------------------------AddVINode---------------------------------------
+//------------------------------AddVINode--------------------------------------
// Vector add int
class AddVINode : public VectorNode {
public:
@@ -90,7 +90,7 @@
virtual int Opcode() const;
};
-//------------------------------AddVLNode---------------------------------------
+//------------------------------AddVLNode--------------------------------------
// Vector add long
class AddVLNode : public VectorNode {
public:
@@ -98,7 +98,7 @@
virtual int Opcode() const;
};
-//------------------------------AddVFNode---------------------------------------
+//------------------------------AddVFNode--------------------------------------
// Vector add float
class AddVFNode : public VectorNode {
public:
@@ -106,7 +106,7 @@
virtual int Opcode() const;
};
-//------------------------------AddVDNode---------------------------------------
+//------------------------------AddVDNode--------------------------------------
// Vector add double
class AddVDNode : public VectorNode {
public:
@@ -114,7 +114,7 @@
virtual int Opcode() const;
};
-//------------------------------SubVBNode---------------------------------------
+//------------------------------SubVBNode--------------------------------------
// Vector subtract byte
class SubVBNode : public VectorNode {
public:
@@ -122,7 +122,7 @@
virtual int Opcode() const;
};
-//------------------------------SubVSNode---------------------------------------
+//------------------------------SubVSNode--------------------------------------
// Vector subtract short
class SubVSNode : public VectorNode {
public:
@@ -130,7 +130,7 @@
virtual int Opcode() const;
};
-//------------------------------SubVINode---------------------------------------
+//------------------------------SubVINode--------------------------------------
// Vector subtract int
class SubVINode : public VectorNode {
public:
@@ -138,7 +138,7 @@
virtual int Opcode() const;
};
-//------------------------------SubVLNode---------------------------------------
+//------------------------------SubVLNode--------------------------------------
// Vector subtract long
class SubVLNode : public VectorNode {
public:
@@ -146,7 +146,7 @@
virtual int Opcode() const;
};
-//------------------------------SubVFNode---------------------------------------
+//------------------------------SubVFNode--------------------------------------
// Vector subtract float
class SubVFNode : public VectorNode {
public:
@@ -154,7 +154,7 @@
virtual int Opcode() const;
};
-//------------------------------SubVDNode---------------------------------------
+//------------------------------SubVDNode--------------------------------------
// Vector subtract double
class SubVDNode : public VectorNode {
public:
@@ -162,7 +162,7 @@
virtual int Opcode() const;
};
-//------------------------------MulVSNode---------------------------------------
+//------------------------------MulVSNode--------------------------------------
// Vector multiply short
class MulVSNode : public VectorNode {
public:
@@ -170,7 +170,7 @@
virtual int Opcode() const;
};
-//------------------------------MulVINode---------------------------------------
+//------------------------------MulVINode--------------------------------------
// Vector multiply int
class MulVINode : public VectorNode {
public:
@@ -178,7 +178,7 @@
virtual int Opcode() const;
};
-//------------------------------MulVFNode---------------------------------------
+//------------------------------MulVFNode--------------------------------------
// Vector multiply float
class MulVFNode : public VectorNode {
public:
@@ -186,7 +186,7 @@
virtual int Opcode() const;
};
-//------------------------------MulVDNode---------------------------------------
+//------------------------------MulVDNode--------------------------------------
// Vector multiply double
class MulVDNode : public VectorNode {
public:
@@ -194,7 +194,7 @@
virtual int Opcode() const;
};
-//------------------------------DivVFNode---------------------------------------
+//------------------------------DivVFNode--------------------------------------
// Vector divide float
class DivVFNode : public VectorNode {
public:
@@ -202,7 +202,7 @@
virtual int Opcode() const;
};
-//------------------------------DivVDNode---------------------------------------
+//------------------------------DivVDNode--------------------------------------
// Vector divide double
class DivVDNode : public VectorNode {
public:
@@ -210,7 +210,7 @@
virtual int Opcode() const;
};
-//------------------------------LShiftVBNode---------------------------------------
+//------------------------------LShiftVBNode-----------------------------------
// Vector left shift bytes
class LShiftVBNode : public VectorNode {
public:
@@ -218,7 +218,7 @@
virtual int Opcode() const;
};
-//------------------------------LShiftVSNode---------------------------------------
+//------------------------------LShiftVSNode-----------------------------------
// Vector left shift shorts
class LShiftVSNode : public VectorNode {
public:
@@ -226,7 +226,7 @@
virtual int Opcode() const;
};
-//------------------------------LShiftVINode---------------------------------------
+//------------------------------LShiftVINode-----------------------------------
// Vector left shift ints
class LShiftVINode : public VectorNode {
public:
@@ -234,7 +234,7 @@
virtual int Opcode() const;
};
-//------------------------------LShiftVLNode---------------------------------------
+//------------------------------LShiftVLNode-----------------------------------
// Vector left shift longs
class LShiftVLNode : public VectorNode {
public:
@@ -242,7 +242,7 @@
virtual int Opcode() const;
};
-//------------------------------RShiftVBNode---------------------------------------
+//------------------------------RShiftVBNode-----------------------------------
// Vector right arithmetic (signed) shift bytes
class RShiftVBNode : public VectorNode {
public:
@@ -250,7 +250,7 @@
virtual int Opcode() const;
};
-//------------------------------RShiftVSNode---------------------------------------
+//------------------------------RShiftVSNode-----------------------------------
// Vector right arithmetic (signed) shift shorts
class RShiftVSNode : public VectorNode {
public:
@@ -258,7 +258,7 @@
virtual int Opcode() const;
};
-//------------------------------RShiftVINode---------------------------------------
+//------------------------------RShiftVINode-----------------------------------
// Vector right arithmetic (signed) shift ints
class RShiftVINode : public VectorNode {
public:
@@ -266,7 +266,7 @@
virtual int Opcode() const;
};
-//------------------------------RShiftVLNode---------------------------------------
+//------------------------------RShiftVLNode-----------------------------------
// Vector right arithmetic (signed) shift longs
class RShiftVLNode : public VectorNode {
public:
@@ -274,7 +274,7 @@
virtual int Opcode() const;
};
-//------------------------------URShiftVBNode---------------------------------------
+//------------------------------URShiftVBNode----------------------------------
// Vector right logical (unsigned) shift bytes
class URShiftVBNode : public VectorNode {
public:
@@ -282,7 +282,7 @@
virtual int Opcode() const;
};
-//------------------------------URShiftVSNode---------------------------------------
+//------------------------------URShiftVSNode----------------------------------
// Vector right logical (unsigned) shift shorts
class URShiftVSNode : public VectorNode {
public:
@@ -290,7 +290,7 @@
virtual int Opcode() const;
};
-//------------------------------URShiftVINode---------------------------------------
+//------------------------------URShiftVINode----------------------------------
// Vector right logical (unsigned) shift ints
class URShiftVINode : public VectorNode {
public:
@@ -298,7 +298,7 @@
virtual int Opcode() const;
};
-//------------------------------URShiftVLNode---------------------------------------
+//------------------------------URShiftVLNode----------------------------------
// Vector right logical (unsigned) shift longs
class URShiftVLNode : public VectorNode {
public:
@@ -306,6 +306,24 @@
virtual int Opcode() const;
};
+//------------------------------LShiftCntVNode---------------------------------
+// Vector left shift count
+class LShiftCntVNode : public VectorNode {
+ public:
+ LShiftCntVNode(Node* cnt, const TypeVect* vt) : VectorNode(cnt, vt) {}
+ virtual int Opcode() const;
+ virtual uint ideal_reg() const { return Matcher::vector_shift_count_ideal_reg(vect_type()->length_in_bytes()); }
+};
+
+//------------------------------RShiftCntVNode---------------------------------
+// Vector right shift count
+class RShiftCntVNode : public VectorNode {
+ public:
+ RShiftCntVNode(Node* cnt, const TypeVect* vt) : VectorNode(cnt, vt) {}
+ virtual int Opcode() const;
+ virtual uint ideal_reg() const { return Matcher::vector_shift_count_ideal_reg(vect_type()->length_in_bytes()); }
+};
+
//------------------------------AndVNode---------------------------------------
// Vector and integer
@@ -452,7 +470,7 @@
static PackNode* make(Compile* C, Node* s, uint vlen, BasicType bt);
};
-//------------------------------PackBNode---------------------------------------
+//------------------------------PackBNode--------------------------------------
// Pack byte scalars into vector
class PackBNode : public PackNode {
public:
@@ -460,7 +478,7 @@
virtual int Opcode() const;
};
-//------------------------------PackSNode---------------------------------------
+//------------------------------PackSNode--------------------------------------
// Pack short scalars into a vector
class PackSNode : public PackNode {
public:
@@ -469,7 +487,7 @@
virtual int Opcode() const;
};
-//------------------------------PackINode---------------------------------------
+//------------------------------PackINode--------------------------------------
// Pack integer scalars into a vector
class PackINode : public PackNode {
public:
@@ -478,7 +496,7 @@
virtual int Opcode() const;
};
-//------------------------------PackLNode---------------------------------------
+//------------------------------PackLNode--------------------------------------
// Pack long scalars into a vector
class PackLNode : public PackNode {
public:
@@ -487,7 +505,7 @@
virtual int Opcode() const;
};
-//------------------------------Pack2LNode--------------------------------------
+//------------------------------Pack2LNode-------------------------------------
// Pack 2 long scalars into a vector
class Pack2LNode : public PackNode {
public:
@@ -495,7 +513,7 @@
virtual int Opcode() const;
};
-//------------------------------PackFNode---------------------------------------
+//------------------------------PackFNode--------------------------------------
// Pack float scalars into vector
class PackFNode : public PackNode {
public:
@@ -504,7 +522,7 @@
virtual int Opcode() const;
};
-//------------------------------PackDNode---------------------------------------
+//------------------------------PackDNode--------------------------------------
// Pack double scalars into a vector
class PackDNode : public PackNode {
public:
@@ -513,7 +531,7 @@
virtual int Opcode() const;
};
-//------------------------------Pack2DNode--------------------------------------
+//------------------------------Pack2DNode-------------------------------------
// Pack 2 double scalars into a vector
class Pack2DNode : public PackNode {
public:
@@ -522,9 +540,9 @@
};
-//========================Extract_Scalar_from_Vector===============================
+//========================Extract_Scalar_from_Vector===========================
-//------------------------------ExtractNode---------------------------------------
+//------------------------------ExtractNode------------------------------------
// Extract a scalar from a vector at position "pos"
class ExtractNode : public Node {
public:
@@ -537,7 +555,7 @@
static Node* make(Compile* C, Node* v, uint position, BasicType bt);
};
-//------------------------------ExtractBNode---------------------------------------
+//------------------------------ExtractBNode-----------------------------------
// Extract a byte from a vector at position "pos"
class ExtractBNode : public ExtractNode {
public:
@@ -547,7 +565,7 @@
virtual uint ideal_reg() const { return Op_RegI; }
};
-//------------------------------ExtractUBNode--------------------------------------
+//------------------------------ExtractUBNode----------------------------------
// Extract a boolean from a vector at position "pos"
class ExtractUBNode : public ExtractNode {
public:
@@ -557,7 +575,7 @@
virtual uint ideal_reg() const { return Op_RegI; }
};
-//------------------------------ExtractCNode---------------------------------------
+//------------------------------ExtractCNode-----------------------------------
// Extract a char from a vector at position "pos"
class ExtractCNode : public ExtractNode {
public:
@@ -567,7 +585,7 @@
virtual uint ideal_reg() const { return Op_RegI; }
};
-//------------------------------ExtractSNode---------------------------------------
+//------------------------------ExtractSNode-----------------------------------
// Extract a short from a vector at position "pos"
class ExtractSNode : public ExtractNode {
public:
@@ -577,7 +595,7 @@
virtual uint ideal_reg() const { return Op_RegI; }
};
-//------------------------------ExtractINode---------------------------------------
+//------------------------------ExtractINode-----------------------------------
// Extract an int from a vector at position "pos"
class ExtractINode : public ExtractNode {
public:
@@ -587,7 +605,7 @@
virtual uint ideal_reg() const { return Op_RegI; }
};
-//------------------------------ExtractLNode---------------------------------------
+//------------------------------ExtractLNode-----------------------------------
// Extract a long from a vector at position "pos"
class ExtractLNode : public ExtractNode {
public:
@@ -597,7 +615,7 @@
virtual uint ideal_reg() const { return Op_RegL; }
};
-//------------------------------ExtractFNode---------------------------------------
+//------------------------------ExtractFNode-----------------------------------
// Extract a float from a vector at position "pos"
class ExtractFNode : public ExtractNode {
public:
@@ -607,7 +625,7 @@
virtual uint ideal_reg() const { return Op_RegF; }
};
-//------------------------------ExtractDNode---------------------------------------
+//------------------------------ExtractDNode-----------------------------------
// Extract a double from a vector at position "pos"
class ExtractDNode : public ExtractNode {
public:
--- a/hotspot/src/share/vm/runtime/arguments.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/runtime/arguments.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -1112,7 +1112,7 @@
// AlwaysTenure flag should make ParNew promote all at first collection.
// See CR 6362902.
if (AlwaysTenure) {
- FLAG_SET_CMDLINE(intx, MaxTenuringThreshold, 0);
+ FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0);
}
// When using compressed oops, we use local overflow stacks,
// rather than using a global overflow list chained through
@@ -1231,7 +1231,7 @@
// promote all objects surviving "tenuring_default" scavenges.
if (FLAG_IS_DEFAULT(MaxTenuringThreshold) &&
FLAG_IS_DEFAULT(SurvivorRatio)) {
- FLAG_SET_ERGO(intx, MaxTenuringThreshold, tenuring_default);
+ FLAG_SET_ERGO(uintx, MaxTenuringThreshold, tenuring_default);
}
// If we decided above (or user explicitly requested)
// `promote all' (via MaxTenuringThreshold := 0),
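
The type argument to FLAG_SET_CMDLINE / FLAG_SET_ERGO has to name the
flag's declared type, which the globals.hpp hunk below switches from intx
to uintx for the tenuring thresholds -- hence these call sites change in
lockstep. A toy illustration of the constraint; the flag_set template is
hypothetical, standing in for HotSpot's macro plumbing:

    #include <cstdint>

    typedef intptr_t  intx;
    typedef uintptr_t uintx;

    uintx MaxTenuringThreshold = 15;  // declared uintx after this change

    // Hypothetical stand-in for the FLAG_SET_* machinery: the deduced
    // type of the flag and of the value must agree.
    template <typename T>
    void flag_set(T* flag, T value) { *flag = value; }

    int main() {
      flag_set(&MaxTenuringThreshold, (uintx)0);   // matches the declaration
      // flag_set(&MaxTenuringThreshold, (intx)0); // template deduction fails
      return 0;
    }
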
--- a/hotspot/src/share/vm/runtime/biasedLocking.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/runtime/biasedLocking.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -154,7 +154,7 @@
return BiasedLocking::NOT_BIASED;
}
- int age = mark->age();
+ uint age = mark->age();
markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
--- a/hotspot/src/share/vm/runtime/globals.hpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/runtime/globals.hpp Fri Oct 05 13:28:16 2012 -0700
@@ -473,7 +473,7 @@
develop(bool, CleanChunkPoolAsync, falseInEmbedded, \
"Whether to clean the chunk pool asynchronously") \
\
- /* Temporary: See 6948537 */ \
+ /* Temporary: See 6948537 */ \
experimental(bool, UseMemSetInBOT, true, \
"(Unstable) uses memset in BOT updates in GC code") \
\
@@ -1626,7 +1626,7 @@
"Use BinaryTreeDictionary as default in the CMS generation") \
\
product(uintx, CMSIndexedFreeListReplenish, 4, \
- "Replenish an indexed free list with this number of chunks") \
+ "Replenish an indexed free list with this number of chunks") \
\
product(bool, CMSReplenishIntermediate, true, \
"Replenish all intermediate free-list caches") \
@@ -2052,7 +2052,7 @@
product(uintx, TenuredGenerationSizeSupplementDecay, 2, \
"Decay factor to TenuredGenerationSizeIncrement") \
\
- product(uintx, MaxGCPauseMillis, max_uintx, \
+ product(uintx, MaxGCPauseMillis, max_uintx, \
"Adaptive size policy maximum GC pause time goal in msec, " \
"or (G1 Only) the max. GC time per MMU time slice") \
\
@@ -2266,7 +2266,7 @@
develop(bool, TraceGCTaskQueue, false, \
"Trace actions of the GC task queues") \
\
- diagnostic(bool, TraceGCTaskThread, false, \
+ diagnostic(bool, TraceGCTaskThread, false, \
"Trace actions of the GC task threads") \
\
product(bool, PrintParallelOldGCPhaseTimes, false, \
@@ -2781,7 +2781,7 @@
product(intx, SafepointTimeoutDelay, 10000, \
"Delay in milliseconds for option SafepointTimeout") \
\
- product(intx, NmethodSweepFraction, 16, \
+ product(intx, NmethodSweepFraction, 16, \
"Number of invocations of sweeper to cover all nmethods") \
\
product(intx, NmethodSweepCheckInterval, 5, \
@@ -2904,7 +2904,7 @@
"if non-zero, start verifying C heap after Nth call to " \
"malloc/realloc/free") \
\
- product(intx, TypeProfileWidth, 2, \
+ product(intx, TypeProfileWidth, 2, \
"number of receiver types to record in call/cast profile") \
\
develop(intx, BciProfileWidth, 2, \
@@ -3012,10 +3012,10 @@
product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K), \
"Min change in heap space due to GC (in bytes)") \
\
- product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
+ product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K), \
"Min expansion of permanent heap (in bytes)") \
\
- product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
+ product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M), \
"Max expansion of permanent heap without full GC (in bytes)") \
\
product(intx, QueuedAllocationWarningCount, 0, \
@@ -3028,10 +3028,10 @@
diagnostic(intx, VerifyGCLevel, 0, \
"Generation level at which to start +VerifyBefore/AfterGC") \
\
- product(intx, MaxTenuringThreshold, 15, \
+ product(uintx, MaxTenuringThreshold, 15, \
"Maximum value for tenuring threshold") \
\
- product(intx, InitialTenuringThreshold, 7, \
+ product(uintx, InitialTenuringThreshold, 7, \
"Initial value for tenuring threshold") \
\
product(intx, TargetSurvivorRatio, 50, \
@@ -3065,6 +3065,9 @@
develop(uintx, GCExpandToAllocateDelayMillis, 0, \
"Delay in ms between expansion and allocation") \
\
+ develop(uintx, GCWorkerDelayMillis, 0, \
+ "Delay in ms in scheduling GC workers") \
+ \
product(intx, DeferThrSuspendLoopCount, 4000, \
"(Unstable) Number of times to iterate in safepoint loop " \
" before blocking VM threads ") \
--- a/hotspot/src/share/vm/runtime/os.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/runtime/os.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -878,6 +878,7 @@
st->print(" for ");
nm->method()->print_value_on(st);
}
+ st->cr();
nm->print_nmethod(verbose);
return;
}
@@ -898,7 +899,11 @@
print = true;
}
if (print) {
- st->print_cr(INTPTR_FORMAT " is an oop", addr);
+ if (p == (HeapWord*) addr) {
+ st->print_cr(INTPTR_FORMAT " is an oop", addr);
+ } else {
+ st->print_cr(INTPTR_FORMAT " is pointing into object: " INTPTR_FORMAT, addr, p);
+ }
oop(p)->print_on(st);
return;
}
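
The new branch above reports interior pointers separately: p is the start
of the enclosing heap block, so p != addr means addr points into the middle
of an object rather than at its header. A standalone sketch of the same
containment test (FakeObject and the function name are illustrative, not
the HotSpot API):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    struct FakeObject { uintptr_t start; size_t size; };  // illustrative

    void print_location(const FakeObject& obj, uintptr_t addr) {
      if (addr == obj.start) {
        printf("0x%" PRIxPTR " is an oop\n", addr);
      } else if (addr > obj.start && addr < obj.start + obj.size) {
        // Interior pointer: print both the address and the object start,
        // mirroring the new "is pointing into object" message above.
        printf("0x%" PRIxPTR " is pointing into object: 0x%" PRIxPTR "\n",
               addr, obj.start);
      }
    }

    int main() {
      FakeObject o = { 0x1000, 64 };
      print_location(o, 0x1000);   // exact oop address
      print_location(o, 0x1010);   // interior pointer
      return 0;
    }
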
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp Fri Oct 05 13:28:16 2012 -0700
@@ -508,7 +508,7 @@
nonstatic_field(ContiguousSpace, _saved_mark_word, HeapWord*) \
\
nonstatic_field(DefNewGeneration, _next_gen, Generation*) \
- nonstatic_field(DefNewGeneration, _tenuring_threshold, int) \
+ nonstatic_field(DefNewGeneration, _tenuring_threshold, uint) \
nonstatic_field(DefNewGeneration, _age_table, ageTable) \
nonstatic_field(DefNewGeneration, _eden_space, EdenSpace*) \
nonstatic_field(DefNewGeneration, _from_space, ContiguousSpace*) \
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/7199742/Test7199742.java Fri Oct 05 13:28:16 2012 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+/**
+ * @test
+ * @bug 7199742
+ * @summary A lot of C2 OSR compilations of the same method's bci
+ *
+ * @run main/othervm -Xmx32m -Xbatch Test7199742
+ */
+
+public class Test7199742 {
+ private static final int ITERS = 10000000;
+ public static void main(String args[]) {
+ Test7199742 t = new Test7199742();
+ for (int i = 0; i < 10; i++) {
+ test(t, 7);
+ }
+ }
+ static Test7199742 test(Test7199742 t, int m) {
+ int i = -(ITERS/2);
+ if (i == 0) return null;
+ Test7199742 v = null;
+ while (i < ITERS) {
+ if ((i & m) == 0) {
+ v = t;
+ }
+ i++;
+ }
+ return v;
+ }
+}
+
--- a/hotspot/test/compiler/7200264/Test7200264.sh Thu Oct 04 14:34:51 2012 -0700
+++ b/hotspot/test/compiler/7200264/Test7200264.sh Fri Oct 05 13:28:16 2012 -0700
@@ -77,6 +77,16 @@
exit 0
fi
+# Check whether the CPU supports integer multiply vectors (sse4.1 feature)
+${TESTJAVA}${FS}bin${FS}java ${TESTVMOPTS} -XX:+PrintMiscellaneous -XX:+Verbose -version | grep "cores per cpu" | grep "sse4.1"
+
+if [ $? != 0 ]
+then
+ SSE=2
+else
+ SSE=4
+fi
+
cp ${TESTSRC}${FS}TestIntVect.java .
${TESTJAVA}${FS}bin${FS}javac -d . TestIntVect.java
@@ -97,6 +107,9 @@
exit 1
fi
+# MulVI is only supported with SSE4.1.
+if [ $SSE -gt 3 ]
+then
# LShiftVI+SubVI is generated for test_mulc
COUNT=`grep MulVI test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 2 ]
@@ -104,6 +117,7 @@
echo "Test Failed: MulVI $COUNT < 2"
exit 1
fi
+fi
COUNT=`grep AndV test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 3 ]
@@ -126,6 +140,7 @@
exit 1
fi
+# LShiftVI+SubVI is generated for test_mulc
COUNT=`grep LShiftVI test.out | wc -l | awk '{print $1}'`
if [ $COUNT -lt 5 ]
then
@@ -133,11 +148,10 @@
exit 1
fi
-# RShiftVI + URShiftVI
-COUNT=`grep RShiftVI test.out | wc -l | awk '{print $1}'`
-if [ $COUNT -lt 6 ]
+COUNT=`grep RShiftVI test.out | sed '/URShiftVI/d' | wc -l | awk '{print $1}'`
+if [ $COUNT -lt 3 ]
then
- echo "Test Failed: RShiftVI $COUNT < 6"
+ echo "Test Failed: RShiftVI $COUNT < 3"
exit 1
fi