--- a/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/cpu/aarch64/vm/frame_aarch64.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -110,17 +110,7 @@
// Entry frame checks
if (is_entry_frame()) {
// an entry frame must have a valid fp.
-
- if (!fp_safe) return false;
-
- // Validate the JavaCallWrapper an entry frame must have
-
- address jcw = (address)entry_frame_call_wrapper();
-
- bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
-
- return jcw_safe;
-
+ return fp_safe && is_entry_frame_valid(thread);
}
intptr_t* sender_sp = NULL;
@@ -210,15 +200,8 @@
}
// construct the potential sender
-
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
-
- // Validate the JavaCallWrapper an entry frame must have
- address jcw = (address)sender.entry_frame_call_wrapper();
-
- bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());
-
- return jcw_safe;
+ return sender.is_entry_frame_valid(thread);
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
--- a/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/cpu/sparc/vm/frame_sparc.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -225,19 +225,7 @@
// Entry frame checks
if (is_entry_frame()) {
// an entry frame must have a valid fp.
-
- if (!fp_safe) {
- return false;
- }
-
- // Validate the JavaCallWrapper an entry frame must have
-
- address jcw = (address)entry_frame_call_wrapper();
-
- bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > _FP);
-
- return jcw_safe;
-
+ return fp_safe && is_entry_frame_valid(thread);
}
intptr_t* younger_sp = sp();
@@ -290,14 +278,8 @@
return false;
}
- if( sender.is_entry_frame()) {
- // Validate the JavaCallWrapper an entry frame must have
-
- address jcw = (address)sender.entry_frame_call_wrapper();
-
- bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > sender_fp);
-
- return jcw_safe;
+ if (sender.is_entry_frame()) {
+ return sender.is_entry_frame_valid(thread);
}
// If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
@@ -357,12 +339,6 @@
_cb = CodeCache::find_blob(_pc);
}
_deopt_state = unknown;
-#ifdef ASSERT
- if ( _cb != NULL && _cb->is_compiled()) {
- // Without a valid unextended_sp() we can't convert the pc to "original"
- assert(!((CompiledMethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
- }
-#endif // ASSERT
}
frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
@@ -534,6 +510,7 @@
void frame::patch_pc(Thread* thread, address pc) {
+ vmassert(_deopt_state != unknown, "frame is unpatchable");
if(thread == Thread::current()) {
StubRoutines::Sparc::flush_callers_register_windows_func()();
}
--- a/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/cpu/sparc/vm/register_definitions_sparc.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
// make sure the defines don't screw up the declarations later on in this file
#define DONT_USE_REGISTER_DEFINES
-#include "precompiled.hpp"
+// Note: precompiled headers cannot be used in this file because of the above
+// definition
+
#include "asm/assembler.hpp"
#include "asm/register.hpp"
#include "interp_masm_sparc.hpp"
--- a/hotspot/src/cpu/x86/vm/frame_x86.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/cpu/x86/vm/frame_x86.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -108,17 +108,7 @@
// Entry frame checks
if (is_entry_frame()) {
// an entry frame must have a valid fp.
-
- if (!fp_safe) return false;
-
- // Validate the JavaCallWrapper an entry frame must have
-
- address jcw = (address)entry_frame_call_wrapper();
-
- bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
-
- return jcw_safe;
-
+ return fp_safe && is_entry_frame_valid(thread);
}
intptr_t* sender_sp = NULL;
@@ -209,15 +199,8 @@
}
// construct the potential sender
-
frame sender(sender_sp, sender_unextended_sp, saved_fp, sender_pc);
-
- // Validate the JavaCallWrapper an entry frame must have
- address jcw = (address)sender.entry_frame_call_wrapper();
-
- bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());
-
- return jcw_safe;
+ return sender.is_entry_frame_valid(thread);
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/BreakpointInfo.java Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/BreakpointInfo.java Thu Aug 04 18:34:40 2016 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2016 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodCounters.java Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/MethodCounters.java Thu Aug 04 18:34:40 2016 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/hotspot/src/share/vm/gc/g1/g1Analytics.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1Analytics.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -316,8 +316,12 @@
return get_new_size_prediction(_pending_cards_seq);
}
+double G1Analytics::oldest_known_gc_end_time_sec() const {
+ return _recent_prev_end_times_for_all_gcs_sec->oldest();
+}
+
double G1Analytics::last_known_gc_end_time_sec() const {
- return _recent_prev_end_times_for_all_gcs_sec->oldest();
+ return _recent_prev_end_times_for_all_gcs_sec->last();
}
void G1Analytics::update_recent_gc_times(double end_time_sec,
--- a/hotspot/src/share/vm/gc/g1/g1Analytics.hpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1Analytics.hpp Thu Aug 04 18:34:40 2016 +0200
@@ -155,6 +155,7 @@
void update_recent_gc_times(double end_time_sec, double elapsed_ms);
void compute_pause_time_ratio(double interval_ms, double pause_time_ms);
+ double oldest_known_gc_end_time_sec() const;
double last_known_gc_end_time_sec() const;
};
--- a/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1CollectedHeap.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -28,6 +28,7 @@
#include "classfile/symbolTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
+#include "gc/g1/g1Analytics.hpp"
#include "gc/g1/bufferingOopClosure.hpp"
#include "gc/g1/concurrentG1Refine.hpp"
#include "gc/g1/concurrentG1RefineThread.hpp"
@@ -2473,8 +2474,19 @@
}
jlong G1CollectedHeap::millis_since_last_gc() {
- // assert(false, "NYI");
- return 0;
+ jlong now = os::elapsed_counter() / NANOSECS_PER_MILLISEC;
+ const G1Analytics* analytics = _g1_policy->analytics();
+ double last = analytics->last_known_gc_end_time_sec();
+ jlong ret_val = now - (last * 1000);
+ if (ret_val < 0) {
+ // See the notes in GenCollectedHeap::millis_since_last_gc()
+ // for more information about the implementation.
+ log_warning(gc)("Detected clock going backwards. "
+ "Milliseconds since last GC would be " JLONG_FORMAT
+ ". returning zero instead.", ret_val);
+ return 0;
+ }
+ return ret_val;
}
void G1CollectedHeap::prepare_for_verify() {
--- a/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/gc/g1/g1DefaultPolicy.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -604,7 +604,7 @@
_analytics->report_alloc_rate_ms(alloc_rate_ms);
double interval_ms =
- (end_time_sec - _analytics->last_known_gc_end_time_sec()) * 1000.0;
+ (end_time_sec - _analytics->oldest_known_gc_end_time_sec()) * 1000.0;
_analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
_analytics->compute_pause_time_ratio(interval_ms, pause_time_ms);
}
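
For context on the two changes above: G1's analytics keep a bounded window of GC end times, and the fix separates the most recent entry (now returned by last_known_gc_end_time_sec() and used by G1CollectedHeap::millis_since_last_gc()) from the earliest entry still in the window (returned by the new oldest_known_gc_end_time_sec() and used for the pause-time-ratio interval). A minimal standalone sketch of that distinction follows; the container and names are hypothetical stand-ins, not HotSpot's TruncatedSeq.

// Minimal sketch, not HotSpot code: a bounded history of GC end times where
// last() is the most recent entry and oldest() is the earliest entry still
// kept in the window. Names and container are hypothetical.
#include <deque>
#include <cassert>

struct GcEndTimes {
  explicit GcEndTimes(size_t capacity) : _capacity(capacity) {}

  void add(double end_time_sec) {
    _times.push_back(end_time_sec);
    if (_times.size() > _capacity) {
      _times.pop_front();               // drop the oldest entry once the window is full
    }
  }
  double last() const   { assert(!_times.empty()); return _times.back(); }   // most recent GC end
  double oldest() const { assert(!_times.empty()); return _times.front(); }  // start of the window

 private:
  std::deque<double> _times;            // seconds, most recent at the back
  size_t _capacity;
};

Under that model, "how long since the last GC" wants last(), while the interval over which the pause-time ratio is computed wants oldest(), which is why g1DefaultPolicy.cpp now calls oldest_known_gc_end_time_sec() and G1CollectedHeap::millis_since_last_gc() uses last_known_gc_end_time_sec().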
--- a/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/gc/shared/genCollectedHeap.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -1256,21 +1256,21 @@
};
jlong GenCollectedHeap::millis_since_last_gc() {
- // We need a monotonically non-decreasing time in ms but
- // os::javaTimeMillis() does not guarantee monotonicity.
+ // javaTimeNanos() is guaranteed to be monotonically non-decreasing
+ // provided the underlying platform provides such a time source
+ // (and it is bug free). So we still have to guard against getting
+ // back a time later than 'now'.
jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
GenTimeOfLastGCClosure tolgc_cl(now);
// iterate over generations getting the oldest
// time that a generation was collected
generation_iterate(&tolgc_cl, false);
- // javaTimeNanos() is guaranteed to be monotonically non-decreasing
- // provided the underlying platform provides such a time source
- // (and it is bug free). So we still have to guard against getting
- // back a time later than 'now'.
jlong retVal = now - tolgc_cl.time();
if (retVal < 0) {
- NOT_PRODUCT(log_warning(gc)("time warp: " JLONG_FORMAT, retVal);)
+ log_warning(gc)("Detected clock going backwards. "
+ "Milliseconds since last GC would be " JLONG_FORMAT
+ ". returning zero instead.", retVal);
return 0;
}
return retVal;
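
Both millis_since_last_gc() implementations above follow the same pattern: read a time source that is expected to be monotonic, and if the difference still comes out negative, log a warning and clamp to zero rather than return a negative value. A minimal standalone sketch of that guard, using std::chrono instead of the os:: time sources and with hypothetical names, is:

// Minimal sketch, not HotSpot code: clamp a "milliseconds since" computation
// to zero if the clock ever appears to run backwards, and warn instead of
// returning a negative value.
#include <chrono>
#include <cstdint>
#include <cstdio>

int64_t millis_since(int64_t last_event_ms) {
  using namespace std::chrono;
  const int64_t now_ms =
      duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
  const int64_t delta_ms = now_ms - last_event_ms;
  if (delta_ms < 0) {
    // Mirrors the log_warning(gc) path above: report the anomaly, return zero.
    std::fprintf(stderr, "clock went backwards by %lld ms, returning 0\n",
                 static_cast<long long>(-delta_ms));
    return 0;
  }
  return delta_ms;
}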
--- a/hotspot/src/share/vm/oops/methodCounters.hpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/oops/methodCounters.hpp Thu Aug 04 18:34:40 2016 +0200
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016 Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/hotspot/src/share/vm/opto/cfgnode.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/opto/cfgnode.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -1703,29 +1703,51 @@
}
if (uncasted) {
- // Add a cast node between the phi to be removed and its unique input.
+ // Add cast nodes between the phi to be removed and its unique input.
// Wait until after parsing for the type information to propagate from the casts.
assert(can_reshape, "Invalid during parsing");
const Type* phi_type = bottom_type();
assert(phi_type->isa_int() || phi_type->isa_ptr(), "bad phi type");
- int opcode;
- // Determine the type of cast to be added.
+ // Add casts to carry the control dependency of the Phi that is
+ // going away
+ Node* cast = NULL;
if (phi_type->isa_int()) {
- opcode = Op_CastII;
+ cast = ConstraintCastNode::make_cast(Op_CastII, r, uin, phi_type, true);
} else {
const Type* uin_type = phase->type(uin);
- if ((phi_type->join(TypePtr::NOTNULL) == uin_type->join(TypePtr::NOTNULL)) ||
- (!phi_type->isa_oopptr() && !uin_type->isa_oopptr())) {
- opcode = Op_CastPP;
+ if (!phi_type->isa_oopptr() && !uin_type->isa_oopptr()) {
+ cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, true);
} else {
- opcode = Op_CheckCastPP;
+ // Use a CastPP for a cast to not null and a CheckCastPP for
+ // a cast to a new klass (and both if both null-ness and
+ // klass change).
+
+ // If the type of phi is not null but the type of uin may be
+ // null, uin's type must be cast to not null
+ if (phi_type->join(TypePtr::NOTNULL) == phi_type->remove_speculative() &&
+ uin_type->join(TypePtr::NOTNULL) != uin_type->remove_speculative()) {
+ cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, TypePtr::NOTNULL, true);
+ }
+
+ // If the types of phi and uin, both cast to not null,
+ // differ, the klass of uin must be (check)cast to match
+ // that of phi
+ if (phi_type->join_speculative(TypePtr::NOTNULL) != uin_type->join_speculative(TypePtr::NOTNULL)) {
+ Node* n = uin;
+ if (cast != NULL) {
+ cast = phase->transform(cast);
+ n = cast;
+ }
+ cast = ConstraintCastNode::make_cast(Op_CheckCastPP, r, n, phi_type, true);
+ }
+ if (cast == NULL) {
+ cast = ConstraintCastNode::make_cast(Op_CastPP, r, uin, phi_type, true);
+ }
}
}
- // Add a cast to carry the control dependency of the Phi that is
- // going away
- Node* cast = ConstraintCastNode::make_cast(opcode, r, uin, phi_type, true);
+ assert(cast != NULL, "cast should be set");
cast = phase->transform(cast);
- // set all inputs to the new cast so the Phi is removed by Identity
+ // set all inputs to the new cast(s) so the Phi is removed by Identity
PhaseIterGVN* igvn = phase->is_IterGVN();
for (uint i = 1; i < req(); i++) {
set_req_X(i, cast, igvn);
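
The comments in the hunk above encode a small decision table: an integer phi gets a CastII; if neither the phi nor its unique input is an oop pointer, a CastPP to the phi's type suffices; for oop pointers, a CastPP to not-null is added when only the null-ness narrows, a CheckCastPP when the klass changes, both (chained) when both change, and a plain CastPP to the phi's type when neither does. A standalone model of that table, using hypothetical enum and flag names instead of HotSpot's Type/Node machinery, is:

// Standalone model, not HotSpot code: which ConstraintCastNode(s) the block
// above creates, expressed as a function of properties of the phi and its
// unique input 'uin'. All names here are hypothetical.
enum CastChoice {
  CAST_II,               // integer phi: make_cast(Op_CastII, ..., phi_type, ...)
  CAST_PP_TO_PHI_TYPE,   // non-oop pointers, or oop with no narrowing: Op_CastPP to phi_type
  CAST_PP_TO_NOT_NULL,   // oop where only the null-ness narrows: Op_CastPP to TypePtr::NOTNULL
  CHECK_CAST_PP,         // oop where only the klass changes: Op_CheckCastPP to phi_type
  CAST_PP_THEN_CHECK     // oop where both change: Op_CastPP feeding an Op_CheckCastPP
};

CastChoice choose_cast(bool phi_is_int, bool either_is_oop,
                       bool nullness_narrows, bool klass_differs) {
  if (phi_is_int)                         return CAST_II;
  if (!either_is_oop)                     return CAST_PP_TO_PHI_TYPE;
  if (nullness_narrows && klass_differs)  return CAST_PP_THEN_CHECK;
  if (klass_differs)                      return CHECK_CAST_PP;
  if (nullness_narrows)                   return CAST_PP_TO_NOT_NULL;
  return CAST_PP_TO_PHI_TYPE;             // still needed to carry the control dependency
}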
--- a/hotspot/src/share/vm/runtime/frame.cpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/runtime/frame.cpp Thu Aug 04 18:34:40 2016 +0200
@@ -225,6 +225,19 @@
return NULL;
}
+bool frame::is_entry_frame_valid(JavaThread* thread) const {
+ // Validate the JavaCallWrapper an entry frame must have
+ address jcw = (address)entry_frame_call_wrapper();
+ bool jcw_safe = (jcw < thread->stack_base()) && (jcw > (address)fp()); // jcw must lie within the stack, above fp
+ if (!jcw_safe) {
+ return false;
+ }
+
+ // Validate sp saved in the java frame anchor
+ JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
+ return (jfa->last_Java_sp() > sp());
+}
+
bool frame::should_be_deoptimized() const {
if (_deopt_state == is_deoptimized ||
!is_compiled_frame() ) return false;
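
The new frame::is_entry_frame_valid() centralizes two plausibility checks that the platform-specific is_safe_for_sender() code above used to duplicate: with a stack that grows downward, the JavaCallWrapper of an entry frame must lie between the frame pointer and the thread's stack base, and the sp saved in the frame anchor must be older (numerically greater) than the frame's own sp. A standalone sketch of just those ordering constraints, with hypothetical names, is:

// Standalone sketch, not HotSpot code: the ordering constraints checked by
// frame::is_entry_frame_valid() above, assuming a stack that grows towards
// lower addresses. All names here are hypothetical.
#include <cstdint>

typedef uintptr_t addr_t;

bool entry_frame_plausible(addr_t jcw,                 // address of the JavaCallWrapper
                           addr_t fp,                  // frame pointer of the entry frame
                           addr_t stack_base,          // highest address of the thread stack
                           addr_t anchor_last_java_sp, // sp saved in the JavaFrameAnchor
                           addr_t frame_sp) {          // sp of the entry frame itself
  // The wrapper is a stack-allocated object of the caller, so it must sit
  // inside the thread's stack, above the entry frame's fp.
  const bool jcw_safe = (jcw < stack_base) && (jcw > fp);
  if (!jcw_safe) {
    return false;
  }
  // The sp recorded when entering the VM belongs to an older frame, so it
  // must be higher in memory than the entry frame's current sp.
  return anchor_last_java_sp > frame_sp;
}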
--- a/hotspot/src/share/vm/runtime/frame.hpp Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/src/share/vm/runtime/frame.hpp Thu Aug 04 18:34:40 2016 +0200
@@ -166,6 +166,8 @@
frame sender_for_interpreter_frame(RegisterMap* map) const;
frame sender_for_native_frame(RegisterMap* map) const;
+ bool is_entry_frame_valid(JavaThread* thread) const;
+
// All frames:
// A low-level interface for vframes:
--- a/hotspot/test/runtime/StackGuardPages/exeinvoke.c Thu Aug 04 12:24:10 2016 -0400
+++ b/hotspot/test/runtime/StackGuardPages/exeinvoke.c Thu Aug 04 18:34:40 2016 +0200
@@ -242,7 +242,7 @@
CLASS_PATH_OPT, javaclasspath);
options[0].optionString = "-Xint";
- options[1].optionString = "-Xss328k";
+ options[1].optionString = "-Xss1M";
options[2].optionString = javaclasspathopt;
vm_args.version = JNI_VERSION_1_2;