8176506: C2: loop unswitching and unsafe accesses cause crash
Reviewed-by: vlivanov, mcberg, kvn, simonis
--- a/hotspot/src/cpu/aarch64/vm/aarch64.ad Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/cpu/aarch64/vm/aarch64.ad Mon May 29 18:17:49 2017 +0200
@@ -15387,9 +15387,9 @@
format %{ "ShouldNotReachHere" %}
ins_encode %{
- // TODO
- // implement proper trap call here
- __ brk(999);
+ // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't
+ // return true
+ __ dpcs1(0xdead + 1);
%}
ins_pipe(pipe_class_default);
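
For context on the immediate: emitting plain dpcs1(0xdead) here would produce the exact instruction word that NativeInstruction::is_sigill_zombie_not_entrant() matches, so the SIGILL handler could misclassify this trap as a patched not-entrant method entry; the +1 keeps the two distinguishable. A minimal sketch of the encoding arithmetic, assuming the architectural AArch64 exception-generation layout (0xD4 in bits 31..24, opc in bits 23..21, imm16 in bits 20..5, op2 in bits 4..2, LL in bits 1..0); the class and method names are illustrative only:

```java
// Sketch (not VM code): encode dpcs1-style exception-generation
// instructions and show why the +1 matters. The opc/op2/LL values
// match F(dpcs1, 0b101, 0b000, 0b01) added below.
public class Dcps1Encoding {
    static int encodeDcps1(int imm16) {
        int opc = 0b101, op2 = 0b000, ll = 0b01;
        return 0xD4 << 24 | opc << 21 | (imm16 & 0xFFFF) << 5 | op2 << 2 | ll;
    }

    public static void main(String[] args) {
        int zombieMarker = encodeDcps1(0xdead);  // the word the SIGILL handler matches
        int halt = encodeDcps1(0xdead + 1);      // what ShouldNotReachHere now emits
        System.out.printf("dpcs1 #0xdead     = 0x%08x%n", zombieMarker);  // 0xd4bbd5a1
        System.out.printf("dpcs1 #(0xdead+1) = 0x%08x%n", halt);          // 0xd4bbd5c1
    }
}
```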
--- a/hotspot/src/cpu/arm/vm/arm.ad Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/arm.ad Mon May 29 18:17:49 2017 +0200
@@ -11752,9 +11752,13 @@
size(4);
// Use the following format syntax
- format %{ "breakpoint ; ShouldNotReachHere" %}
- ins_encode %{
- __ breakpoint();
+ format %{ "ShouldNotReachHere" %}
+ ins_encode %{
+#ifdef AARCH64
+ __ dpcs1(0xdead);
+#else
+ __ udf(0xdead);
+#endif
%}
ins_pipe(tail_call);
%}
--- a/hotspot/src/cpu/arm/vm/assembler_arm_32.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/assembler_arm_32.hpp Mon May 29 18:17:49 2017 +0200
@@ -578,6 +578,11 @@
F(bl, 0xb)
#undef F
+ void udf(int imm_16) {
+ assert((imm_16 >> 16) == 0, "encoding constraint");
+ emit_int32(0xe7f000f0 | (imm_16 & 0xfff0) << 4 | (imm_16 & 0xf));
+ }
+
// ARMv7 instructions
#define F(mnemonic, wt) \
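
As a cross-check of the bit packing, assuming the architectural A1 encoding of UDF (cond = 1110, 0111 1111 in bits 27..20, imm12 in bits 19..8, 1111 in bits 7..4, imm4 in bits 3..0): (imm_16 & 0xfff0) << 4 moves bits 15..4 of the immediate into the imm12 field, and (imm_16 & 0xf) fills imm4. A runnable sketch with illustrative names:

```java
// Sketch (not VM code): reproduce the udf() packing and spot-check it
// against known encodings; udf #0 must equal the 0xe7f000f0 opcode base.
public class UdfEncoding {
    static int udf(int imm16) {
        assert (imm16 >> 16) == 0 : "encoding constraint";
        return 0xe7f000f0 | (imm16 & 0xfff0) << 4 | (imm16 & 0xf);
    }

    public static void main(String[] args) {
        System.out.printf("udf #0      = 0x%08x%n", udf(0));       // 0xe7f000f0
        System.out.printf("udf #0xdead = 0x%08x%n", udf(0xdead));  // 0xe7fdeafd
        System.out.printf("udf #0xffff = 0x%08x%n", udf(0xffff));  // 0xe7ffffff
    }
}
```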
--- a/hotspot/src/cpu/arm/vm/assembler_arm_64.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/cpu/arm/vm/assembler_arm_64.hpp Mon May 29 18:17:49 2017 +0200
@@ -1083,6 +1083,7 @@
F(brk, 0b001, 0b000, 0b00)
F(hlt, 0b010, 0b000, 0b00)
+ F(dpcs1, 0b101, 0b000, 0b01)
#undef F
enum SystemRegister { // o0<1> op1<3> CRn<4> CRm<4> op2<3>
--- a/hotspot/src/cpu/x86/vm/assembler_x86.cpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.cpp Mon May 29 18:17:49 2017 +0200
@@ -3321,6 +3321,11 @@
emit_int8((unsigned char)0x90);
}
+void Assembler::ud2() {
+ emit_int8(0x0F);
+ emit_int8(0x0B);
+}
+
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
assert(VM_Version::supports_sse4_2(), "");
InstructionMark im(this);
--- a/hotspot/src/cpu/x86/vm/assembler_x86.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/assembler_x86.hpp Mon May 29 18:17:49 2017 +0200
@@ -1554,6 +1554,9 @@
void pause();
+ // Undefined Instruction
+ void ud2();
+
// SSE4.2 string instructions
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);
--- a/hotspot/src/cpu/x86/vm/x86.ad Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/cpu/x86/vm/x86.ad Mon May 29 18:17:49 2017 +0200
@@ -1804,9 +1804,9 @@
instruct ShouldNotReachHere() %{
match(Halt);
- format %{ "int3\t# ShouldNotReachHere" %}
- ins_encode %{
- __ int3();
+ format %{ "ud2\t# ShouldNotReachHere" %}
+ ins_encode %{
+ __ ud2();
%}
ins_pipe(pipe_slow);
%}
--- a/hotspot/src/share/vm/opto/castnode.cpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/castnode.cpp Mon May 29 18:17:49 2017 +0200
@@ -121,6 +121,9 @@
if (is_CastII() && as_CastII()->has_range_check()) {
return NULL;
}
+ if (type()->isa_rawptr() && (phase->type_or_null(val) == NULL || phase->type(val)->isa_oopptr())) {
+ return NULL;
+ }
for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) {
Node* u = val->fast_out(i);
if (u != this &&
@@ -308,11 +311,15 @@
if (in_ptr == TypePtr::Null) {
result = in_type;
} else if (in_ptr == TypePtr::Constant) {
- const TypeOopPtr *jptr = my_type->isa_oopptr();
- assert(jptr, "");
- result = !in_type->higher_equal(_type)
- ? my_type->cast_to_ptr_type(TypePtr::NotNull)
- : in_type;
+ if (my_type->isa_rawptr()) {
+ result = my_type;
+ } else {
+ const TypeOopPtr *jptr = my_type->isa_oopptr();
+ assert(jptr, "");
+ result = !in_type->higher_equal(_type)
+ ? my_type->cast_to_ptr_type(TypePtr::NotNull)
+ : in_type;
+ }
} else {
result = my_type->cast_to_ptr_type( my_type->join_ptr(in_ptr) );
}
--- a/hotspot/src/share/vm/opto/castnode.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/castnode.hpp Mon May 29 18:17:49 2017 +0200
@@ -116,7 +116,8 @@
virtual const Type* Value(PhaseGVN* phase) const;
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegP; }
-};
+ bool depends_only_on_test() const { return !type()->isa_rawptr() && ConstraintCastNode::depends_only_on_test(); }
+};
//------------------------------CastX2PNode-------------------------------------
--- a/hotspot/src/share/vm/opto/classes.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/classes.hpp Mon May 29 18:17:49 2017 +0200
@@ -221,6 +221,7 @@
macro(Opaque1)
macro(Opaque2)
macro(Opaque3)
+macro(Opaque4)
macro(ProfileBoolean)
macro(OrI)
macro(OrL)
--- a/hotspot/src/share/vm/opto/escape.cpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/escape.cpp Mon May 29 18:17:49 2017 +0200
@@ -2302,20 +2302,34 @@
// | |
// AddP ( base == address )
//
+ // case #9. Mixed unsafe access
+ // {instance}
+ // |
+ // CheckCastPP (raw)
+ // top |
+ // \ |
+ // AddP ( base == top )
+ //
Node *base = addp->in(AddPNode::Base);
- if (base->uncast()->is_top()) { // The AddP case #3 and #6.
+ if (base->uncast()->is_top()) { // The AddP case #3 and #6 and #9.
base = addp->in(AddPNode::Address);
while (base->is_AddP()) {
// Case #6 (unsafe access) may have several chained AddP nodes.
assert(base->in(AddPNode::Base)->uncast()->is_top(), "expected unsafe access address only");
base = base->in(AddPNode::Address);
}
- Node* uncast_base = base->uncast();
- int opcode = uncast_base->Opcode();
- assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
- opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
- (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
- (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
+ if (base->Opcode() == Op_CheckCastPP &&
+ base->bottom_type()->isa_rawptr() &&
+ _igvn->type(base->in(1))->isa_oopptr()) {
+ base = base->in(1); // Case #9
+ } else {
+ Node* uncast_base = base->uncast();
+ int opcode = uncast_base->Opcode();
+ assert(opcode == Op_ConP || opcode == Op_ThreadLocal ||
+ opcode == Op_CastX2P || uncast_base->is_DecodeNarrowPtr() ||
+ (uncast_base->is_Mem() && (uncast_base->bottom_type()->isa_rawptr() != NULL)) ||
+ (uncast_base->is_Proj() && uncast_base->in(0)->is_Allocate()), "sanity");
+ }
}
return base;
}
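
A Java-level shape that can give rise to case #9 (a hedged sketch: the names are made up, and it needs the same jdk.internal.misc access as the jtreg test at the end of this change). One Unsafe call site sometimes sees an on-heap base object and sometimes a null base with an absolute address; once make_unsafe_address() falls back to a raw access, the address below the AddP is a CheckCastPP to a raw pointer whose input is an oop:

```java
import jdk.internal.misc.Unsafe;

public class MixedUnsafeAccess {
    static final Unsafe UNSAFE = Unsafe.getUnsafe();

    static long readLong(Object base, long offset) {
        // base == null -> off-heap: offset is an absolute native address
        // base != null -> on-heap: offset is an offset within base
        return UNSAFE.getLong(base, offset);
    }

    public static void main(String[] args) {
        long mem = UNSAFE.allocateMemory(8);
        UNSAFE.putLong(null, mem, 42L);
        long[] arr = { 7L };
        System.out.println(readLong(null, mem));                                  // off-heap
        System.out.println(readLong(arr, UNSAFE.arrayBaseOffset(long[].class)));  // on-heap
        UNSAFE.freeMemory(mem);
    }
}
```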
--- a/hotspot/src/share/vm/opto/escape.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/escape.hpp Mon May 29 18:17:49 2017 +0200
@@ -528,7 +528,7 @@
}
// Helper functions
bool is_oop_field(Node* n, int offset, bool* unsafe);
- static Node* get_addp_base(Node *addp);
+ Node* get_addp_base(Node *addp);
static Node* find_second_addp(Node* addp, Node* n);
// offset of a field reference
int address_offset(Node* adr, PhaseTransform *phase);
--- a/hotspot/src/share/vm/opto/graphKit.cpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.cpp Mon May 29 18:17:49 2017 +0200
@@ -1390,6 +1390,30 @@
return cast; // Return casted value
}
+// Sometimes in intrinsics, we implicitly know an object is not null
+// (there's no actual null check) so we can cast it to not null. In
+// the course of optimizations, the input to the cast can become null.
+// In that case, that data path will die and we need the control path
+// to become dead as well to keep the graph consistent. So we have to
+// add a null check for which one branch can never be taken. It uses
+// an Opaque4 node that will cause the check to be removed after loop
+// opts, so the test goes away and the compiled code doesn't execute a
+// useless check.
+Node* GraphKit::must_be_not_null(Node* value, bool do_replace_in_map) {
+ Node* chk = _gvn.transform(new CmpPNode(value, null()));
+ Node *tst = _gvn.transform(new BoolNode(chk, BoolTest::ne));
+ Node* opaq = _gvn.transform(new Opaque4Node(C, tst, intcon(1)));
+ IfNode *iff = new IfNode(control(), opaq, PROB_MAX, COUNT_UNKNOWN);
+ _gvn.set_type(iff, iff->Value(&_gvn));
+ Node *if_f = _gvn.transform(new IfFalseNode(iff));
+ Node *frame = _gvn.transform(new ParmNode(C->start(), TypeFunc::FramePtr));
+ Node *halt = _gvn.transform(new HaltNode(if_f, frame));
+ C->root()->add_req(halt);
+ Node *if_t = _gvn.transform(new IfTrueNode(iff));
+ set_control(if_t);
+ return cast_not_null(value, do_replace_in_map);
+}
+
//--------------------------replace_in_map-------------------------------------
void GraphKit::replace_in_map(Node* old, Node* neww) {
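
Rendered as Java-level pseudo-logic, the graph this builds amounts to the sketch below (guardedGet is a hypothetical name; the real shape is IR, CmpP/Bool/Opaque4/If with a Halt on the false projection, not Java control flow). The Opaque4 node keeps the branch alive until loop opts finish; afterwards input 2 (constant true) replaces the test and the branch folds away, so the compiled code pays nothing at runtime:

```java
import jdk.internal.misc.Unsafe;

public class MustBeNotNullSketch {
    static final Unsafe UNSAFE = Unsafe.getUnsafe();

    static int guardedGet(Object base, long offset) {
        if (!(base != null)) {           // the test wrapped in an Opaque4 node
            throw new AssertionError();  // stands in for the Halt node
        }
        return UNSAFE.getInt(base, offset);  // sees the cast-not-null base
    }
}
```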
--- a/hotspot/src/share/vm/opto/graphKit.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/graphKit.hpp Mon May 29 18:17:49 2017 +0200
@@ -368,6 +368,9 @@
return null_check_common(value, type, true);
}
+ // Check if value is null and abort if it is
+ Node* must_be_not_null(Node* value, bool do_replace_in_map);
+
// Null check oop. Return null-path control into (*null_control).
// Return a cast-not-null node which depends on the not-null control.
// If never_see_null, use an uncommon trap (*null_control sees a top).
--- a/hotspot/src/share/vm/opto/library_call.cpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/library_call.cpp Mon May 29 18:17:49 2017 +0200
@@ -47,6 +47,7 @@
#include "opto/opaquenode.hpp"
#include "opto/parse.hpp"
#include "opto/runtime.hpp"
+#include "opto/rootnode.hpp"
#include "opto/subnode.hpp"
#include "prims/nativeLookup.hpp"
#include "prims/unsafe.hpp"
@@ -238,8 +239,8 @@
bool inline_notify(vmIntrinsics::ID id);
Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y);
// This returns Type::AnyPtr, RawPtr, or OopPtr.
- int classify_unsafe_addr(Node* &base, Node* &offset);
- Node* make_unsafe_address(Node* base, Node* offset);
+ int classify_unsafe_addr(Node* &base, Node* &offset, BasicType type);
+ Node* make_unsafe_address(Node*& base, Node* offset, BasicType type = T_ILLEGAL);
// Helper for inline_unsafe_access.
// Generates the guards that check whether the result of
// Unsafe.getObject should be recorded in an SATB log buffer.
@@ -2072,7 +2073,7 @@
}
inline int
-LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) {
+LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, BasicType type) {
const TypePtr* base_type = TypePtr::NULL_PTR;
if (base != NULL) base_type = _gvn.type(base)->isa_ptr();
if (base_type == NULL) {
@@ -2087,7 +2088,7 @@
return Type::RawPtr;
} else if (base_type->isa_oopptr()) {
// Base is never null => always a heap address.
- if (base_type->ptr() == TypePtr::NotNull) {
+ if (!TypePtr::NULL_PTR->higher_equal(base_type)) {
return Type::OopPtr;
}
// Offset is small => always a heap address.
@@ -2097,6 +2098,10 @@
offset_type->_lo >= 0 &&
!MacroAssembler::needs_explicit_null_check(offset_type->_hi)) {
return Type::OopPtr;
+ } else if (type == T_OBJECT) {
+ // An off-heap access to an oop doesn't make any sense; it has to
+ // be on the heap.
+ return Type::OopPtr;
}
// Otherwise, it might either be oop+off or NULL+addr.
return Type::AnyPtr;
@@ -2106,11 +2111,23 @@
}
}
-inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) {
- int kind = classify_unsafe_addr(base, offset);
+inline Node* LibraryCallKit::make_unsafe_address(Node*& base, Node* offset, BasicType type) {
+ Node* uncasted_base = base;
+ int kind = classify_unsafe_addr(uncasted_base, offset, type);
if (kind == Type::RawPtr) {
- return basic_plus_adr(top(), base, offset);
+ return basic_plus_adr(top(), uncasted_base, offset);
+ } else if (kind == Type::AnyPtr) {
+ assert(base == uncasted_base, "unexpected base change");
+ // We don't know if it's an on-heap or off-heap access. Fall back
+ // to a raw memory access.
+ Node* raw = _gvn.transform(new CheckCastPPNode(control(), base, TypeRawPtr::BOTTOM));
+ return basic_plus_adr(top(), raw, offset);
} else {
+ assert(base == uncasted_base, "unexpected base change");
+ // We know it's an on-heap access, so base can't be null
+ if (TypePtr::NULL_PTR->higher_equal(_gvn.type(base))) {
+ base = must_be_not_null(base, true);
+ }
return basic_plus_adr(base, offset);
}
}
@@ -2342,7 +2359,7 @@
"fieldOffset must be byte-scaled");
// 32-bit machines ignore the high half!
offset = ConvL2X(offset);
- adr = make_unsafe_address(base, offset);
+ adr = make_unsafe_address(base, offset, type);
if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) {
heap_base_oop = base;
} else if (type == T_OBJECT) {
@@ -2753,7 +2770,7 @@
assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled");
// 32-bit machines ignore the high half of long offsets
offset = ConvL2X(offset);
- Node* adr = make_unsafe_address(base, offset);
+ Node* adr = make_unsafe_address(base, offset, type);
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
Compile::AliasType* alias_type = C->alias_type(adr_type);
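
How the three answers of classify_unsafe_addr() map onto Java-level call shapes, as a sketch with illustrative names (it needs the same module opens as the test below):

```java
import jdk.internal.misc.Unsafe;

public class UnsafeAddrKinds {
    static final Unsafe UNSAFE = Unsafe.getUnsafe();
    static final long BASE_OFF = UNSAFE.arrayBaseOffset(int[].class);
    static final long ADDR = UNSAFE.allocateMemory(4);

    static int onHeap(int[] a) {
        // small, non-negative constant offset => Type::OopPtr
        return UNSAFE.getInt(a, BASE_OFF);
    }

    static int offHeap() {
        // null base, absolute address => Type::RawPtr
        return UNSAFE.getInt(null, ADDR);
    }

    static int mixed(Object base, long offset) {
        // maybe-null base, arbitrary offset => Type::AnyPtr; C2 now falls
        // back to a raw memory access via a CheckCastPP of the base
        return UNSAFE.getInt(base, offset);
    }

    static Object oopAccess(Object base, long offset) {
        // T_OBJECT access: an oop can only live on the heap => Type::OopPtr,
        // and make_unsafe_address() adds a must_be_not_null() guard on base
        return UNSAFE.getObject(base, offset);
    }
}
```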
--- a/hotspot/src/share/vm/opto/macro.cpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/macro.cpp Mon May 29 18:17:49 2017 +0200
@@ -2662,7 +2662,8 @@
assert(n->Opcode() == Op_LoopLimit ||
n->Opcode() == Op_Opaque1 ||
n->Opcode() == Op_Opaque2 ||
- n->Opcode() == Op_Opaque3, "unknown node type in macro list");
+ n->Opcode() == Op_Opaque3 ||
+ n->Opcode() == Op_Opaque4, "unknown node type in macro list");
}
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
progress = progress || success;
@@ -2727,6 +2728,9 @@
_igvn.replace_node(n, repl);
success = true;
#endif
+ } else if (n->Opcode() == Op_Opaque4) {
+ _igvn.replace_node(n, n->in(2));
+ success = true;
}
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
progress = progress || success;
--- a/hotspot/src/share/vm/opto/opaquenode.cpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/opaquenode.cpp Mon May 29 18:17:49 2017 +0200
@@ -60,6 +60,14 @@
return (&n == this); // Always fail except on self
}
+Node* Opaque4Node::Identity(PhaseGVN* phase) {
+ return phase->C->major_progress() ? this : in(2);
+}
+
+const Type* Opaque4Node::Value(PhaseGVN* phase) const {
+ return phase->type(in(1));
+}
+
//=============================================================================
uint ProfileBooleanNode::hash() const { return NO_HASH; }
--- a/hotspot/src/share/vm/opto/opaquenode.hpp Tue Jun 06 17:07:42 2017 +0200
+++ b/hotspot/src/share/vm/opto/opaquenode.hpp Mon May 29 18:17:49 2017 +0200
@@ -87,6 +87,28 @@
bool rtm_opt() const { return (_opt == RTM_OPT); }
};
+// Used by GraphKit::must_be_not_null(): input 1 is a check that we
+// implicitly know is always true or false, but that the compiler has
+// no way to prove. If, during optimizations, that check becomes
+// constant true or false, the Opaque4 node is replaced by that
+// constant. Input 2 is the constant value we know the test takes.
+// After loop optimizations, we replace input 1 by input 2 so the
+// control that depends on the test can be removed and there's no
+// overhead at runtime.
+class Opaque4Node : public Node {
+ public:
+ Opaque4Node(Compile* C, Node *tst, Node* final_tst) : Node(0, tst, final_tst) {
+ // Put it on the Macro nodes list to be removed during macro nodes expansion.
+ init_flags(Flag_is_macro);
+ C->add_macro_node(this);
+ }
+ virtual int Opcode() const;
+ virtual const Type *bottom_type() const { return TypeInt::BOOL; }
+ virtual const Type* Value(PhaseGVN* phase) const;
+ virtual Node* Identity(PhaseGVN* phase);
+};
+
+
//------------------------------ProfileBooleanNode-------------------------------
// A node represents value profile for a boolean during parsing.
// Once parsing is over, the node goes away (during IGVN).
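
A toy model (not VM code) of the Opaque4 lifecycle described above: Identity keeps the node alive while loop optimizations are still making progress, then collapses it to input 2; any Opaque4 that survives to macro expansion is likewise replaced by input 2 in macro.cpp. Names are illustrative:

```java
public class Opaque4Model {
    final boolean test;      // input 1: the check the compiler cannot prove
    final boolean promised;  // input 2: the constant we know the test takes

    Opaque4Model(boolean test, boolean promised) {
        this.test = test;
        this.promised = promised;
    }

    // Mirrors Opaque4Node::Identity: while loop opts run (major progress),
    // return the node itself so the guard survives; afterwards fold to
    // input 2 so the control depending on the test can be removed.
    Object identity(boolean majorProgress) {
        return majorProgress ? this : Boolean.valueOf(promised);
    }
}
```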
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/test/compiler/unsafe/TestMaybeNullUnsafeAccess.java Mon May 29 18:17:49 2017 +0200
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8176506
+ * @summary cast before unsafe access moved in dominating null check null path causes crash
+ * @modules java.base/jdk.internal.misc:+open
+ *
+ * @run main/othervm -Xbatch -XX:-UseOnStackReplacement TestMaybeNullUnsafeAccess
+ *
+ */
+
+import jdk.internal.misc.Unsafe;
+import java.lang.reflect.Field;
+
+public class TestMaybeNullUnsafeAccess {
+
+ static final jdk.internal.misc.Unsafe UNSAFE = Unsafe.getUnsafe();
+ static final long F_OFFSET;
+
+ static class A {
+ int f;
+ A(int f) {
+ this.f = f;
+ }
+ }
+
+ static {
+ try {
+ Field fField = A.class.getDeclaredField("f");
+ F_OFFSET = UNSAFE.objectFieldOffset(fField);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ static A test_helper(Object o) {
+ // this includes a check for null with both branches taken
+ return (A)o;
+ }
+
+
+ // The loop is unswitched because of the null test from the
+ // checkcast above; the unsafe access is copied into each branch,
+ // so the compiler sees a memory access on a null object
+ static int test1(Object o, long offset) {
+ int f = 0;
+ for (int i = 0; i < 100; i++) {
+ A a = test_helper(o);
+ f = UNSAFE.getInt(a, offset);
+ }
+ return f;
+ }
+
+ // Same as above, except that because the offset of the access is
+ // known to be small, we can deduce that object a cannot be null
+ static int test2(Object o) {
+ int f = 0;
+ for (int i = 0; i < 100; i++) {
+ A a = test_helper(o);
+ f = UNSAFE.getInt(a, F_OFFSET);
+ }
+ return f;
+ }
+
+ public static void main(String[] args) {
+ A a = new A(0x42);
+ for (int i = 0; i < 20000; i++) {
+ test_helper(null);
+ test_helper(a);
+ test1(a, F_OFFSET);
+ test2(a);
+ }
+ }
+
+}