--- a/make/CompileToolsHotspot.gmk Thu Mar 29 20:12:02 2018 +0100
+++ b/make/CompileToolsHotspot.gmk Sat Mar 24 01:08:35 2018 +0100
@@ -120,6 +120,7 @@
SRC := \
$(SRC_DIR)/org.graalvm.word/src \
$(SRC_DIR)/org.graalvm.collections/src \
+ $(SRC_DIR)/org.graalvm.compiler.bytecode/src \
$(SRC_DIR)/org.graalvm.compiler.replacements.verifier/src \
$(SRC_DIR)/org.graalvm.compiler.api.replacements/src \
$(SRC_DIR)/org.graalvm.compiler.code/src \
--- a/make/autoconf/hotspot.m4 Thu Mar 29 20:12:02 2018 +0100
+++ b/make/autoconf/hotspot.m4 Sat Mar 24 01:08:35 2018 +0100
@@ -343,11 +343,10 @@
fi
INCLUDE_GRAAL="true"
else
- # By default enable graal build on linux-x64 or where AOT is available.
+ # By default enable graal build on x64 or where AOT is available.
# graal build requires jvmci.
if test "x$JVM_FEATURES_jvmci" = "xjvmci" && \
- (test "x$OPENJDK_TARGET_CPU" = "xx86_64" && \
- test "x$OPENJDK_TARGET_OS" = "xlinux" || \
+ (test "x$OPENJDK_TARGET_CPU" = "xx86_64" || \
test "x$ENABLE_AOT" = "xtrue") ; then
AC_MSG_RESULT([yes])
JVM_FEATURES_graal="graal"
--- a/make/nb_native/nbproject/configurations.xml Thu Mar 29 20:12:02 2018 +0100
+++ b/make/nb_native/nbproject/configurations.xml Sat Mar 24 01:08:35 2018 +0100
@@ -2480,7 +2480,7 @@
<in>jvmtiClassFileReconstituter.hpp</in>
<in>jvmtiCodeBlobEvents.cpp</in>
<in>jvmtiCodeBlobEvents.hpp</in>
- <in>jvmtiEnter.hpp</in>
+ <in>jvmtiEnter.inline.hpp</in>
<in>jvmtiEnv.cpp</in>
<in>jvmtiEnvBase.cpp</in>
<in>jvmtiEnvBase.hpp</in>
@@ -13398,7 +13398,7 @@
tool="3"
flavor2="0">
</item>
- <item path="../../src/hotspot/share/prims/jvmtiEnter.hpp"
+ <item path="../../src/hotspot/share/prims/jvmtiEnter.inline.hpp"
ex="false"
tool="3"
flavor2="0">
@@ -27175,7 +27175,7 @@
tool="3"
flavor2="0">
</item>
- <item path="../../src/hotspot/share/prims/jvmtiEnter.hpp"
+ <item path="../../src/hotspot/share/prims/jvmtiEnter.inline.hpp"
ex="false"
tool="3"
flavor2="0">
--- a/src/hotspot/cpu/aarch64/aarch64.ad Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/aarch64.ad Sat Mar 24 01:08:35 2018 +0100
@@ -996,7 +996,7 @@
source_hpp %{
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "opto/addnode.hpp"
class CallStubImpl {
@@ -5845,8 +5845,8 @@
operand immByteMapBase()
%{
// Get base of card map
- predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
- (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base());
+ predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
+ (jbyte*)n->get_ptr() == ((CardTableBarrierSet*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base());
match(ConP);
op_cost(0);
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -36,7 +36,7 @@
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
// for the moment we reuse the logical/floating point immediate encode
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -34,10 +34,11 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
@@ -2174,8 +2175,8 @@
__ stp(length, src_pos, Address(sp, 2*BytesPerWord));
__ str(src, Address(sp, 4*BytesPerWord));
- address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
address copyfunc_addr = StubRoutines::generic_arraycopy();
+ assert(copyfunc_addr != NULL, "generic arraycopy stub required");
// The arguments are in java calling convention so we shift them
// to C convention
@@ -2188,17 +2189,12 @@
assert_different_registers(c_rarg3, j_rarg4);
__ mov(c_rarg3, j_rarg3);
__ mov(c_rarg4, j_rarg4);
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated
- __ mov(rscratch1, RuntimeAddress(C_entry));
- __ blrt(rscratch1, 5, 0, 1);
- } else {
#ifndef PRODUCT
- if (PrintC1Statistics) {
- __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
- }
+ if (PrintC1Statistics) {
+ __ incrementw(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
+ }
#endif
- __ far_call(RuntimeAddress(copyfunc_addr));
- }
+ __ far_call(RuntimeAddress(copyfunc_addr));
__ cbz(r0, *stub->continuation());
@@ -2208,14 +2204,12 @@
__ ldp(length, src_pos, Address(sp, 2*BytesPerWord));
__ ldr(src, Address(sp, 4*BytesPerWord));
- if (copyfunc_addr != NULL) {
- // r0 is -1^K where K == partial copied count
- __ eonw(rscratch1, r0, 0);
- // adjust length down and src/end pos up by partial copied count
- __ subw(length, length, rscratch1);
- __ addw(src_pos, src_pos, rscratch1);
- __ addw(dst_pos, dst_pos, rscratch1);
- }
+ // r0 is -1^K where K == partial copied count
+ __ eonw(rscratch1, r0, 0);
+ // adjust length down and src/dst pos up by partial copied count
+ __ subw(length, length, rscratch1);
+ __ addw(src_pos, src_pos, rscratch1);
+ __ addw(dst_pos, dst_pos, rscratch1);
__ b(*stub->entry());
__ bind(*stub->continuation());
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
#include "c1/c1_Runtime1.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/c2_globals_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -65,7 +65,7 @@
// Peephole and CISC spilling both break the graph, and so makes the
// scheduler sick.
define_pd_global(bool, OptoPeephole, false);
-define_pd_global(bool, UseCISCSpill, true);
+define_pd_global(bool, UseCISCSpill, false);
define_pd_global(bool, OptoScheduling, false);
define_pd_global(bool, OptoBundling, false);
define_pd_global(bool, OptoRegScheduling, false);
--- a/src/hotspot/cpu/aarch64/frame_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -158,4 +158,6 @@
// deoptimization support
void interpreter_frame_set_last_sp(intptr_t* sp);
+ static jint interpreter_frame_expression_stack_direction() { return -1; }
+
#endif // CPU_AARCH64_VM_FRAME_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -227,9 +227,6 @@
}
-inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }
-
-
// Entry frames
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "runtime/thread.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ masm->
+
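+// Pre-barrier for an oop arraycopy: with G1 (SATB) the previous values
+// of the destination elements must be handed to the runtime for
+// concurrent marking before they are overwritten, unless the
+// destination is known to be uninitialized.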
+void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, RegSet saved_regs) {
+ bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
+ if (!dest_uninitialized) {
+ __ push(saved_regs, sp);
+ if (count == c_rarg0) {
+ if (addr == c_rarg1) {
+ // exactly backwards!!
+ __ mov(rscratch1, c_rarg0);
+ __ mov(c_rarg0, c_rarg1);
+ __ mov(c_rarg1, rscratch1);
+ } else {
+ __ mov(c_rarg1, count);
+ __ mov(c_rarg0, addr);
+ }
+ } else {
+ __ mov(c_rarg0, addr);
+ __ mov(c_rarg1, count);
+ }
+ if (UseCompressedOops) {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
+ } else {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
+ }
+ __ pop(saved_regs, sp);
+ }
+}
+
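+// Post-barrier for an oop arraycopy: hand the written element range to
+// the runtime so the cards covered by the copy are recorded.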
+void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register start, Register end, Register scratch, RegSet saved_regs) {
+ __ push(saved_regs, sp);
+ // must compute element count unless barrier set interface is changed (other platforms supply count)
+ assert_different_registers(start, end, scratch);
+ __ lea(scratch, Address(end, BytesPerHeapOop));
+ __ sub(scratch, scratch, start); // subtract start to get #bytes
+ __ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count
+ __ mov(c_rarg0, start);
+ __ mov(c_rarg1, scratch);
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
+ __ pop(saved_regs, sp);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, RegSet saved_regs);
+ void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register start, Register end, Register tmp, RegSet saved_regs);
+};
+
+#endif // CPU_AARCH64_GC_G1_G1BARRIERSETASSEMBLER_AARCH64_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
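+// Per-platform hook that lets the active GC emit barrier code around
+// generated arraycopy stubs. The default implementation emits nothing;
+// concrete barrier sets override the prologue/epilogue as needed.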
+class BarrierSetAssembler: public CHeapObj<mtGC> {
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, RegSet saved_regs) {}
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register start, Register end, Register tmp, RegSet saved_regs) {}
+};
+
+#endif // CPU_AARCH64_GC_SHARED_BARRIERSETASSEMBLER_AARCH64_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ masm->
+
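+// Post-barrier for an oop arraycopy with a card-table barrier set:
+// dirty every card spanned by [start, end] (end is inclusive).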
+void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register start, Register end, Register scratch, RegSet saved_regs) {
+
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ Label L_loop;
+
+ __ lsr(start, start, CardTable::card_shift);
+ __ lsr(end, end, CardTable::card_shift);
+ __ sub(end, end, start); // index of the last card relative to the first
+
+ const Register count = end; // 'end' register now holds that card index delta
+ __ load_byte_map_base(scratch);
+ __ add(start, start, scratch);
+ if (UseConcMarkSweepGC) {
+ __ membar(__ StoreStore);
+ }
+ __ bind(L_loop);
+ __ strb(zr, Address(start, count));
+ __ subs(count, count, 1);
+ __ br(Assembler::GE, L_loop);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shared/cardTableBarrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register start, Register end, Register tmp, RegSet saved_regs);
+};
+
+#endif // CPU_AARCH64_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_AARCH64_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+#define __ masm->
+
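+// Modifying-reference barriers only apply to oop arrays; copies of
+// primitive arrays need no pre- or post-processing.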
+void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, RegSet saved_regs) {
+
+ if (is_oop) {
+ gen_write_ref_array_pre_barrier(masm, decorators, addr, count, saved_regs);
+ }
+}
+
+void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register start, Register end, Register tmp,
+ RegSet saved_regs) {
+ if (is_oop) {
+ gen_write_ref_array_post_barrier(masm, decorators, start, end, tmp, saved_regs);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/aarch64/gc/shared/modRefBarrierSetAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP
+#define CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, RegSet saved_regs) {}
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register start, Register end, Register tmp, RegSet saved_regs) {}
+
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, RegSet saved_regs);
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register start, Register end, Register tmp, RegSet saved_regs);
+};
+
+#endif // CPU_AARCH64_GC_SHARED_MODREFBARRIERSETASSEMBLER_AARCH64_HPP
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,6 +36,7 @@
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
*/
#include "precompiled.hpp"
+#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -32,7 +33,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
#define __ _masm->
@@ -42,6 +43,14 @@
Register InterpreterRuntime::SignatureHandlerGenerator::to() { return sp; }
Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; }
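+// Implementation moved out of the header, which is included in the
+// middle of class Interpreter and must not include macroAssembler.hpp.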
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
+ const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+ _num_int_args = (method->is_static() ? 1 : 0);
+ _num_fp_args = 0;
+ _stack_offset = 0;
+}
+
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
const Address src(from(), Interpreter::local_offset_in_bytes(offset()));
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,8 +26,8 @@
#ifndef CPU_AARCH64_VM_INTERPRETERRT_AARCH64_HPP
#define CPU_AARCH64_VM_INTERPRETERRT_AARCH64_HPP
-#include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
+// This is included in the middle of class Interpreter.
+// Do not include files here.
// native method calls
@@ -47,12 +47,7 @@
public:
// Creation
- SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
- _masm = new MacroAssembler(buffer);
- _num_int_args = (method->is_static() ? 1 : 0);
- _num_fp_args = 0;
- _stack_offset = 0;
- }
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
// Code generation
void generate(uint64_t fingerprint);
--- a/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/jvmciCodeInstaller_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_aarch64.inline.hpp"
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "compiler/disassembler.hpp"
#include "memory/resourceArea.hpp"
@@ -42,7 +42,7 @@
#include "opto/node.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
@@ -3618,10 +3618,10 @@
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef,
+ assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
@@ -4129,7 +4129,7 @@
DirtyCardQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
@@ -4515,7 +4515,7 @@
void MacroAssembler::load_byte_map_base(Register reg) {
jbyte *byte_map_base =
- ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base();
+ ((CardTableBarrierSet*)(Universe::heap()->barrier_set()))->card_table()->byte_map_base();
if (is_valid_AArch64_address((address)byte_map_base)) {
// Strictly speaking the byte_map_base isn't an address at all,
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -79,8 +79,8 @@
void call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions = true);
- // Maximum size of class area in Metaspace when compressed
- uint64_t use_XOR_for_compressed_class_base;
+ // True if an XOR can be used to expand narrow klass references.
+ bool use_XOR_for_compressed_class_base;
public:
MacroAssembler(CodeBuffer* code) : Assembler(code) {
@@ -88,7 +88,7 @@
= (operand_valid_for_logical_immediate(false /*is32*/,
(uint64_t)Universe::narrow_klass_base())
&& ((uint64_t)Universe::narrow_klass_base()
- > (1u << log2_intptr(CompressedClassSpaceSize))));
+ > (1UL << log2_intptr(Universe::narrow_klass_range()))));
}
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,6 +30,7 @@
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
#define __ _masm->
--- a/src/hotspot/cpu/aarch64/runtime_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/runtime_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/runtime.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -34,6 +34,7 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,8 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/instanceOop.hpp"
@@ -620,111 +620,6 @@
void array_overlap_test(Label& L_no_overlap, Address::sxtw sf) { __ b(L_no_overlap); }
- // Generate code for an array write pre barrier
- //
- // addr - starting address
- // count - element count
- // tmp - scratch register
- // saved_regs - registers to be saved before calling static_write_ref_array_pre
- //
- // Callers must specify which registers to preserve in saved_regs.
- // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
- //
- void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized, RegSet saved_regs) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- // With G1, don't generate the call if we statically know that the target in uninitialized
- if (!dest_uninitialized) {
- __ push(saved_regs, sp);
- if (count == c_rarg0) {
- if (addr == c_rarg1) {
- // exactly backwards!!
- __ mov(rscratch1, c_rarg0);
- __ mov(c_rarg0, c_rarg1);
- __ mov(c_rarg1, rscratch1);
- } else {
- __ mov(c_rarg1, count);
- __ mov(c_rarg0, addr);
- }
- } else {
- __ mov(c_rarg0, addr);
- __ mov(c_rarg1, count);
- }
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
- __ pop(saved_regs, sp);
- }
- break;
- case BarrierSet::CardTableModRef:
- break;
- default:
- ShouldNotReachHere();
-
- }
- }
-
- //
- // Generate code for an array write post barrier
- //
- // Input:
- // start - register containing starting address of destination array
- // end - register containing ending address of destination array
- // scratch - scratch register
- // saved_regs - registers to be saved before calling static_write_ref_array_post
- //
- // The input registers are overwritten.
- // The ending address is inclusive.
- // Callers must specify which registers to preserve in saved_regs.
- // Clobbers: r0-r18, v0-v7, v16-v31, except saved_regs.
- void gen_write_ref_array_post_barrier(Register start, Register end, Register scratch, RegSet saved_regs) {
- assert_different_registers(start, end, scratch);
- BarrierSet* bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
-
- {
- __ push(saved_regs, sp);
- // must compute element count unless barrier set interface is changed (other platforms supply count)
- assert_different_registers(start, end, scratch);
- __ lea(scratch, Address(end, BytesPerHeapOop));
- __ sub(scratch, scratch, start); // subtract start to get #bytes
- __ lsr(scratch, scratch, LogBytesPerHeapOop); // convert to element count
- __ mov(c_rarg0, start);
- __ mov(c_rarg1, scratch);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
- __ pop(saved_regs, sp);
- }
- break;
- case BarrierSet::CardTableModRef:
- {
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
- CardTable* ct = ctbs->card_table();
- assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
-
- Label L_loop;
-
- __ lsr(start, start, CardTable::card_shift);
- __ lsr(end, end, CardTable::card_shift);
- __ sub(end, end, start); // number of bytes to copy
-
- const Register count = end; // 'end' register contains bytes count now
- __ load_byte_map_base(scratch);
- __ add(start, start, scratch);
- if (UseConcMarkSweepGC) {
- __ membar(__ StoreStore);
- }
- __ BIND(L_loop);
- __ strb(zr, Address(start, count));
- __ subs(count, count, 1);
- __ br(Assembler::GE, L_loop);
- }
- break;
- default:
- ShouldNotReachHere();
-
- }
- }
-
// The inner part of zero_words(). This is the bulk operation,
// zeroing words in blocks, possibly using DC ZVA to do it. The
// caller is responsible for zeroing the last few words.
@@ -1456,20 +1351,33 @@
BLOCK_COMMENT("Entry:");
}
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
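+ // Delegate GC-specific pre-copy work to the active barrier set's
+ // assembler instead of switching on the barrier set kind here.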
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_reg);
+
if (is_oop) {
- gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_reg);
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
copy_memory(aligned, s, d, count, rscratch1, size);
+
if (is_oop) {
__ pop(RegSet::of(d, count), sp);
if (VerifyOops)
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
- gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
+
+ bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet());
+
__ leave();
__ mov(r0, zr); // return 0
__ ret(lr);
@@ -1517,8 +1425,18 @@
__ cmp(rscratch1, count, Assembler::LSL, exact_log2(size));
__ br(Assembler::HS, nooverlap_target);
+ DecoratorSet decorators = 0;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, is_oop, d, count, saved_regs);
+
if (is_oop) {
- gen_write_ref_array_pre_barrier(d, count, dest_uninitialized, saved_regs);
// save regs before copy_memory
__ push(RegSet::of(d, count), sp);
}
@@ -1529,8 +1447,8 @@
verify_oop_array(size, d, count, r16);
__ sub(count, count, 1); // make an inclusive end pointer
__ lea(count, Address(d, count, Address::lsl(exact_log2(size))));
- gen_write_ref_array_post_barrier(d, count, rscratch1, RegSet());
}
+ bs->arraycopy_epilogue(_masm, decorators, is_oop, d, count, rscratch1, RegSet());
__ leave();
__ mov(r0, zr); // return 0
__ ret(lr);
@@ -1871,7 +1789,14 @@
}
#endif //ASSERT
- gen_write_ref_array_pre_barrier(to, count, dest_uninitialized, wb_pre_saved_regs);
+ DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
+ bool is_oop = true;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, is_oop, to, count, wb_pre_saved_regs);
// save the original count
__ mov(count_save, count);
@@ -1915,7 +1840,7 @@
__ BIND(L_do_card_marks);
__ add(to, to, -heapOopSize); // make an inclusive end pointer
- gen_write_ref_array_post_barrier(start_to, to, rscratch1, wb_post_saved_regs);
+ bs->arraycopy_epilogue(_masm, decorators, is_oop, start_to, to, rscratch1, wb_post_saved_regs);
__ bind(L_done_pop);
__ pop(RegSet::of(r18, r19, r20, r21), sp);
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,6 +35,7 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
@@ -184,7 +185,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
{
if (val == noreg) {
__ store_heap_oop_null(obj);
@@ -1904,7 +1905,8 @@
in_bytes(InvocationCounter::counter_offset()));
const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
- r0, rscratch1, false, Assembler::EQ, &backedge_counter_overflow);
+ r0, rscratch1, false, Assembler::EQ,
+ UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
__ b(dispatch);
}
__ bind(no_mdo);
@@ -1912,7 +1914,8 @@
__ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
__ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
- r0, rscratch2, false, Assembler::EQ, &backedge_counter_overflow);
+ r0, rscratch2, false, Assembler::EQ,
+ UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
} else { // not TieredCompilation
// increment counter
__ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
@@ -1960,8 +1963,8 @@
}
}
}
+ __ bind(dispatch);
}
- __ bind(dispatch);
// Pre-load the next target bytecode into rscratch1
__ load_unsigned_byte(rscratch1, Address(rbcp, 0));
@@ -1981,7 +1984,7 @@
__ b(dispatch);
}
- if (TieredCompilation || UseOnStackReplacement) {
+ if (UseOnStackReplacement) {
// invocation counter overflow
__ bind(backedge_counter_overflow);
__ neg(r2, r2);
@@ -1991,11 +1994,6 @@
CAST_FROM_FN_PTR(address,
InterpreterRuntime::frequency_counter_overflow),
r2);
- if (!UseOnStackReplacement)
- __ b(dispatch);
- }
-
- if (UseOnStackReplacement) {
__ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
// r0: osr nmethod (osr ok) or NULL (osr not possible)
--- a/src/hotspot/cpu/arm/assembler_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/assembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
@@ -35,7 +35,7 @@
#include "prims/jvm_misc.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/arm/assembler_arm_32.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/assembler_arm_32.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
@@ -35,7 +35,7 @@
#include "prims/jvm_misc.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/arm/assembler_arm_64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/assembler_arm_64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
@@ -35,7 +35,7 @@
#include "prims/jvm_misc.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/arm/c1_Defs_arm.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_Defs_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -79,7 +79,7 @@
#else
#define PATCHED_ADDR (204)
#endif
-#define CARDTABLEMODREF_POST_BARRIER_HELPER
+#define CARDTABLEBARRIERSET_POST_BARRIER_HELPER
#define GENERATE_ADDRESS_IS_PREFERRED
#endif // CPU_ARM_VM_C1_DEFS_ARM_HPP
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,10 +31,11 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_arm.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"
@@ -2777,17 +2778,14 @@
#endif // AARCH64
address copyfunc_addr = StubRoutines::generic_arraycopy();
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated
- __ call(CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
- } else {
+ assert(copyfunc_addr != NULL, "generic arraycopy stub required");
#ifndef PRODUCT
- if (PrintC1Statistics) {
- __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
- }
+ if (PrintC1Statistics) {
+ __ inc_counter((address)&Runtime1::_generic_arraycopystub_cnt, tmp, tmp2);
+ }
#endif // !PRODUCT
- // the stub is in the code cache so close enough
- __ call(copyfunc_addr, relocInfo::runtime_call_type);
- }
+ // the stub is in the code cache so close enough
+ __ call(copyfunc_addr, relocInfo::runtime_call_type);
#ifdef AARCH64
__ raw_pop(length, ZR);
@@ -2797,15 +2795,11 @@
__ cbz_32(R0, *stub->continuation());
- if (copyfunc_addr != NULL) {
- __ mvn_32(tmp, R0);
- restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
- __ sub_32(length, length, tmp);
- __ add_32(src_pos, src_pos, tmp);
- __ add_32(dst_pos, dst_pos, tmp);
- } else {
- restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
- }
+ __ mvn_32(tmp, R0);
+ restore_from_reserved_area(R0, R1, R2, R3); // load saved arguments in slow case only
+ __ sub_32(length, length, tmp);
+ __ add_32(src_pos, src_pos, tmp);
+ __ add_32(dst_pos, dst_pos, tmp);
__ b(*stub->entry());
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,7 +35,7 @@
#include "ci/ciTypeArrayKlass.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_arm.inline.hpp"
@@ -497,7 +497,7 @@
#endif // AARCH64
}
-void LIRGenerator::CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
+void LIRGenerator::CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base) {
assert(addr->is_register(), "must be a register at this point");
LIR_Opr tmp = FrameMap::LR_ptr_opr;
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/arm/frame_arm.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/frame_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -135,4 +135,6 @@
// helper to update a map with callee-saved FP
static void update_map_with_saved_link(RegisterMap* map, intptr_t** link_addr);
+ static jint interpreter_frame_expression_stack_direction() { return -1; }
+
#endif // CPU_ARM_VM_FRAME_ARM_HPP
--- a/src/hotspot/cpu/arm/frame_arm.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/frame_arm.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -218,9 +218,6 @@
}
-inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }
-
-
// Entry frames
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/macros.hpp"
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
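+// The ARM port passes the registers to preserve as a count: R0 up to
+// R(callee_saved_regs-1) are saved around the runtime call.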
+void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, int callee_saved_regs) {
+ bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
+ if (!dest_uninitialized) {
+ assert( addr->encoding() < callee_saved_regs, "addr must be saved");
+ assert(count->encoding() < callee_saved_regs, "count must be saved");
+
+ BLOCK_COMMENT("PreBarrier");
+
+#ifdef AARCH64
+ callee_saved_regs = align_up(callee_saved_regs, 2);
+ for (int i = 0; i < callee_saved_regs; i += 2) {
+ __ raw_push(as_Register(i), as_Register(i+1));
+ }
+#else
+ RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
+ __ push(saved_regs | R9ifScratched);
+#endif // AARCH64
+
+ if (addr != R0) {
+ assert_different_registers(count, R0);
+ __ mov(R0, addr);
+ }
+#ifdef AARCH64
+ __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t
+#else
+ if (count != R1) {
+ __ mov(R1, count);
+ }
+#endif // AARCH64
+
+ if (UseCompressedOops) {
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry));
+ } else {
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry));
+ }
+
+#ifdef AARCH64
+ for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
+ __ raw_pop(as_Register(i), as_Register(i+1));
+ }
+#else
+ __ pop(saved_regs | R9ifScratched);
+#endif // AARCH64
+ }
+}
+
+void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp) {
+
+ BLOCK_COMMENT("G1PostBarrier");
+ if (addr != R0) {
+ assert_different_registers(count, R0);
+ __ mov(R0, addr);
+ }
+#ifdef AARCH64
+ __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t
+#else
+ if (count != R1) {
+ __ mov(R1, count);
+ }
+#if R9_IS_SCRATCHED
+ // Safer to save R9 here since callers may have been written
+ // assuming R9 survives. This is suboptimal but is not in
+ // general worth optimizing for the few platforms where R9
+ // is scratched. Note that the optimization might not be to
+ // difficult for this particular call site.
+ __ push(R9);
+#endif // !R9_IS_SCRATCHED
+#endif // !AARCH64
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
+#ifndef AARCH64
+#if R9_IS_SCRATCHED
+ __ pop(R9);
+#endif // !R9_IS_SCRATCHED
+#endif // !AARCH64
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
+#define CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, int callee_saved_regs);
+ void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp);
+};
+
+#endif // CPU_ARM_GC_G1_G1BARRIERSETASSEMBLER_ARM_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/shared/barrierSetAssembler_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
+#define CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
+class BarrierSetAssembler: public CHeapObj<mtGC> {
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, int callee_saved_regs) {}
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, Register tmp) {}
+};
+
+#endif // CPU_ARM_GC_SHARED_BARRIERSETASSEMBLER_ARM_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "runtime/globals.hpp"
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp) {
+ BLOCK_COMMENT("CardTablePostBarrier");
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ Label L_cardtable_loop, L_done;
+
+ __ cbz_32(count, L_done); // zero count - nothing to do
+
+ __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
+ __ sub(count, count, BytesPerHeapOop); // last addr
+
+ __ logical_shift_right(addr, addr, CardTable::card_shift);
+ __ logical_shift_right(count, count, CardTable::card_shift);
+ __ sub(count, count, addr); // nb of cards
+
+ // warning: Rthread has not been preserved
+ __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
+ __ add(addr, tmp, addr);
+
+ Register zero = __ zero_register(tmp);
+
+ __ BIND(L_cardtable_loop);
+ __ strb(zero, Address(addr, 1, post_indexed));
+ __ subs(count, count, 1);
+ __ b(L_cardtable_loop, ge);
+ __ BIND(L_done);
+}
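
The post barrier above turns an oop range into a card range: shift both
endpoints right by card_shift, subtract to get the card count, then store a
zero byte per card. A standalone C++ sketch of the same arithmetic (card
size, oop size and the "0 means dirty" convention are assumptions mirroring
the generated code, not shared HotSpot constants):

    #include <cstdint>
    #include <cstring>

    static const int     card_shift = 9;   // 512-byte cards, assumed
    static const size_t  oop_size   = 8;   // BytesPerHeapOop, assumed
    static unsigned char card_map[1 << 16];

    void write_ref_array_post(uintptr_t addr, uint32_t count) {
      if (count == 0) return;                              // cbz_32 path
      uintptr_t last = addr + count * oop_size - oop_size; // last oop address
      size_t first_card = addr >> card_shift;              // logical_shift_right
      size_t last_card  = last >> card_shift;
      // The emitted loop runs (last_card - first_card) + 1 times, one strb per card.
      memset(&card_map[first_card], 0, last_card - first_card + 1);
    }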
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/shared/cardTableBarrierSetAssembler_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP
+#define CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp);
+};
+
+#endif // CPU_ARM_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ARM_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+#define __ masm->
+
+void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, int callee_saved_regs) {
+
+ if (is_oop) {
+ gen_write_ref_array_pre_barrier(masm, decorators, addr, count, callee_saved_regs);
+ }
+}
+
+void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, Register tmp) {
+ if (is_oop) {
+ gen_write_ref_array_post_barrier(masm, decorators, addr, count, tmp);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/arm/gc/shared/modRefBarrierSetAssembler_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
+#define CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, int callee_saved_regs) {}
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp) {}
+
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, int callee_saved_regs);
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
+ Register addr, Register count, Register tmp);
+};
+
+#endif // CPU_ARM_GC_SHARED_MODREFBARRIERSETASSEMBLER_ARM_HPP
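
Together the new ARM headers form a small template-method hierarchy:
BarrierSetAssembler supplies no-op arraycopy hooks for collectors without
barriers, ModRefBarrierSetAssembler filters on "is this an oop copy?", and
concrete assemblers override only the pre/post generators they need. A
compilable toy model of that shape (all type names here are stand-ins, not
the HotSpot classes):

    typedef unsigned long DecoratorSet;  // stand-in for the real typedef
    struct Asm {};                       // stand-in for MacroAssembler

    struct BarrierSetAsmModel {
      virtual void arraycopy_prologue(Asm*, DecoratorSet, bool is_oop) {}
      virtual void arraycopy_epilogue(Asm*, DecoratorSet, bool is_oop) {}
      virtual ~BarrierSetAsmModel() {}
    };

    struct ModRefModel : BarrierSetAsmModel {
    protected:
      virtual void pre_barrier(Asm*, DecoratorSet)  {}
      virtual void post_barrier(Asm*, DecoratorSet) {}
    public:
      // Shared ModRef policy: barriers apply only when oops are copied.
      void arraycopy_prologue(Asm* masm, DecoratorSet d, bool is_oop) {
        if (is_oop) pre_barrier(masm, d);
      }
      void arraycopy_epilogue(Asm* masm, DecoratorSet d, bool is_oop) {
        if (is_oop) post_barrier(masm, d);
      }
    };

    struct CardTableModel : ModRefModel {
    protected:
      // Card tables need only the post barrier; the pre hook stays a no-op.
      void post_barrier(Asm*, DecoratorSet) { /* dirty the cards */ }
    };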
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,9 +24,9 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
+#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interp_masm_arm.hpp"
#include "interpreter/interpreter.hpp"
@@ -40,6 +40,7 @@
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#if INCLUDE_ALL_GCS
@@ -411,10 +412,10 @@
void InterpreterMacroAssembler::store_check_part1(Register card_table_base) {
// Check barrier set type (should be card table) and element size
BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef,
+ assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "Adjust store check code");
@@ -473,7 +474,7 @@
#ifdef AARCH64
strb(ZR, card_table_addr);
#else
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = ctbs->card_table();
if ((((uintptr_t)ct->byte_map_base() & 0xff) == 0)) {
// Card table is aligned so the lowest byte of the table address base is zero.
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -31,11 +32,26 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
#define __ _masm->
+Interpreter::SignatureHandlerGenerator::SignatureHandlerGenerator(
+ const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+ _abi_offset = 0;
+ _ireg = is_static() ? 2 : 1;
+#ifdef __ABI_HARD__
+#ifdef AARCH64
+ _freg = 0;
+#else
+ _fp_slot = 0;
+ _single_fpr_slot = 0;
+#endif
+#endif
+}
+
#ifdef SHARING_FAST_NATIVE_FINGERPRINTS
// mapping from SignatureIterator param to (common) type of parsing
static const u1 shared_type[] = {
--- a/src/hotspot/cpu/arm/interpreterRT_arm.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,6 @@
#ifndef CPU_ARM_VM_INTERPRETERRT_ARM_HPP
#define CPU_ARM_VM_INTERPRETERRT_ARM_HPP
-#include "memory/allocation.hpp"
-
// native method calls
class SignatureHandlerGenerator: public NativeSignatureIterator {
@@ -56,23 +54,10 @@
#endif
public:
// Creation
- SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
- _masm = new MacroAssembler(buffer);
- _abi_offset = 0;
- _ireg = is_static() ? 2 : 1;
-#ifdef __ABI_HARD__
-#ifdef AARCH64
- _freg = 0;
-#else
- _fp_slot = 0;
- _single_fpr_slot = 0;
-#endif
-#endif
- }
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
// Code generation
void generate(uint64_t fingerprint);
-
};
#ifndef AARCH64
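
Moving the SignatureHandlerGenerator constructor out of line is part of the
include-hygiene theme of this change: the body allocates a MacroAssembler,
so keeping it in the header forced macro-assembler headers onto every
includer (and the PPC counterpart later in this patch is even included
mid-class, where extra includes are illegal). A minimal illustration of the
split, with illustrative names:

    // generator.hpp - declaration only; no heavyweight includes required.
    class Generator {
    public:
      Generator(int fingerprint);   // body moved to the .cpp, as above
    private:
      int _fingerprint;
    };

    // generator.cpp - the definition is free to include what it needs
    // (interp_masm.hpp in the real change) without imposing those headers
    // on every file that merely sees the declaration.
    Generator::Generator(int fingerprint) : _fingerprint(fingerprint) {}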
--- a/src/hotspot/cpu/arm/jvmciCodeInstaller_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/jvmciCodeInstaller_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_arm.inline.hpp"
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,14 +30,14 @@
#include "code/nativeInst.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -2267,7 +2267,7 @@
DirtyCardQueue::byte_offset_of_buf()));
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
Label done;
Label runtime;
--- a/src/hotspot/cpu/arm/methodHandles_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/methodHandles_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
#define __ _masm->
--- a/src/hotspot/cpu/arm/runtime_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/runtime_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,7 @@
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "opto/runtime.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/arm/stubGenerator_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/stubGenerator_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "assembler_arm.inline.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_arm.hpp"
#include "oops/instanceOop.hpp"
@@ -2855,148 +2855,6 @@
return start;
}
-#if INCLUDE_ALL_GCS
- //
- // Generate pre-write barrier for array.
- //
- // Input:
- // addr - register containing starting address
- // count - register containing element count, 32-bit int
- // callee_saved_regs -
- // the call must preserve this number of registers: R0, R1, ..., R[callee_saved_regs-1]
- //
- // callee_saved_regs must include addr and count
- // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) except for callee_saved_regs.
- void gen_write_ref_array_pre_barrier(Register addr, Register count, int callee_saved_regs) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- {
- assert( addr->encoding() < callee_saved_regs, "addr must be saved");
- assert(count->encoding() < callee_saved_regs, "count must be saved");
-
- BLOCK_COMMENT("PreBarrier");
-
-#ifdef AARCH64
- callee_saved_regs = align_up(callee_saved_regs, 2);
- for (int i = 0; i < callee_saved_regs; i += 2) {
- __ raw_push(as_Register(i), as_Register(i+1));
- }
-#else
- RegisterSet saved_regs = RegisterSet(R0, as_Register(callee_saved_regs-1));
- __ push(saved_regs | R9ifScratched);
-#endif // AARCH64
-
- if (addr != R0) {
- assert_different_registers(count, R0);
- __ mov(R0, addr);
- }
-#ifdef AARCH64
- __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_pre takes size_t
-#else
- if (count != R1) {
- __ mov(R1, count);
- }
-#endif // AARCH64
-
- __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
-
-#ifdef AARCH64
- for (int i = callee_saved_regs - 2; i >= 0; i -= 2) {
- __ raw_pop(as_Register(i), as_Register(i+1));
- }
-#else
- __ pop(saved_regs | R9ifScratched);
-#endif // AARCH64
- }
- case BarrierSet::CardTableModRef:
- break;
- default:
- ShouldNotReachHere();
- }
- }
-#endif // INCLUDE_ALL_GCS
-
- //
- // Generate post-write barrier for array.
- //
- // Input:
- // addr - register containing starting address (can be scratched)
- // count - register containing element count, 32-bit int (can be scratched)
- // tmp - scratch register
- //
- // Note: LR can be scratched but might be equal to addr, count or tmp
- // Blows all volatile registers (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR).
- void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp) {
- assert_different_registers(addr, count, tmp);
- BarrierSet* bs = Universe::heap()->barrier_set();
-
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- {
- BLOCK_COMMENT("G1PostBarrier");
- if (addr != R0) {
- assert_different_registers(count, R0);
- __ mov(R0, addr);
- }
-#ifdef AARCH64
- __ zero_extend(R1, count, 32); // BarrierSet::static_write_ref_array_post takes size_t
-#else
- if (count != R1) {
- __ mov(R1, count);
- }
-#if R9_IS_SCRATCHED
- // Safer to save R9 here since callers may have been written
- // assuming R9 survives. This is suboptimal but is not in
- // general worth optimizing for the few platforms where R9
- // is scratched. Note that the optimization might not be to
- // difficult for this particular call site.
- __ push(R9);
-#endif
-#endif // !AARCH64
- __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
-#ifndef AARCH64
-#if R9_IS_SCRATCHED
- __ pop(R9);
-#endif
-#endif // !AARCH64
- }
- break;
- case BarrierSet::CardTableModRef:
- {
- BLOCK_COMMENT("CardTablePostBarrier");
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
- CardTable* ct = ctbs->card_table();
- assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
-
- Label L_cardtable_loop, L_done;
-
- __ cbz_32(count, L_done); // zero count - nothing to do
-
- __ add_ptr_scaled_int32(count, addr, count, LogBytesPerHeapOop);
- __ sub(count, count, BytesPerHeapOop); // last addr
-
- __ logical_shift_right(addr, addr, CardTable::card_shift);
- __ logical_shift_right(count, count, CardTable::card_shift);
- __ sub(count, count, addr); // nb of cards
-
- // warning: Rthread has not been preserved
- __ mov_address(tmp, (address) ct->byte_map_base(), symbolic_Relocation::card_table_reference);
- __ add(addr,tmp, addr);
-
- Register zero = __ zero_register(tmp);
-
- __ BIND(L_cardtable_loop);
- __ strb(zero, Address(addr, 1, post_indexed));
- __ subs(count, count, 1);
- __ b(L_cardtable_loop, ge);
- __ BIND(L_done);
- }
- break;
- default:
- ShouldNotReachHere();
- }
- }
// Generates pattern of code to be placed after raw data copying in generate_oop_copy
// Includes return from arraycopy stub.
@@ -3007,7 +2865,7 @@
// count: total number of copied elements, 32-bit int
//
// Blows all volatile (R0-R3 on 32-bit ARM, R0-R18 on AArch64, Rtemp, LR) and 'to', 'count', 'tmp' registers.
- void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward) {
+ void oop_arraycopy_stub_epilogue_helper(Register to, Register count, Register tmp, bool status, bool forward, DecoratorSet decorators) {
assert_different_registers(to, count, tmp);
if (forward) {
@@ -3018,7 +2876,8 @@
// 'to' is the beginning of the region
- gen_write_ref_array_post_barrier(to, count, tmp);
+ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->arraycopy_epilogue(this, decorators, true, to, count, tmp);
if (status) {
__ mov(R0, 0); // OK
@@ -3086,9 +2945,16 @@
__ push(LR);
#endif // AARCH64
-#if INCLUDE_ALL_GCS
- gen_write_ref_array_pre_barrier(to, count, callee_saved_regs);
-#endif // INCLUDE_ALL_GCS
+ DecoratorSet decorators = 0;
+ if (disjoint) {
+ decorators |= ARRAYCOPY_DISJOINT;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
// save arguments for barrier generation (after the pre barrier)
__ mov(saved_count, count);
@@ -3146,12 +3012,12 @@
}
assert(small_copy_limit >= count_required_to_align + min_copy, "first loop might exhaust count");
- oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward);
+ oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
{
copy_small_array(from, to, count, tmp1, noreg, bytes_per_count, forward, L_small_array);
- oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward);
+ oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
}
if (!to_is_aligned) {
@@ -3165,7 +3031,7 @@
int min_copy_shifted = align_dst_and_generate_shifted_copy_loop(from, to, count, bytes_per_count, forward);
assert (small_copy_limit >= count_required_to_align + min_copy_shifted, "first loop might exhaust count");
- oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward);
+ oop_arraycopy_stub_epilogue_helper(to, saved_count, /* tmp */ tmp1, status, forward, decorators);
}
return start;
@@ -3336,7 +3202,7 @@
const int callee_saved_regs = AARCH64_ONLY(5) NOT_AARCH64(4); // LR saved differently
- Label load_element, store_element, do_card_marks, fail;
+ Label load_element, store_element, do_epilogue, fail;
BLOCK_COMMENT("Entry:");
@@ -3351,9 +3217,10 @@
pushed+=1;
#endif // AARCH64
-#if INCLUDE_ALL_GCS
- gen_write_ref_array_pre_barrier(to, count, callee_saved_regs);
-#endif // INCLUDE_ALL_GCS
+ DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
+
+ BarrierSetAssembler *bs = BarrierSet::barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(this, decorators, true, to, count, callee_saved_regs);
#ifndef AARCH64
const RegisterSet caller_saved_regs = RegisterSet(R4,R6) | RegisterSet(R8,R9) | altFP_7_11;
@@ -3399,7 +3266,7 @@
__ subs_32(count,count,1);
__ str(R5, Address(to, BytesPerHeapOop, post_indexed)); // store the oop
}
- __ b(do_card_marks, eq); // count exhausted
+ __ b(do_epilogue, eq); // count exhausted
// ======== loop entry is here ========
__ BIND(load_element);
@@ -3421,7 +3288,7 @@
// Note: fail marked by the fact that count differs from saved_count
- __ BIND(do_card_marks);
+ __ BIND(do_epilogue);
Register copied = AARCH64_ONLY(R20) NOT_AARCH64(R4); // saved
Label L_not_copied;
@@ -3431,7 +3298,7 @@
__ sub(to, to, AsmOperand(copied, lsl, LogBytesPerHeapOop)); // initial to value
__ mov(R12, copied); // count arg scratched by post barrier
- gen_write_ref_array_post_barrier(to, R12, R3);
+ bs->arraycopy_epilogue(this, decorators, true, to, R12, R3);
assert_different_registers(R3,R12,LR,copied,saved_count);
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R3, R12);
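
The stub generator now describes each copy with a DecoratorSet and lets the
active BarrierSetAssembler interpret the bits, instead of calling
GC-specific helpers directly. A self-contained sketch of the flag
composition (the flag values are illustrative; the real constants live in
the shared access/decorator headers):

    #include <cstdint>

    typedef uint64_t DecoratorSet;                     // stand-in typedef
    const DecoratorSet ARRAYCOPY_DISJOINT  = 1u << 0;  // assumed values
    const DecoratorSet ARRAYCOPY_ALIGNED   = 1u << 1;
    const DecoratorSet ARRAYCOPY_CHECKCAST = 1u << 2;

    // Callers OR together the properties of one particular copy, exactly
    // as the oop-copy stub above does...
    DecoratorSet describe_copy(bool disjoint, bool aligned) {
      DecoratorSet decorators = 0;
      if (disjoint) decorators |= ARRAYCOPY_DISJOINT;
      if (aligned)  decorators |= ARRAYCOPY_ALIGNED;
      return decorators;
    }

    // ...and the barrier assembler tests individual bits where behavior
    // differs, as the PPC ModRef epilogue later in this patch does.
    bool is_checkcast_copy(DecoratorSet decorators) {
      return (decorators & ARRAYCOPY_CHECKCAST) != 0;
    }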
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -34,6 +34,7 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
@@ -228,7 +229,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
{
if (is_null) {
__ store_heap_oop_null(new_val, obj);
--- a/src/hotspot/cpu/ppc/assembler_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/assembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,13 +25,13 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -33,9 +33,10 @@
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -1858,34 +1859,31 @@
if (op->expected_type() == NULL) {
assert(src->is_nonvolatile() && src_pos->is_nonvolatile() && dst->is_nonvolatile() && dst_pos->is_nonvolatile() &&
length->is_nonvolatile(), "must preserve");
+ address copyfunc_addr = StubRoutines::generic_arraycopy();
+ assert(copyfunc_addr != NULL, "generic arraycopy stub required");
+
// 3 parms are int. Convert to long.
__ mr(R3_ARG1, src);
__ extsw(R4_ARG2, src_pos);
__ mr(R5_ARG3, dst);
__ extsw(R6_ARG4, dst_pos);
__ extsw(R7_ARG5, length);
- address copyfunc_addr = StubRoutines::generic_arraycopy();
-
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated.
- address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
- __ call_c_with_frame_resize(entry, frame_resize);
- } else {
+
#ifndef PRODUCT
- if (PrintC1Statistics) {
- address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
- int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
- __ lwz(R11_scratch1, simm16_offs, tmp);
- __ addi(R11_scratch1, R11_scratch1, 1);
- __ stw(R11_scratch1, simm16_offs, tmp);
- }
+ if (PrintC1Statistics) {
+ address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
+ int simm16_offs = __ load_const_optimized(tmp, counter, tmp2, true);
+ __ lwz(R11_scratch1, simm16_offs, tmp);
+ __ addi(R11_scratch1, R11_scratch1, 1);
+ __ stw(R11_scratch1, simm16_offs, tmp);
+ }
#endif
- __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
-
- __ nand(tmp, R3_RET, R3_RET);
- __ subf(length, tmp, length);
- __ add(src_pos, tmp, src_pos);
- __ add(dst_pos, tmp, dst_pos);
- }
+ __ call_c_with_frame_resize(copyfunc_addr, /*stub does not need resized frame*/ 0);
+
+ __ nand(tmp, R3_RET, R3_RET);
+ __ subf(length, tmp, length);
+ __ add(src_pos, tmp, src_pos);
+ __ add(dst_pos, tmp, dst_pos);
__ cmpwi(CCR0, R3_RET, 0);
__ bc_far_optimized(Assembler::bcondCRbiIs1, __ bi0(CCR0, Assembler::less), *stub->entry());
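
The nand/subf/add sequence decodes the generic arraycopy stub's return
convention: R3_RET is 0 for a complete copy, otherwise the bitwise
complement of the number of elements copied before the failure. A C-level
sketch of the decode, as applied on the partial-copy path before branching
to the slow stub:

    #include <cstdint>

    void adjust_for_partial_copy(int32_t ret, int32_t& length,
                                 int32_t& src_pos, int32_t& dst_pos) {
      int32_t copied = ~ret;      // nand(tmp, R3_RET, R3_RET)
      length  -= copied;          // subf(length, tmp, length)
      src_pos += copied;          // add(src_pos, tmp, src_pos)
      dst_pos += copied;          // add(dst_pos, tmp, dst_pos)
    }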
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,7 +29,7 @@
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/ppc/frame_ppc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/frame_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -425,4 +425,6 @@
pc_return_offset = 0
};
+ static jint interpreter_frame_expression_stack_direction() { return -1; }
+
#endif // CPU_PPC_VM_FRAME_PPC_HPP
--- a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -179,10 +179,6 @@
return (intptr_t*)interpreter_frame_monitor_end() - 1;
}
-inline jint frame::interpreter_frame_expression_stack_direction() {
- return -1;
-}
-
// top of expression stack
inline intptr_t* frame::interpreter_frame_tos_address() const {
return ((intptr_t*) get_ijava_state()->esp) + Interpreter::stackElementWords;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "runtime/thread.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ masm->
+
+void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register from, Register to, Register count,
+ Register preserve1, Register preserve2) {
+ bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
+ // With G1, don't generate the call if we statically know that the target is uninitialized.
+ if (!dest_uninitialized) {
+ int spill_slots = 3;
+ if (preserve1 != noreg) { spill_slots++; }
+ if (preserve2 != noreg) { spill_slots++; }
+ const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+ Label filtered;
+
+ // Is marking active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ lwz(R0, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
+ } else {
+ guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ lbz(R0, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
+ }
+ __ cmpdi(CCR0, R0, 0);
+ __ beq(CCR0, filtered);
+
+ __ save_LR_CR(R0);
+ __ push_frame(frame_size, R0);
+ int slot_nr = 0;
+ __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
+ __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
+ __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
+ if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
+ if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
+
+ if (UseCompressedOops) {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), to, count);
+ } else {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), to, count);
+ }
+
+ slot_nr = 0;
+ __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
+ __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
+ __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
+ if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
+ if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
+ __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
+ __ restore_LR_CR(R0);
+
+ __ bind(filtered);
+ }
+}
+
+void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register preserve) {
+ int spill_slots = (preserve != noreg) ? 1 : 0;
+ const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
+
+ __ save_LR_CR(R0);
+ __ push_frame(frame_size, R0);
+ if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), addr, count);
+ if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
+ __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
+ __ restore_LR_CR(R0);
+}
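
The pre-barrier builds an ABI frame whose size depends on how many optional
preserve registers the caller passes, then spills and reloads registers at
symmetric offsets around the runtime call. The slot math, modeled standalone
(abi_reg_args_size and the alignment are assumed values; the real ones are
platform constants from frame_ppc.hpp):

    #include <cstddef>

    const size_t word_size         = 8;    // BytesPerWord on PPC64
    const size_t abi_reg_args_size = 112;  // assumed ABI area size
    const size_t alignment         = 16;   // assumed frame alignment

    size_t align_up(size_t v, size_t a) { return (v + a - 1) & ~(a - 1); }

    // from, to and count are always spilled (3 slots); each preserve
    // register adds one, and the total is rounded up so R1_SP stays
    // aligned across the call into the G1 runtime.
    size_t frame_size_for(int preserve_regs) {
      size_t spill_slots = 3 + preserve_regs;
      return align_up(abi_reg_args_size + spill_slots * word_size, alignment);
    }
    // Slot n (1-based) lives at frame_size - n * word_size; the reload
    // sequence after the call walks the same offsets in the same order.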
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP
+#define CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register from, Register to, Register count,
+ Register preserve1, Register preserve2);
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register preserve);
+};
+
+#endif // CPU_PPC_GC_G1_G1BARRIERSETASSEMBLER_PPC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP
+#define CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
+class InterpreterMacroAssembler;
+
+class BarrierSetAssembler: public CHeapObj<mtGC> {
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count, Register preserve1, Register preserve2) {}
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register dst, Register count, Register preserve) {}
+};
+
+#endif // CPU_PPC_GC_SHARED_BARRIERSETASSEMBLER_PPC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
+ Register count, Register preserve) {
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+ assert_different_registers(addr, count, R0);
+
+ Label Lskip_loop, Lstore_loop;
+
+ if (UseConcMarkSweepGC) { __ membar(Assembler::StoreStore); }
+
+ __ sldi_(count, count, LogBytesPerHeapOop);
+ __ beq(CCR0, Lskip_loop); // zero length
+ __ addi(count, count, -BytesPerHeapOop);
+ __ add(count, addr, count);
+ // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
+ __ srdi(addr, addr, CardTable::card_shift);
+ __ srdi(count, count, CardTable::card_shift);
+ __ subf(count, addr, count);
+ __ add_const_optimized(addr, addr, (address)ct->byte_map_base(), R0);
+ __ addi(count, count, 1);
+ __ li(R0, 0);
+ __ mtctr(count);
+ // Byte store loop
+ __ bind(Lstore_loop);
+ __ stb(R0, 0, addr);
+ __ addi(addr, addr, 1);
+ __ bdnz(Lstore_loop);
+ __ bind(Lskip_loop);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/gc/shared/cardTableBarrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP
+#define CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
+ Register count, Register preserve);
+};
+
+#endif // CPU_PPC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_PPC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+#define __ masm->
+
+void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count, Register preserve1, Register preserve2) {
+ if (type == T_OBJECT) {
+ gen_write_ref_array_pre_barrier(masm, decorators, src, dst, count, preserve1, preserve2);
+
+ bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+ if (!checkcast) {
+ assert_different_registers(dst, count, R9_ARG7, R10_ARG8);
+ // Save some arguments for epilogue, e.g. disjoint_long_copy_core destroys them.
+ __ mr(R9_ARG7, dst);
+ __ mr(R10_ARG8, count);
+ }
+ }
+}
+
+void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register dst, Register count, Register preserve) {
+ if (type == T_OBJECT) {
+ bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+ if (!checkcast) {
+ gen_write_ref_array_post_barrier(masm, decorators, R9_ARG7, R10_ARG8, preserve);
+ } else {
+ gen_write_ref_array_post_barrier(masm, decorators, dst, count, preserve);
+ }
+ }
+}
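
The R9_ARG7/R10_ARG8 shuffle encodes a register-lifetime contract: a plain
oop copy may clobber dst and count (disjoint_long_copy_core does, per the
comment above), so the prologue stashes them and the epilogue feeds the
stashed pair to the post barrier, while the checkcast stub keeps its own
dst/count live and passes them directly. A sketch of that contract, with
registers modeled as variables:

    struct CopyArgs { long dst; long count; };

    static CopyArgs saved;                 // models R9_ARG7 / R10_ARG8

    void prologue(bool checkcast, const CopyArgs& args) {
      if (!checkcast) saved = args;        // mr R9_ARG7,dst ; mr R10_ARG8,count
    }

    CopyArgs args_for_post_barrier(bool checkcast, const CopyArgs& live) {
      return checkcast ? live : saved;     // which pair reaches the barrier
    }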
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/ppc/gc/shared/modRefBarrierSetAssembler_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP
+#define CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register from, Register to, Register count,
+ Register preserve1, Register preserve2) {}
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register preserve) {}
+
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count, Register preserve1, Register preserve2);
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register dst, Register count, Register preserve);
+};
+
+#endif // CPU_PPC_GC_SHARED_MODREFBARRIERSETASSEMBLER_PPC_HPP
--- a/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/interp_masm_ppc_64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,6 +29,7 @@
#include "interp_masm_ppc.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "prims/jvmtiThreadState.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/interpreterRT_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
+#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -33,7 +34,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
#define __ _masm->
@@ -46,6 +47,12 @@
// Implementation of SignatureHandlerGenerator
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
+ const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+ _num_used_fp_arg_regs = 0;
+}
+
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
Argument jni_arg(jni_offset());
Register r = jni_arg.is_register() ? jni_arg.as_register() : R0;
--- a/src/hotspot/cpu/ppc/interpreterRT_ppc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/interpreterRT_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,8 +26,8 @@
#ifndef CPU_PPC_VM_INTERPRETERRT_PPC_HPP
#define CPU_PPC_VM_INTERPRETERRT_PPC_HPP
-#include "asm/macroAssembler.hpp"
-#include "memory/allocation.hpp"
+// This is included in the middle of class Interpreter.
+// Do not include files here.
// native method calls
@@ -45,10 +45,7 @@
public:
// Creation
- SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
- _masm = new MacroAssembler(buffer);
- _num_used_fp_arg_regs = 0;
- }
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
// Code generation
void generate(uint64_t fingerprint);
--- a/src/hotspot/cpu/ppc/jvmciCodeInstaller_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/jvmciCodeInstaller_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_ppc.inline.hpp"
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
@@ -35,7 +35,7 @@
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
@@ -3036,9 +3036,9 @@
// Write the card table byte if needed.
void MacroAssembler::card_write_barrier_post(Register Rstore_addr, Register Rnew_val, Register Rtmp) {
- CardTableModRefBS* bs =
- barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
- assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+ CardTableBarrierSet* bs =
+ barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
+ assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier");
CardTable* ct = bs->card_table();
#ifdef ASSERT
cmpdi(CCR0, Rnew_val, 0);
--- a/src/hotspot/cpu/ppc/methodHandles_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/methodHandles_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -31,6 +31,8 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/preserveException.hpp"
#define __ _masm->
--- a/src/hotspot/cpu/ppc/ppc.ad Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/ppc.ad Sat Mar 24 01:08:35 2018 +0100
@@ -1274,12 +1274,12 @@
return offsets;
}
const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);
-
+
// Emit the trampoline stub which will be related to the branch-and-link below.
CallStubImpl::emit_trampoline_stub(_masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
__ relocate(rtype);
-
+
// Note: At this point we do not have the address of the trampoline
// stub, and the entry point might be too far away for bl, so __ pc()
// serves as dummy and the bl will be patched later.
@@ -1526,7 +1526,7 @@
// Save return pc.
___(std) std(return_pc, _abi(lr), callers_sp);
}
-
+
C->set_frame_complete(cbuf.insts_size());
}
#undef ___
@@ -2695,13 +2695,13 @@
ciEnv::current()->record_out_of_memory_failure();
return;
}
-
+
// Get the constant's TOC offset.
toc_offset = __ offset_to_method_toc(const_toc_addr);
-
+
// Keep the current instruction offset in mind.
((loadConLNode*)this)->_cbuf_insts_offset = __ offset();
-
+
__ ld($dst$$Register, toc_offset, $toc$$Register);
%}
@@ -2819,7 +2819,7 @@
MachNode *_last;
} loadConLReplicatedNodesTuple;
-loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
+loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
vecXOper *dst, immI_0Oper *zero,
OptoReg::Name reg_second, OptoReg::Name reg_first,
OptoReg::Name reg_vec_second, OptoReg::Name reg_vec_first) {
@@ -3158,7 +3158,7 @@
Label skip_storestore;
#if 0 // TODO: PPC port
- // Check CMSCollectorCardTableModRefBSExt::_requires_release and do the
+ // Check CMSCollectorCardTableBarrierSetBSExt::_requires_release and do the
// StoreStore barrier conditionally.
__ lwz(R0, 0, $releaseFieldAddr$$Register);
__ cmpwi($crx$$CondRegister, R0, 0);
@@ -6852,7 +6852,7 @@
// Card-mark for CMS garbage collection.
// This cardmark does an optimization so that it must not always
// do a releasing store. For this, it gets the address of
-// CMSCollectorCardTableModRefBSExt::_requires_release as input.
+// CMSCollectorCardTableBarrierSetBSExt::_requires_release as input.
// (Using releaseFieldAddr in the match rule is a hack.)
instruct storeCM_CMS(memory mem, iRegLdst releaseFieldAddr, flagsReg crx) %{
match(Set mem (StoreCM mem releaseFieldAddr));
@@ -6871,7 +6871,7 @@
// Card-mark for CMS garbage collection.
// This cardmark does an optimization so that it must not always
// do a releasing store. For this, it needs the constant address of
-// CMSCollectorCardTableModRefBSExt::_requires_release.
+// CMSCollectorCardTableBarrierSetBSExt::_requires_release.
// This constant address is split off here by expand so we can use
// adlc / matcher functionality to load it from the constant section.
instruct storeCM_CMS_ExEx(memory mem, immI_0 zero) %{
@@ -6879,7 +6879,7 @@
predicate(UseConcMarkSweepGC);
expand %{
- immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableModRefBSExt::requires_release_address() */ %}
+ immL baseImm %{ 0 /* TODO: PPC port (jlong)CMSCollectorCardTableBarrierSetBSExt::requires_release_address() */ %}
iRegLdst releaseFieldAddress;
flagsReg crx;
loadConL_Ex(releaseFieldAddress, baseImm);
@@ -13665,7 +13665,7 @@
instruct mtvsrwz(vecX temp1, iRegIsrc src) %{
effect(DEF temp1, USE src);
-
+
size(4);
ins_encode %{
__ mtvsrwz($temp1$$VectorSRegister, $src$$Register);
@@ -13678,7 +13678,7 @@
size(4);
ins_encode %{
- __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant);
+ __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant);
%}
ins_pipe(pipe_class_default);
%}
@@ -13843,7 +13843,7 @@
expand %{
iRegLdst tmpL;
vecX tmpV;
- immI8 zero %{ (int) 0 %}
+ immI8 zero %{ (int) 0 %}
moveReg(tmpL, src);
repl48(tmpL);
repl32(tmpL);
@@ -13915,10 +13915,10 @@
predicate(n->as_Vector()->length() == 4);
ins_cost(2 * DEFAULT_COST);
- expand %{
+ expand %{
iRegLdst tmpL;
vecX tmpV;
- immI8 zero %{ (int) 0 %}
+ immI8 zero %{ (int) 0 %}
moveReg(tmpL, src);
repl32(tmpL);
mtvsrd(tmpV, tmpL);
@@ -14057,7 +14057,7 @@
iRegIdst tmpI;
iRegLdst tmpL;
vecX tmpV;
- immI8 zero %{ (int) 0 %}
+ immI8 zero %{ (int) 0 %}
moveF2I_reg_stack(tmpS, src); // Move float to stack.
moveF2I_stack_reg(tmpI, tmpS); // Move stack to int reg.
@@ -14096,7 +14096,7 @@
iRegLdst tmpL;
iRegLdst tmp;
vecX tmpV;
- immI8 zero %{ (int) 0 %}
+ immI8 zero %{ (int) 0 %}
moveD2L_reg_stack(tmpS, src);
moveD2L_stack_reg(tmpL, tmpS);
mtvsrd(tmpV, tmpL);
@@ -14132,7 +14132,7 @@
predicate(false);
effect(DEF dst, USE src);
- format %{ "MTVSRD $dst, $src \t// Move to 16-byte register"%}
+ format %{ "MTVSRD $dst, $src \t// Move to 16-byte register"%}
size(4);
ins_encode %{
__ mtvsrd($dst$$VectorSRegister, $src$$Register);
@@ -14147,7 +14147,7 @@
size(4);
ins_encode %{
__ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant);
- %}
+ %}
ins_pipe(pipe_class_default);
%}
@@ -14158,7 +14158,7 @@
size(4);
ins_encode %{
__ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant);
- %}
+ %}
ins_pipe(pipe_class_default);
%}
@@ -14167,8 +14167,8 @@
predicate(n->as_Vector()->length() == 2);
expand %{
vecX tmpV;
- immI8 zero %{ (int) 0 %}
- mtvsrd(tmpV, src);
+ immI8 zero %{ (int) 0 %}
+ mtvsrd(tmpV, src);
xxpermdi(dst, tmpV, tmpV, zero);
%}
%}
--- a/src/hotspot/cpu/ppc/runtime_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/runtime_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -33,7 +33,7 @@
#include "memory/resourceArea.hpp"
#include "nativeInst_ppc.hpp"
#include "opto/runtime.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,10 +29,12 @@
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/stubGenerator_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/instanceOop.hpp"
@@ -612,137 +612,6 @@
#undef __
#define __ _masm->
- // Generate G1 pre-write barrier for array.
- //
- // Input:
- // from - register containing src address (only needed for spilling)
- // to - register containing starting address
- // count - register containing element count
- // tmp - scratch register
- //
- // Kills:
- // nothing
- //
- void gen_write_ref_array_pre_barrier(Register from, Register to, Register count, bool dest_uninitialized, Register Rtmp1,
- Register preserve1 = noreg, Register preserve2 = noreg) {
- BarrierSet* const bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- // With G1, don't generate the call if we statically know that the target in uninitialized
- if (!dest_uninitialized) {
- int spill_slots = 3;
- if (preserve1 != noreg) { spill_slots++; }
- if (preserve2 != noreg) { spill_slots++; }
- const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
- Label filtered;
-
- // Is marking active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ lwz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
- } else {
- guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ lbz(Rtmp1, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), R16_thread);
- }
- __ cmpdi(CCR0, Rtmp1, 0);
- __ beq(CCR0, filtered);
-
- __ save_LR_CR(R0);
- __ push_frame(frame_size, R0);
- int slot_nr = 0;
- __ std(from, frame_size - (++slot_nr) * wordSize, R1_SP);
- __ std(to, frame_size - (++slot_nr) * wordSize, R1_SP);
- __ std(count, frame_size - (++slot_nr) * wordSize, R1_SP);
- if (preserve1 != noreg) { __ std(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
- if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
-
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), to, count);
-
- slot_nr = 0;
- __ ld(from, frame_size - (++slot_nr) * wordSize, R1_SP);
- __ ld(to, frame_size - (++slot_nr) * wordSize, R1_SP);
- __ ld(count, frame_size - (++slot_nr) * wordSize, R1_SP);
- if (preserve1 != noreg) { __ ld(preserve1, frame_size - (++slot_nr) * wordSize, R1_SP); }
- if (preserve2 != noreg) { __ ld(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
- __ addi(R1_SP, R1_SP, frame_size); // pop_frame()
- __ restore_LR_CR(R0);
-
- __ bind(filtered);
- }
- break;
- case BarrierSet::CardTableModRef:
- break;
- default:
- ShouldNotReachHere();
- }
- }
-
- // Generate CMS/G1 post-write barrier for array.
- //
- // Input:
- // addr - register containing starting address
- // count - register containing element count
- // tmp - scratch register
- //
- // The input registers and R0 are overwritten.
- //
- void gen_write_ref_array_post_barrier(Register addr, Register count, Register tmp, Register preserve = noreg) {
- BarrierSet* const bs = Universe::heap()->barrier_set();
-
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- {
- int spill_slots = (preserve != noreg) ? 1 : 0;
- const int frame_size = align_up(frame::abi_reg_args_size + spill_slots * BytesPerWord, frame::alignment_in_bytes);
-
- __ save_LR_CR(R0);
- __ push_frame(frame_size, R0);
- if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
- if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
- __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
- __ restore_LR_CR(R0);
- }
- break;
- case BarrierSet::CardTableModRef:
- {
- Label Lskip_loop, Lstore_loop;
- if (UseConcMarkSweepGC) {
- // TODO PPC port: contribute optimization / requires shared changes
- __ release();
- }
-
- CardTableModRefBS* const ctbs = barrier_set_cast<CardTableModRefBS>(bs);
- CardTable* const ct = ctbs->card_table();
- assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
- assert_different_registers(addr, count, tmp);
-
- __ sldi(count, count, LogBytesPerHeapOop);
- __ addi(count, count, -BytesPerHeapOop);
- __ add(count, addr, count);
- // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
- __ srdi(addr, addr, CardTable::card_shift);
- __ srdi(count, count, CardTable::card_shift);
- __ subf(count, addr, count);
- assert_different_registers(R0, addr, count, tmp);
- __ load_const(tmp, (address)ct->byte_map_base());
- __ addic_(count, count, 1);
- __ beq(CCR0, Lskip_loop);
- __ li(R0, 0);
- __ mtctr(count);
- // Byte store loop
- __ bind(Lstore_loop);
- __ stbx(R0, tmp, addr);
- __ addi(addr, addr, 1);
- __ bdnz(Lstore_loop);
- __ bind(Lskip_loop);
- }
- break;
- case BarrierSet::ModRef:
- break;
- default:
- ShouldNotReachHere();
- }
- }
// Support for void zero_words_aligned8(HeapWord* to, size_t count)
//
@@ -2155,11 +2024,18 @@
STUB_ENTRY(arrayof_oop_disjoint_arraycopy) :
STUB_ENTRY(oop_disjoint_arraycopy);
- gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
-
- // Save arguments.
- __ mr(R9_ARG7, R4_ARG2);
- __ mr(R10_ARG8, R5_ARG3);
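+ // Describe the copy to the GC: these decorators tell the GC-specific
+ // BarrierSetAssembler which barrier code to emit.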
+ DecoratorSet decorators = 0;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
if (UseCompressedOops) {
array_overlap_test(nooverlap_target, 2);
@@ -2169,7 +2043,7 @@
generate_conjoint_long_copy_core(aligned);
}
- gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
__ li(R3_RET, 0); // return 0
__ blr();
return start;
@@ -2188,12 +2062,17 @@
StubCodeMark mark(this, "StubRoutines", name);
address start = __ function_entry();
assert_positive_int(R5_ARG3);
- gen_write_ref_array_pre_barrier(R3_ARG1, R4_ARG2, R5_ARG3, dest_uninitialized, R9_ARG7);
-
- // save some arguments, disjoint_long_copy_core destroys them.
- // needed for post barrier
- __ mr(R9_ARG7, R4_ARG2);
- __ mr(R10_ARG8, R5_ARG3);
+
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_ARG1, R4_ARG2, R5_ARG3, noreg, noreg);
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
@@ -2201,7 +2080,7 @@
generate_disjoint_long_copy_core(aligned);
}
- gen_write_ref_array_post_barrier(R9_ARG7, R10_ARG8, R11_scratch1);
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_ARG2, R5_ARG3, noreg);
__ li(R3_RET, 0); // return 0
__ blr();
@@ -2280,11 +2159,17 @@
}
#endif
- gen_write_ref_array_pre_barrier(R3_from, R4_to, R5_count, dest_uninitialized, R12_tmp, /* preserve: */ R6_ckoff, R7_ckval);
+ DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, R3_from, R4_to, R5_count, /* preserve: */ R6_ckoff, R7_ckval);
//inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, R12_tmp, R3_RET);
- Label load_element, store_element, store_null, success, do_card_marks;
+ Label load_element, store_element, store_null, success, do_epilogue;
__ or_(R9_remain, R5_count, R5_count); // Initialize loop index, and test it.
__ li(R8_offset, 0); // Offset from start of arrays.
__ li(R2_minus1, -1);
@@ -2328,15 +2213,15 @@
// and report their number to the caller.
__ subf_(R5_count, R9_remain, R5_count);
__ nand(R3_RET, R5_count, R5_count); // report (-1^K) to caller
- __ bne(CCR0, do_card_marks);
+ __ bne(CCR0, do_epilogue);
__ blr();
__ bind(success);
__ li(R3_RET, 0);
- __ bind(do_card_marks);
- // Store check on R4_to[0..R5_count-1].
- gen_write_ref_array_post_barrier(R4_to, R5_count, R12_tmp, /* preserve: */ R3_RET);
+ __ bind(do_epilogue);
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, R4_to, R5_count, /* preserve */ R3_RET);
+
__ blr();
return start;
}
--- a/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/ppc/templateTable_ppc_64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -34,6 +34,8 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
@@ -103,7 +105,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
{
Label Lnull, Ldone;
if (Rval != noreg) {
--- a/src/hotspot/cpu/s390/assembler_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/assembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,11 +28,11 @@
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -33,9 +33,10 @@
#include "ci/ciInstance.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "nativeInst_s390.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"
@@ -631,7 +632,7 @@
};
// Index register is normally not supported, but for
- // LIRGenerator::CardTableModRef_post_barrier we make an exception.
+ // LIRGenerator::CardTableBarrierSet_post_barrier we make an exception.
if (type == T_BYTE && dest->as_address_ptr()->index()->is_valid()) {
__ load_const_optimized(Z_R0_scratch, (int8_t)(c->as_jint()));
store_offset = __ offset();
@@ -1895,6 +1896,15 @@
// If we don't know anything, just go through the generic arraycopy.
if (default_type == NULL) {
+ address copyfunc_addr = StubRoutines::generic_arraycopy();
+
+ if (copyfunc_addr == NULL) {
+ // Take a slow path for generic arraycopy.
+ __ branch_optimized(Assembler::bcondAlways, *stub->entry());
+ __ bind(*stub->continuation());
+ return;
+ }
+
Label done;
// Save outgoing arguments in callee saved registers (C convention) in case
// a call to System.arraycopy is needed.
@@ -1915,10 +1925,6 @@
__ z_lgfr(dst_pos, dst_pos);
__ z_lgfr(length, length);
- address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
-
- address copyfunc_addr = StubRoutines::generic_arraycopy();
-
// Pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint.
// The arguments are in the corresponding registers.
@@ -1927,25 +1933,19 @@
assert(Z_ARG3 == dst, "assumption");
assert(Z_ARG4 == dst_pos, "assumption");
assert(Z_ARG5 == length, "assumption");
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated.
- emit_call_c(C_entry);
- } else {
#ifndef PRODUCT
- if (PrintC1Statistics) {
- __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt);
- __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
- }
+ if (PrintC1Statistics) {
+ __ load_const_optimized(Z_R1_scratch, (address)&Runtime1::_generic_arraycopystub_cnt);
+ __ add2mem_32(Address(Z_R1_scratch), 1, Z_R0_scratch);
+ }
#endif
- emit_call_c(copyfunc_addr);
- }
+ emit_call_c(copyfunc_addr);
CHECK_BAILOUT();
__ compare32_and_branch(Z_RET, (intptr_t)0, Assembler::bcondEqual, *stub->continuation());
- if (copyfunc_addr != NULL) {
- __ z_lgr(tmp, Z_RET);
- __ z_xilf(tmp, -1);
- }
+ __ z_lgr(tmp, Z_RET);
+ __ z_xilf(tmp, -1);
// Restore values from callee saved registers so they are where the stub
// expects them.
@@ -1955,11 +1955,9 @@
__ lgr_if_needed(dst_pos, callee_saved_dst_pos);
__ lgr_if_needed(length, callee_saved_length);
- if (copyfunc_addr != NULL) {
- __ z_sr(length, tmp);
- __ z_ar(src_pos, tmp);
- __ z_ar(dst_pos, tmp);
- }
+ __ z_sr(length, tmp);
+ __ z_ar(src_pos, tmp);
+ __ z_ar(dst_pos, tmp);
__ branch_optimized(Assembler::bcondAlways, *stub->entry());
__ bind(*stub->continuation());
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,7 +29,7 @@
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/s390/frame_s390.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/frame_s390.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -549,4 +549,6 @@
pc_return_offset = 0,
};
+ static jint interpreter_frame_expression_stack_direction() { return -1; }
+
#endif // CPU_S390_VM_FRAME_S390_HPP
--- a/src/hotspot/cpu/s390/frame_s390.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -175,10 +175,6 @@
return (intptr_t*)interpreter_frame_monitor_end() - 1;
}
-inline jint frame::interpreter_frame_expression_stack_direction() {
- return -1;
-}
-
inline intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
return &interpreter_frame_tos_address()[offset];
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "registerSaver_s390.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "runtime/thread.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ masm->
+
+#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)
+
+void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count) {
+ bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
+
+ // With G1, don't generate the call if we statically know that the target is uninitialized.
+ if (!dest_uninitialized) {
+ // Is marking active?
+ Label filtered;
+ assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
+ assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
+ Register Rtmp1 = Z_R0_scratch;
+ const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
+ SATBMarkQueue::byte_offset_of_active());
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
+ } else {
+ guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
+ }
+ __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
+
+ RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
+
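+ // The pre-barrier runtime entry must read each old element, so arrays of
+ // compressed and uncompressed oops need different entry points.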
+ if (UseCompressedOops) {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), addr, count);
+ } else {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), addr, count);
+ }
+
+ RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
+
+ __ bind(filtered);
+ }
+}
+
+void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, bool do_return) {
+ address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry);
+ if (!do_return) {
+ assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
+ assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
+ RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
+ __ call_VM_leaf(entry_point, addr, count);
+ RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
+ } else {
+ // Tail call: call c and return to stub caller.
+ __ lgr_if_needed(Z_ARG1, addr);
+ __ lgr_if_needed(Z_ARG2, count);
+ __ load_const(Z_R1, entry_point);
+ __ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP
+#define CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
+ protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count);
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, bool do_return);
+};
+
+#endif // CPU_S390_GC_G1_G1BARRIERSETASSEMBLER_S390_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/barrierSetAssembler_s390.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
+#define CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
+class InterpreterMacroAssembler;
+
+class BarrierSetAssembler: public CHeapObj<mtGC> {
+public:
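+ // By default no barrier code is emitted; GC-specific subclasses override
+ // these hooks to wrap the copy with their barriers.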
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {}
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register dst, Register count, bool do_return = false);
+};
+
+#endif // CPU_S390_GC_SHARED_BARRIERSETASSEMBLER_S390_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
+ bool do_return) {
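+ // With do_return, exit paths branch straight back to the stub caller via Z_R14.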
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+
+ NearLabel doXC, done;
+ assert_different_registers(Z_R0, Z_R1, addr, count);
+
+ // Nothing to do if count <= 0.
+ if (!do_return) {
+ __ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done);
+ } else {
+ __ z_ltgr(count, count);
+ __ z_bcr(Assembler::bcondNotPositive, Z_R14);
+ }
+
+ // Note: We can't combine the shifts. We could lose a carry
+ // from calculating the array end address.
+ // count = (count-1)*BytesPerHeapOop + addr
+ // Count holds addr of last oop in array then.
+ __ z_sllg(count, count, LogBytesPerHeapOop);
+ __ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
+
+ // Get base address of card table.
+ __ load_const_optimized(Z_R1, (address)ct->byte_map_base());
+
+ // count = (count>>shift) - (addr>>shift)
+ __ z_srlg(addr, addr, CardTable::card_shift);
+ __ z_srlg(count, count, CardTable::card_shift);
+
+ // Prefetch first elements of card table for update.
+ if (VM_Version::has_Prefetch()) {
+ __ z_pfd(0x02, 0, addr, Z_R1);
+ }
+
+ // Special case: clear just one byte.
+ __ clear_reg(Z_R0, true, false); // Zero value used to clear the card bytes.
+ __ z_sgr(count, addr); // Count = n-1 now, CC used for brc below.
+ __ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr.
+ if (!do_return) {
+ __ z_brz(done);
+ } else {
+ __ z_bcr(Assembler::bcondZero, Z_R14);
+ }
+
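+ // Clear the card range with XC if it is short (<= 256 bytes), else with MVCLE.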
+ __ z_cghi(count, 255);
+ __ z_brnh(doXC);
+
+ // MVCLE: clear a long area.
+ // Start addr of card table range = base + addr.
+ // # bytes in card table range = (count + 1)
+ __ add2reg_with_index(Z_R0, 0, Z_R1, addr);
+ __ add2reg(Z_R1, 1, count);
+
+ // dirty hack:
+ // There are just two callers. Both pass
+ // count in Z_ARG3 = Z_R4
+ // addr in Z_ARG2 = Z_R3
+ // ==> use Z_ARG2 as src len reg = 0
+ // Z_ARG1 as src addr (ignored)
+ assert(count == Z_ARG3, "count: unexpected register number");
+ assert(addr == Z_ARG2, "addr: unexpected register number");
+ __ clear_reg(Z_ARG2, true, false);
+
+ __ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0);
+
+ if (!do_return) {
+ __ z_bru(done);
+ } else {
+ __ z_bcr(Assembler::bcondAlways, Z_R14);
+ }
+
+ // XC: clear a short area.
+ Label XC_template; // Instr template, never exec directly!
+ __ bind(XC_template);
+ __ z_xc(0, 0, addr, 0, addr);
+
+ __ bind(doXC);
+ // start addr of card table range = base + addr
+ // end addr of card table range = base + addr + count
+ __ add2reg_with_index(addr, 0, Z_R1, addr);
+
+ if (VM_Version::has_ExecuteExtensions()) {
+ __ z_exrl(count, XC_template); // Execute XC with var. len.
+ } else {
+ __ z_larl(Z_R1, XC_template);
+ __ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len.
+ }
+ if (do_return) {
+ __ z_br(Z_R14);
+ }
+
+ __ bind(done);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/cardTableBarrierSetAssembler_s390.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
+#define CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
+ bool do_return);
+};
+
+#endif // CPU_S390_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_S390_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/modRefBarrierSetAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+#define __ masm->
+
+void ModRefBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
+ bool do_return) {
+ if (do_return) { __ z_br(Z_R14); }
+}
+
+void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {
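+ // Barriers apply only to reference (oop) array copies; primitive copies
+ // need no pre- or post-processing.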
+ if (type == T_OBJECT || type == T_ARRAY) {
+ gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
+ }
+}
+
+void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register dst, Register count, bool do_return) {
+ if (type == T_OBJECT || type == T_ARRAY) {
+ gen_write_ref_array_post_barrier(masm, decorators, dst, count, do_return);
+ } else {
+ if (do_return) { __ z_br(Z_R14); }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/s390/gc/shared/modRefBarrierSetAssembler_s390.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
+#define CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {}
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count,
+ bool do_return);
+
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count);
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register dst, Register count, bool do_return = false);
+};
+
+#endif // CPU_S390_GC_SHARED_MODREFBARRIERSETASSEMBLER_S390_HPP
--- a/src/hotspot/cpu/s390/interp_masm_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/interp_masm_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -36,6 +36,7 @@
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/s390/interpreterRT_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/interpreterRT_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -32,7 +33,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
// Access macros for Java and C arguments.
@@ -64,6 +65,13 @@
}
// Implementation of SignatureHandlerGenerator
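+// The constructor is defined out of line: interpreterRT_s390.hpp is included
+// in the middle of class Interpreter and must not include other headers.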
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
+ const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+ _fp_arg_nr = 0;
+}
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
int int_arg_nr = jni_offset() - _fp_arg_nr;
--- a/src/hotspot/cpu/s390/interpreterRT_s390.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/interpreterRT_s390.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,7 +26,8 @@
#ifndef CPU_S390_VM_INTERPRETERRT_S390_HPP
#define CPU_S390_VM_INTERPRETERRT_S390_HPP
-#include "memory/allocation.hpp"
+// This is included in the middle of class Interpreter.
+// Do not include files here.
static int binary_search(int key, LookupswitchPair* array, int n);
@@ -51,10 +52,7 @@
public:
// creation
- SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
- _masm = new MacroAssembler(buffer);
- _fp_arg_nr = 0;
- }
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
// code generation
void generate(uint64_t fingerprint);
--- a/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/jvmciCodeInstaller_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -34,6 +34,7 @@
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_s390.inline.hpp"
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/klass.inline.hpp"
@@ -41,7 +41,7 @@
#include "registerSaver_s390.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
@@ -3505,9 +3505,9 @@
// Write to card table for modification at store_addr - register is destroyed afterwards.
void MacroAssembler::card_write_barrier_post(Register store_addr, Register tmp) {
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
- assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+ assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier");
assert_different_registers(store_addr, tmp);
z_srlg(store_addr, store_addr, CardTable::card_shift);
load_absolute_address(tmp, (address)ct->byte_map_base());
--- a/src/hotspot/cpu/s390/methodHandles_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/methodHandles_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -31,6 +31,8 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/preserveException.hpp"
#ifdef PRODUCT
#define __ _masm->
--- a/src/hotspot/cpu/s390/runtime_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/runtime_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,7 @@
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
#include "opto/runtime.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,11 +28,13 @@
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "registerSaver_s390.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/s390/stubGenerator_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/stubGenerator_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,8 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "nativeInst_s390.hpp"
@@ -686,188 +686,6 @@
return start;
}
- // Generate pre-write barrier for array.
- //
- // Input:
- // addr - register containing starting address
- // count - register containing element count
- //
- // The input registers are overwritten.
- void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
-
- BarrierSet* const bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- // With G1, don't generate the call if we statically know that the target is uninitialized.
- if (!dest_uninitialized) {
- // Is marking active?
- Label filtered;
- assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
- assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
- Register Rtmp1 = Z_R0_scratch;
- const int active_offset = in_bytes(JavaThread::satb_mark_queue_offset() +
- SATBMarkQueue::byte_offset_of_active());
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
- } else {
- guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
- }
- __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.
-
- // __ push_frame_abi160(0); // implicitly done in save_live_registers()
- (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), addr, count);
- (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
- // __ pop_frame(); // implicitly done in restore_live_registers()
-
- __ bind(filtered);
- }
- break;
- case BarrierSet::CardTableModRef:
- case BarrierSet::ModRef:
- break;
- default:
- ShouldNotReachHere();
- }
- }
-
- // Generate post-write barrier for array.
- //
- // Input:
- // addr - register containing starting address
- // count - register containing element count
- //
- // The input registers are overwritten.
- void gen_write_ref_array_post_barrier(Register addr, Register count, bool branchToEnd) {
- BarrierSet* const bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- {
- if (branchToEnd) {
- assert_different_registers(addr, Z_R0_scratch); // would be destroyed by push_frame()
- assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
- // __ push_frame_abi160(0); // implicitly done in save_live_registers()
- (void) RegisterSaver::save_live_registers(_masm, RegisterSaver::arg_registers);
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), addr, count);
- (void) RegisterSaver::restore_live_registers(_masm, RegisterSaver::arg_registers);
- // __ pop_frame(); // implicitly done in restore_live_registers()
- } else {
- // Tail call: call c and return to stub caller.
- address entry_point = CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post);
- __ lgr_if_needed(Z_ARG1, addr);
- __ lgr_if_needed(Z_ARG2, count);
- __ load_const(Z_R1, entry_point);
- __ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
- }
- }
- break;
- case BarrierSet::CardTableModRef:
- // These cases formerly known as
- // void array_store_check(Register addr, Register count, bool branchToEnd).
- {
- NearLabel doXC, done;
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
- CardTable* ct = ctbs->card_table();
- assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
- assert_different_registers(Z_R0, Z_R1, addr, count);
-
- // Nothing to do if count <= 0.
- if (branchToEnd) {
- __ compare64_and_branch(count, (intptr_t) 0, Assembler::bcondNotHigh, done);
- } else {
- __ z_ltgr(count, count);
- __ z_bcr(Assembler::bcondNotPositive, Z_R14);
- }
-
- // Note: We can't combine the shifts. We could lose a carry
- // from calculating the array end address.
- // count = (count-1)*BytesPerHeapOop + addr
- // Count holds addr of last oop in array then.
- __ z_sllg(count, count, LogBytesPerHeapOop);
- __ add2reg_with_index(count, -BytesPerHeapOop, count, addr);
-
- // Get base address of card table.
- __ load_const_optimized(Z_R1, (address)ct->byte_map_base());
-
- // count = (count>>shift) - (addr>>shift)
- __ z_srlg(addr, addr, CardTable::card_shift);
- __ z_srlg(count, count, CardTable::card_shift);
-
- // Prefetch first elements of card table for update.
- if (VM_Version::has_Prefetch()) {
- __ z_pfd(0x02, 0, addr, Z_R1);
- }
-
- // Special case: clear just one byte.
- __ clear_reg(Z_R0, true, false); // Used for doOneByte.
- __ z_sgr(count, addr); // Count = n-1 now, CC used for brc below.
- __ z_stc(Z_R0, 0, addr, Z_R1); // Must preserve CC from z_sgr.
- if (branchToEnd) {
- __ z_brz(done);
- } else {
- __ z_bcr(Assembler::bcondZero, Z_R14);
- }
-
- __ z_cghi(count, 255);
- __ z_brnh(doXC);
-
- // MVCLE: clear a long area.
- // Start addr of card table range = base + addr.
- // # bytes in card table range = (count + 1)
- __ add2reg_with_index(Z_R0, 0, Z_R1, addr);
- __ add2reg(Z_R1, 1, count);
-
- // dirty hack:
- // There are just two callers. Both pass
- // count in Z_ARG3 = Z_R4
- // addr in Z_ARG2 = Z_R3
- // ==> use Z_ARG2 as src len reg = 0
- // Z_ARG1 as src addr (ignored)
- assert(count == Z_ARG3, "count: unexpected register number");
- assert(addr == Z_ARG2, "addr: unexpected register number");
- __ clear_reg(Z_ARG2, true, false);
-
- __ MacroAssembler::move_long_ext(Z_R0, Z_ARG1, 0);
-
- if (branchToEnd) {
- __ z_bru(done);
- } else {
- __ z_bcr(Assembler::bcondAlways, Z_R14);
- }
-
- // XC: clear a short area.
- Label XC_template; // Instr template, never exec directly!
- __ bind(XC_template);
- __ z_xc(0, 0, addr, 0, addr);
-
- __ bind(doXC);
- // start addr of card table range = base + addr
- // end addr of card table range = base + addr + count
- __ add2reg_with_index(addr, 0, Z_R1, addr);
-
- if (VM_Version::has_ExecuteExtensions()) {
- __ z_exrl(count, XC_template); // Execute XC with var. len.
- } else {
- __ z_larl(Z_R1, XC_template);
- __ z_ex(count, 0, Z_R0, Z_R1); // Execute XC with var. len.
- }
- if (!branchToEnd) {
- __ z_br(Z_R14);
- }
-
- __ bind(done);
- }
- break;
- case BarrierSet::ModRef:
- if (!branchToEnd) { __ z_br(Z_R14); }
- break;
- default:
- ShouldNotReachHere();
- }
- }
-
-
// This is to test that the count register contains a positive int value.
// Required because C2 does not respect int to long conversion for stub calls.
void assert_positive_int(Register count) {
@@ -1482,11 +1300,20 @@
unsigned int start_off = __ offset(); // Remember stub start address (is rtn value).
unsigned int size = UseCompressedOops ? 4 : 8;
- gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3);
generate_disjoint_copy(aligned, size, true, true);
- gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true);
return __ addr_at(start_off);
}
@@ -1565,11 +1392,20 @@
// Branch to disjoint_copy (if applicable) before pre_barrier to avoid double pre_barrier.
array_overlap_test(nooverlap_target, shift); // Branch away to nooverlap_target if disjoint.
- gen_write_ref_array_pre_barrier(Z_ARG2, Z_ARG3, dest_uninitialized);
+ DecoratorSet decorators = 0;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, Z_ARG1, Z_ARG2, Z_ARG3);
generate_conjoint_copy(aligned, size, true); // Must preserve ARG2, ARG3.
- gen_write_ref_array_post_barrier(Z_ARG2, Z_ARG3, false);
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, Z_ARG2, Z_ARG3, true);
return __ addr_at(start_off);
}
--- a/src/hotspot/cpu/s390/templateTable_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/s390/templateTable_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -33,6 +33,8 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
@@ -260,7 +262,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
{
if (val_is_null) {
__ store_heap_oop_null(val, offset, base);
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,10 +31,12 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -1878,29 +1880,23 @@
__ mov(dst_pos, O3);
__ mov(length, O4);
address copyfunc_addr = StubRoutines::generic_arraycopy();
-
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated
- __ call_VM_leaf(tmp, CAST_FROM_FN_PTR(address, Runtime1::arraycopy));
- } else {
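+ // There is no longer a slow C fallback (Runtime1::arraycopy), so the
+ // generic arraycopy stub must always be generated.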
+ assert(copyfunc_addr != NULL, "generic arraycopy stub required");
+
#ifndef PRODUCT
- if (PrintC1Statistics) {
- address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
- __ inc_counter(counter, G1, G3);
- }
-#endif
- __ call_VM_leaf(tmp, copyfunc_addr);
+ if (PrintC1Statistics) {
+ address counter = (address)&Runtime1::_generic_arraycopystub_cnt;
+ __ inc_counter(counter, G1, G3);
}
-
- if (copyfunc_addr != NULL) {
- __ xor3(O0, -1, tmp);
- __ sub(length, tmp, length);
- __ add(src_pos, tmp, src_pos);
- __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
- __ delayed()->add(dst_pos, tmp, dst_pos);
- } else {
- __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
- __ delayed()->nop();
- }
+#endif
+ __ call_VM_leaf(tmp, copyfunc_addr);
+
+ __ xor3(O0, -1, tmp);
+ __ sub(length, tmp, length);
+ __ add(src_pos, tmp, src_pos);
+ __ cmp_zero_and_br(Assembler::less, O0, *stub->entry());
+ __ delayed()->add(dst_pos, tmp, dst_pos);
__ bind(*stub->continuation());
return;
}
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/sparc/frame_sparc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/frame_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -240,4 +240,6 @@
void interpreter_frame_set_monitors(BasicObjectLock* monitors);
public:
+ static jint interpreter_frame_expression_stack_direction() { return -1; }
+
#endif // CPU_SPARC_VM_FRAME_SPARC_HPP
--- a/src/hotspot/cpu/sparc/frame_sparc.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/frame_sparc.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -99,8 +99,6 @@
return (intptr_t*) sp_addr_at( ImethodDataPtr->sp_offset_in_saved_window());
}
-inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }
-
// bottom(base) of the expression stack (highest address)
inline intptr_t* frame::interpreter_frame_expression_stack() const {
return (intptr_t*)interpreter_frame_monitors() - 1;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/macros.hpp"
+
+#define __ masm->
+
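+// Generate the SATB pre-barrier for an oop array copy. While concurrent
+// marking is active, the pre-existing contents of the destination range
+// must be enqueued before the copy overwrites them.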
+void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count) {
+ bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
+ // With G1, don't generate the call if we statically know that the target is uninitialized
+ if (!dest_uninitialized) {
+ Register tmp = O5;
+ assert_different_registers(addr, count, tmp);
+ Label filtered;
+ // Is marking active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
+ } else {
+ guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
+ "Assumption");
+ __ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
+ }
+ // If marking is not active, skip the barrier
+ __ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
+
+ __ save_frame(0);
+ // Save the necessary global regs... will be used after.
+ if (addr->is_global()) {
+ __ mov(addr, L0);
+ }
+ if (count->is_global()) {
+ __ mov(count, L1);
+ }
+ __ mov(addr->after_save(), O0);
+ address slowpath = UseCompressedOops ? CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry)
+ : CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry);
+ __ call(slowpath);
+ // Get the count into O1 (delay slot of the call)
+ __ delayed()->mov(count->after_save(), O1);
+ if (addr->is_global()) {
+ __ mov(L0, addr);
+ }
+ if (count->is_global()) {
+ __ mov(L1, count);
+ }
+ __ restore();
+
+ __ bind(filtered);
+ DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
+ }
+}
+
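+// Generate the G1 post-barrier for an oop array copy: the written range
+// [addr, addr+count) is handed to the runtime, which dirties and enqueues
+// the covered cards.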
+void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp) {
+ // Get some fresh output registers.
+ __ save_frame(0);
+ __ mov(addr->after_save(), O0);
+ __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
+ __ delayed()->mov(count->after_save(), O1);
+ __ restore();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP
+#define CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count);
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp);
+};
+
+#endif // CPU_SPARC_GC_G1_G1BARRIERSETASSEMBLER_SPARC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP
+#define CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
+class InterpreterMacroAssembler;
+
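+// CPU-specific part of a BarrierSet: emits the GC barrier code that
+// generated stubs (currently arraycopy) need around the copy loop.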
+class BarrierSetAssembler: public CHeapObj<mtGC> {
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {}
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {}
+};
+
+#endif // CPU_SPARC_GC_SHARED_BARRIERSETASSEMBLER_SPARC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/sparc/gc/shared/cardTableBarrierSetAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interp_masm.hpp"
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
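+// Dirty every card table entry covering [addr, addr+count) after an oop
+// array copy; the loop below marks one card per iteration.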
+void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp) {
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+ assert_different_registers(addr, count, tmp);
+
+ Label L_loop, L_done;
+
+ __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do
+
+ __ sll_ptr(count, LogBytesPerHeapOop, count);
+ __ sub(count, BytesPerHeapOop, count);
+ __ add(count, addr, count);
+ // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
+ __ srl_ptr(addr, CardTable::card_shift, addr);
+ __ srl_ptr(count, CardTable::card_shift, count);
+ __ sub(count, addr, count);
+ AddressLiteral rs(ct->byte_map_base());
+ __ set(rs, tmp);
+ __ BIND(L_loop);
+ __ stb(G0, tmp, addr);
+ __ subcc(count, 1, count);
+ __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
+ __ delayed()->add(addr, 1, addr);
+
+ __ BIND(L_done);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/sparc/gc/shared/cardTableBarrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
+#define CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp);
+};
+
+#endif // CPU_SPARC_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_SPARC_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/sparc/gc/shared/modRefBarrierSetAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+#define __ masm->
+
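+// For the non-checkcast case the destination address and element count are
+// copied into G1 and G5, which the copy core leaves intact, so that
+// arraycopy_epilogue can pass the same range to the post barrier.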
+void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {
+ if (type == T_OBJECT) {
+ bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+ if (!checkcast) {
+ // save arguments for barrier generation
+ __ mov(dst, G1);
+ __ mov(count, G5);
+ gen_write_ref_array_pre_barrier(masm, decorators, G1, G5);
+ } else {
+ gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
+ }
+ }
+}
+
+void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {
+ if (type == T_OBJECT) {
+ bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+ if (!checkcast) {
+ // O0 is used as temp register
+ gen_write_ref_array_post_barrier(masm, decorators, G1, G5, O0);
+ } else {
+ gen_write_ref_array_post_barrier(masm, decorators, dst, count, O3);
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/sparc/gc/shared/modRefBarrierSetAssembler_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP
+#define CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
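+// Common arraycopy barrier logic for barrier sets that track reference
+// stores; subclasses override the pre/post array-write hooks they need.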
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {}
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp) {}
+
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count);
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count);
+};
+
+#endif // CPU_SPARC_GC_SHARED_MODREFBARRIERSETASSEMBLER_SPARC_HPP
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/interpreterRT_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -32,7 +33,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
@@ -40,6 +41,10 @@
// Implementation of SignatureHandlerGenerator
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(
+ const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+}
void InterpreterRuntime::SignatureHandlerGenerator::pass_word(int size_of_arg, int offset_in_arg) {
Argument jni_arg(jni_offset() + offset_in_arg, false);
--- a/src/hotspot/cpu/sparc/interpreterRT_sparc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/interpreterRT_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,9 @@
#ifndef CPU_SPARC_VM_INTERPRETERRT_SPARC_HPP
#define CPU_SPARC_VM_INTERPRETERRT_SPARC_HPP
-#include "memory/allocation.hpp"
+// This is included in the middle of class Interpreter.
+// Do not include files here.
+
static int binary_search(int key, LookupswitchPair* array, int n);
@@ -52,9 +54,7 @@
public:
// Creation
- SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
- _masm = new MacroAssembler(buffer);
- }
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
// Code generation
void generate( uint64_t fingerprint );
--- a/src/hotspot/cpu/sparc/jvmciCodeInstaller_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/jvmciCodeInstaller_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "vmreg_sparc.inline.hpp"
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "asm/macroAssembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
@@ -35,7 +35,7 @@
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.inline.hpp"
@@ -3729,11 +3729,11 @@
void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
// If we're writing constant NULL, we can skip the write barrier.
if (new_val == G0) return;
- CardTableModRefBS* bs =
- barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+ CardTableBarrierSet* bs =
+ barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = bs->card_table();
- assert(bs->kind() == BarrierSet::CardTableModRef, "wrong barrier");
+ assert(bs->kind() == BarrierSet::CardTableBarrierSet, "wrong barrier");
card_table_write(ct->byte_map_base(), tmp, store_addr);
}
--- a/src/hotspot/cpu/sparc/methodHandles_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/methodHandles_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,8 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/preserveException.hpp"
#define __ _masm->
--- a/src/hotspot/cpu/sparc/runtime_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/runtime_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
#include "memory/resourceArea.hpp"
#include "nativeInst_sparc.hpp"
#include "opto/runtime.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/sharedRuntime_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,10 +27,12 @@
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/stubGenerator_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,8 +24,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_sparc.hpp"
#include "oops/instanceOop.hpp"
@@ -823,125 +823,6 @@
__ delayed()->nop();
}
- //
- // Generate pre-write barrier for array.
- //
- // Input:
- // addr - register containing starting address
- // count - register containing element count
- // tmp - scratch register
- //
- // The input registers are overwritten.
- //
- void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- // With G1, don't generate the call if we statically know that the target in uninitialized
- if (!dest_uninitialized) {
- Register tmp = O5;
- assert_different_registers(addr, count, tmp);
- Label filtered;
- // Is marking active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ ld(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
- } else {
- guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1,
- "Assumption");
- __ ldsb(G2, in_bytes(JavaThread::satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_active()), tmp);
- }
- // Is marking active?
- __ cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
-
- __ save_frame(0);
- // Save the necessary global regs... will be used after.
- if (addr->is_global()) {
- __ mov(addr, L0);
- }
- if (count->is_global()) {
- __ mov(count, L1);
- }
- __ mov(addr->after_save(), O0);
- // Get the count into O1
- __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre));
- __ delayed()->mov(count->after_save(), O1);
- if (addr->is_global()) {
- __ mov(L0, addr);
- }
- if (count->is_global()) {
- __ mov(L1, count);
- }
- __ restore();
-
- __ bind(filtered);
- DEBUG_ONLY(__ set(0xDEADC0DE, tmp);) // we have killed tmp
- }
- break;
- case BarrierSet::CardTableModRef:
- break;
- default:
- ShouldNotReachHere();
- }
- }
- //
- // Generate post-write barrier for array.
- //
- // Input:
- // addr - register containing starting address
- // count - register containing element count
- // tmp - scratch register
- //
- // The input registers are overwritten.
- //
- void gen_write_ref_array_post_barrier(Register addr, Register count,
- Register tmp) {
- BarrierSet* bs = Universe::heap()->barrier_set();
-
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- {
- // Get some new fresh output registers.
- __ save_frame(0);
- __ mov(addr->after_save(), O0);
- __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post));
- __ delayed()->mov(count->after_save(), O1);
- __ restore();
- }
- break;
- case BarrierSet::CardTableModRef:
- {
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
- CardTable* ct = ctbs->card_table();
- assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
- assert_different_registers(addr, count, tmp);
-
- Label L_loop, L_done;
-
- __ cmp_and_br_short(count, 0, Assembler::equal, Assembler::pt, L_done); // zero count - nothing to do
-
- __ sll_ptr(count, LogBytesPerHeapOop, count);
- __ sub(count, BytesPerHeapOop, count);
- __ add(count, addr, count);
- // Use two shifts to clear out those low order two bits! (Cannot opt. into 1.)
- __ srl_ptr(addr, CardTable::card_shift, addr);
- __ srl_ptr(count, CardTable::card_shift, count);
- __ sub(count, addr, count);
- AddressLiteral rs(ct->byte_map_base());
- __ set(rs, tmp);
- __ BIND(L_loop);
- __ stb(G0, tmp, addr);
- __ subcc(count, 1, count);
- __ brx(Assembler::greaterEqual, false, Assembler::pt, L_loop);
- __ delayed()->add(addr, 1, addr);
- __ BIND(L_done);
- }
- break;
- case BarrierSet::ModRef:
- break;
- default:
- ShouldNotReachHere();
- }
- }
//
// Generate main code for disjoint arraycopy
@@ -2388,18 +2269,25 @@
BLOCK_COMMENT("Entry:");
}
- // save arguments for barrier generation
- __ mov(to, G1);
- __ mov(count, G5);
- gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler* bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count);
+
assert_clean_int(count, O3); // Make sure 'count' is clean int.
if (UseCompressedOops) {
generate_disjoint_int_copy_core(aligned);
} else {
generate_disjoint_long_copy_core(aligned);
}
- // O0 is used as temp register
- gen_write_ref_array_post_barrier(G1, G5, O0);
+
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count);
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
@@ -2438,10 +2326,16 @@
array_overlap_test(nooverlap_target, LogBytesPerHeapOop);
- // save arguments for barrier generation
- __ mov(to, G1);
- __ mov(count, G5);
- gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized);
+ DecoratorSet decorators = 0;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler* bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, from, to, count);
if (UseCompressedOops) {
generate_conjoint_int_copy_core(aligned);
@@ -2449,8 +2343,7 @@
generate_conjoint_long_copy_core(aligned);
}
- // O0 is used as temp register
- gen_write_ref_array_post_barrier(G1, G5, O0);
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, from, to, count);
// O3, O4 are used as temp registers
inc_counter_np(SharedRuntime::_oop_array_copy_ctr, O3, O4);
@@ -2552,9 +2445,16 @@
// caller can pass a 64-bit byte count here (from generic stub)
BLOCK_COMMENT("Entry:");
}
- gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized);
-
- Label load_element, store_element, do_card_marks, fail, done;
+
+ DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+
+ BarrierSetAssembler* bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);
+
+ Label load_element, store_element, do_epilogue, fail, done;
__ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it
__ brx(Assembler::notZero, false, Assembler::pt, load_element);
__ delayed()->mov(G0, O5_offset); // offset from start of arrays
@@ -2576,7 +2476,7 @@
__ deccc(G1_remain); // decrement the count
__ store_heap_oop(G3_oop, O1_to, O5_offset); // store the oop
__ inc(O5_offset, heapOopSize); // step to next offset
- __ brx(Assembler::zero, true, Assembler::pt, do_card_marks);
+ __ brx(Assembler::zero, true, Assembler::pt, do_epilogue);
__ delayed()->set(0, O0); // return -1 on success
// ======== loop entry is here ========
@@ -2600,8 +2500,8 @@
__ brx(Assembler::zero, false, Assembler::pt, done);
__ delayed()->not1(O2_count, O0); // report (-1^K) to caller
- __ BIND(do_card_marks);
- gen_write_ref_array_post_barrier(O1_to, O2_count, O3); // store check on O1[0..O2]
+ __ BIND(do_epilogue);
+ bs->arraycopy_epilogue(_masm, decorators, T_OBJECT, O0_from, O1_to, O2_count);
__ BIND(done);
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr, O3, O4);
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,6 +32,8 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
@@ -90,7 +92,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
{
if (index == noreg ) {
assert(Assembler::is_simm13(offset), "fix this code");
--- a/src/hotspot/cpu/x86/assembler_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/assembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,13 +25,12 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -33,10 +33,12 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "nativeInst_x86.hpp"
#include "oops/objArrayKlass.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "vmreg_x86.inline.hpp"
@@ -3057,9 +3059,8 @@
store_parameter(src, 4);
NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
- address C_entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
-
address copyfunc_addr = StubRoutines::generic_arraycopy();
+ assert(copyfunc_addr != NULL, "generic arraycopy stub required");
// pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
#ifdef _LP64
@@ -3077,29 +3078,21 @@
// Allocate abi space for args but be sure to keep stack aligned
__ subptr(rsp, 6*wordSize);
store_parameter(j_rarg4, 4);
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated
- __ call(RuntimeAddress(C_entry));
- } else {
#ifndef PRODUCT
- if (PrintC1Statistics) {
- __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
- }
+ if (PrintC1Statistics) {
+ __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
+ }
#endif
- __ call(RuntimeAddress(copyfunc_addr));
- }
+ __ call(RuntimeAddress(copyfunc_addr));
__ addptr(rsp, 6*wordSize);
#else
__ mov(c_rarg4, j_rarg4);
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated
- __ call(RuntimeAddress(C_entry));
- } else {
#ifndef PRODUCT
- if (PrintC1Statistics) {
- __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
- }
+ if (PrintC1Statistics) {
+ __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
+ }
#endif
- __ call(RuntimeAddress(copyfunc_addr));
- }
+ __ call(RuntimeAddress(copyfunc_addr));
#endif // _WIN64
#else
__ push(length);
@@ -3108,26 +3101,20 @@
__ push(src_pos);
__ push(src);
- if (copyfunc_addr == NULL) { // Use C version if stub was not generated
- __ call_VM_leaf(C_entry, 5); // removes pushed parameter from the stack
- } else {
#ifndef PRODUCT
- if (PrintC1Statistics) {
- __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
- }
+ if (PrintC1Statistics) {
+ __ incrementl(ExternalAddress((address)&Runtime1::_generic_arraycopystub_cnt));
+ }
#endif
- __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
- }
+ __ call_VM_leaf(copyfunc_addr, 5); // removes pushed parameter from the stack
#endif // _LP64
__ cmpl(rax, 0);
__ jcc(Assembler::equal, *stub->continuation());
- if (copyfunc_addr != NULL) {
- __ mov(tmp, rax);
- __ xorl(tmp, -1);
- }
+ __ mov(tmp, rax);
+ __ xorl(tmp, -1);
// Reload values from the stack so they are where the stub
// expects them.
@@ -3137,11 +3124,9 @@
__ movptr (src_pos, Address(rsp, 3*BytesPerWord));
__ movptr (src, Address(rsp, 4*BytesPerWord));
- if (copyfunc_addr != NULL) {
- __ subl(length, tmp);
- __ addl(src_pos, tmp);
- __ addl(dst_pos, tmp);
- }
+ __ subl(length, tmp);
+ __ addl(src_pos, tmp);
+ __ addl(dst_pos, tmp);
__ jmp(*stub->entry());
__ bind(*stub->continuation());
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,7 +29,7 @@
#include "c1/c1_Runtime1.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
--- a/src/hotspot/cpu/x86/frame_x86.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/frame_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -154,4 +154,6 @@
// deoptimization support
void interpreter_frame_set_last_sp(intptr_t* sp);
+ static jint interpreter_frame_expression_stack_direction() { return -1; }
+
#endif // CPU_X86_VM_FRAME_X86_HPP
--- a/src/hotspot/cpu/x86/frame_x86.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/frame_x86.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -223,10 +223,6 @@
return monitor_end-1;
}
-
-inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }
-
-
// Entry frames
inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/heapRegion.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interp_masm.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
+#include "utilities/macros.hpp"
+
+#define __ masm->
+
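+// Generate the SATB pre-barrier for an oop array copy. When concurrent
+// marking is active, call into the runtime so the previous contents of
+// the destination range are enqueued before being overwritten.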
+void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count) {
+ bool dest_uninitialized = (decorators & AS_DEST_NOT_INITIALIZED) != 0;
+
+ if (!dest_uninitialized) {
+ Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread);
+#ifndef _LP64
+ __ push(thread);
+ __ get_thread(thread);
+#endif
+
+ Label filtered;
+ Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
+ SATBMarkQueue::byte_offset_of_active()));
+ // Is marking active?
+ if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
+ __ cmpl(in_progress, 0);
+ } else {
+ assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
+ __ cmpb(in_progress, 0);
+ }
+
+ NOT_LP64(__ pop(thread);)
+
+ __ jcc(Assembler::equal, filtered);
+
+ __ pusha(); // push registers
+#ifdef _LP64
+ if (count == c_rarg0) {
+ if (addr == c_rarg1) {
+ // exactly backwards!!
+ __ xchgptr(c_rarg1, c_rarg0);
+ } else {
+ __ movptr(c_rarg1, count);
+ __ movptr(c_rarg0, addr);
+ }
+ } else {
+ __ movptr(c_rarg0, addr);
+ __ movptr(c_rarg1, count);
+ }
+ if (UseCompressedOops) {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
+ } else {
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
+ }
+#else
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry),
+ addr, count);
+#endif
+ __ popa();
+
+ __ bind(filtered);
+ }
+}
+
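+// Generate the G1 post-barrier for an oop array copy: the written range
+// is handed to the runtime, which dirties and enqueues the covered cards;
+// pusha/popa preserve the caller's registers around the call.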
+void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp) {
+ __ pusha(); // push registers (overkill)
+#ifdef _LP64
+ if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
+ assert_different_registers(c_rarg1, addr);
+ __ mov(c_rarg1, count);
+ __ mov(c_rarg0, addr);
+ } else {
+ assert_different_registers(c_rarg0, count);
+ __ mov(c_rarg0, addr);
+ __ mov(c_rarg1, count);
+ }
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
+#else
+ __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry),
+ addr, count);
+#endif
+ __ popa();
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class G1BarrierSetAssembler: public ModRefBarrierSetAssembler {
+ protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count);
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp);
+};
+
+#endif // CPU_X86_GC_G1_G1BARRIERSETASSEMBLER_X86_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_GC_SHARED_BARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_SHARED_BARRIERSETASSEMBLER_X86_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "memory/allocation.hpp"
+#include "oops/access.hpp"
+
+class InterpreterMacroAssembler;
+
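+// CPU-specific part of a BarrierSet: emits the GC barrier code that
+// generated stubs (currently arraycopy) need around the copy loop.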
+class BarrierSetAssembler: public CHeapObj<mtGC> {
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {}
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {}
+};
+
+#endif // CPU_X86_GC_SHARED_BARRIERSETASSEMBLER_X86_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/collectedHeap.hpp"
+
+#define __ masm->
+
+#ifdef PRODUCT
+#define BLOCK_COMMENT(str) /* nothing */
+#else
+#define BLOCK_COMMENT(str) __ block_comment(str)
+#endif
+
+#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
+
+#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
+
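+// Dirty all card table entries covering the copied oop range. 'end'
+// aliases 'count' and ends up holding the card span to mark, which the
+// loop counts down through zero.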
+void CardTableBarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
+ Register addr, Register count, Register tmp) {
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
+ CardTable* ct = ctbs->card_table();
+ assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
+ intptr_t disp = (intptr_t) ct->byte_map_base();
+
+ Label L_loop, L_done;
+ const Register end = count;
+ assert_different_registers(addr, end);
+
+ __ testl(count, count);
+ __ jcc(Assembler::zero, L_done); // zero count - nothing to do
+
+#ifdef _LP64
+ __ leaq(end, Address(addr, count, TIMES_OOP, 0)); // end == addr+count*oop_size
+ __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
+ __ shrptr(addr, CardTable::card_shift);
+ __ shrptr(end, CardTable::card_shift);
+ __ subptr(end, addr); // end --> cards count
+
+ __ mov64(tmp, disp);
+ __ addptr(addr, tmp);
+__ BIND(L_loop);
+ __ movb(Address(addr, count, Address::times_1), 0);
+ __ decrement(count);
+ __ jcc(Assembler::greaterEqual, L_loop);
+#else
+ __ lea(end, Address(addr, count, Address::times_ptr, -wordSize));
+ __ shrptr(addr, CardTable::card_shift);
+ __ shrptr(end, CardTable::card_shift);
+ __ subptr(end, addr); // end --> count
+__ BIND(L_loop);
+ Address cardtable(addr, count, Address::times_1, disp);
+ __ movb(cardtable, 0);
+ __ decrement(count);
+ __ jcc(Assembler::greaterEqual, L_loop);
+#endif
+
+__ BIND(L_done);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shared/cardTableBarrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+class CardTableBarrierSetAssembler: public ModRefBarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr,
+ Register count, Register tmp);
+};
+
+#endif // CPU_X86_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_X86_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shared/modRefBarrierSetAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,83 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
+#include "gc/shared/modRefBarrierSetAssembler.hpp"
+
+#define __ masm->
+
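+// The prologue stashes whatever the post barrier will need (the element
+// count in r11, or the destination in r11/rdx) in registers the copy core
+// leaves intact; arraycopy_epilogue below picks them back up.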
+void ModRefBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {
+ bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+ bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
+ bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
+
+ if (type == T_OBJECT || type == T_ARRAY) {
+#ifdef _LP64
+ if (!checkcast && !obj_int) {
+ // Save count for barrier
+ __ movptr(r11, count);
+ } else if (disjoint && obj_int) {
+ // Save dst in r11 in the disjoint case
+ __ movq(r11, dst);
+ }
+#else
+ if (disjoint) {
+ __ mov(rdx, dst); // save 'to'
+ }
+#endif
+ gen_write_ref_array_pre_barrier(masm, decorators, dst, count);
+ }
+}
+
+void ModRefBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count) {
+ bool checkcast = (decorators & ARRAYCOPY_CHECKCAST) != 0;
+ bool disjoint = (decorators & ARRAYCOPY_DISJOINT) != 0;
+ bool obj_int = type == T_OBJECT LP64_ONLY(&& UseCompressedOops);
+ Register tmp = rax;
+
+ if (type == T_OBJECT || type == T_ARRAY) {
+#ifdef _LP64
+ if (!checkcast && !obj_int) {
+ // Save count for barrier
+ count = r11;
+ } else if (disjoint && obj_int) {
+ // Use the saved dst in the disjoint case
+ dst = r11;
+ } else if (checkcast) {
+ tmp = rscratch1;
+ }
+#else
+ if (disjoint) {
+ __ mov(dst, rdx); // restore 'to'
+ }
+#endif
+ gen_write_ref_array_post_barrier(masm, decorators, dst, count, tmp);
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/x86/gc/shared/modRefBarrierSetAssembler_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP
+#define CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP
+
+#include "asm/macroAssembler.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
+
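+// Common arraycopy barrier logic for barrier sets that track reference
+// stores; subclasses override the pre/post array-write hooks they need.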
+class ModRefBarrierSetAssembler: public BarrierSetAssembler {
+protected:
+ virtual void gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count) {}
+ virtual void gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators, Register addr, Register count, Register tmp) {}
+
+public:
+ virtual void arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count);
+ virtual void arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
+ Register src, Register dst, Register count);
+};
+
+#endif // CPU_X86_GC_SHARED_MODREFBARRIERSETASSEMBLER_X86_HPP
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,6 +35,7 @@
#include "prims/jvmtiThreadState.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/cpu/x86/interpreterRT_x86.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/interpreterRT_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,8 @@
#ifndef CPU_X86_VM_INTERPRETERRT_X86_HPP
#define CPU_X86_VM_INTERPRETERRT_X86_HPP
-#include "memory/allocation.hpp"
+// This is included in the middle of class InterpreterRuntime.
+// Do not include files here.
// native method calls
@@ -55,19 +56,7 @@
public:
// Creation
- SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) : NativeSignatureIterator(method) {
- _masm = new MacroAssembler(buffer);
-#ifdef AMD64
-#ifdef _WIN64
- _num_args = (method->is_static() ? 1 : 0);
- _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address
-#else
- _num_int_args = (method->is_static() ? 1 : 0);
- _num_fp_args = 0;
- _stack_offset = wordSize; // don't overwrite return address
-#endif // _WIN64
-#endif // AMD64
- }
+ SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer);
// Code generation
void generate(uint64_t fingerprint);
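
Note: the constructor moves out of this header because of the comment added above: the file is spliced into the middle of a class body, so any #include here would expand inside that class. The shape of the splice (assumed from interpreterRuntime.hpp via the usual CPU_HEADER dispatch; the include site itself is not part of this patch):

    // class InterpreterRuntime: public AllStatic {
    //   ...
    // #include CPU_HEADER(interpreterRT)   // this header's text lands here
    //   ...
    // };

The out-of-line definitions in the two interpreterRT_x86_*.cpp files that follow can then include interp_masm.hpp and allocate the MacroAssembler freely.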
--- a/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/interpreterRT_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -31,7 +32,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
@@ -39,6 +40,21 @@
// Implementation of SignatureHandlerGenerator
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) :
+ NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+#ifdef AMD64
+#ifdef _WIN64
+ _num_args = (method->is_static() ? 1 : 0);
+ _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address
+#else
+ _num_int_args = (method->is_static() ? 1 : 0);
+ _num_fp_args = 0;
+ _stack_offset = wordSize; // don't overwrite return address
+#endif // _WIN64
+#endif // AMD64
+}
+
void InterpreterRuntime::SignatureHandlerGenerator::pass_int() {
move(offset(), jni_offset() + 1);
}
--- a/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/interpreterRT_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "interpreter/interp_masm.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/allocation.inline.hpp"
@@ -31,13 +32,28 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
#define __ _masm->
// Implementation of SignatureHandlerGenerator
+InterpreterRuntime::SignatureHandlerGenerator::SignatureHandlerGenerator(const methodHandle& method, CodeBuffer* buffer) :
+ NativeSignatureIterator(method) {
+ _masm = new MacroAssembler(buffer);
+#ifdef AMD64
+#ifdef _WIN64
+ _num_args = (method->is_static() ? 1 : 0);
+ _stack_offset = (Argument::n_int_register_parameters_c+1)* wordSize; // don't overwrite return address
+#else
+ _num_int_args = (method->is_static() ? 1 : 0);
+ _num_fp_args = 0;
+ _stack_offset = wordSize; // don't overwrite return address
+#endif // _WIN64
+#endif // AMD64
+}
+
Register InterpreterRuntime::SignatureHandlerGenerator::from() { return r14; }
Register InterpreterRuntime::SignatureHandlerGenerator::to() { return rsp; }
Register InterpreterRuntime::SignatureHandlerGenerator::temp() { return rscratch1; }
--- a/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/jvmciCodeInstaller_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "compiler/disassembler.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "jvmci/jvmciEnv.hpp"
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
@@ -36,7 +36,7 @@
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
@@ -5409,8 +5409,8 @@
Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
DirtyCardQueue::byte_offset_of_buf()));
- CardTableModRefBS* ctbs =
- barrier_set_cast<CardTableModRefBS>(Universe::heap()->barrier_set());
+ CardTableBarrierSet* ctbs =
+ barrier_set_cast<CardTableBarrierSet>(Universe::heap()->barrier_set());
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
@@ -5497,10 +5497,10 @@
// Does a store check for the oop in register obj. The content of
// register obj is destroyed afterwards.
BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef,
+ assert(bs->kind() == BarrierSet::CardTableBarrierSet,
"Wrong barrier set kind");
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
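
Note: the rename above separates the barrier set object (CardTableBarrierSet) from the CardTable that owns byte_map_base; the store-check arithmetic itself is unchanged. A minimal stand-alone model of that arithmetic (the 512-byte card size, card_shift == 9, and the 0/0xff card values mirror HotSpot defaults; everything else is simplified for illustration):

    #include <cassert>
    #include <cstring>
    #include <stdint.h>

    const int     card_shift     = 9;
    const uint8_t dirty_card_val = 0;
    const uint8_t clean_card_val = 0xff;

    struct CardTableModel {
      uint8_t* byte_map_base;  // biased so byte_map_base[addr >> card_shift] is the card

      void store_check(uintptr_t field_addr) {
        byte_map_base[field_addr >> card_shift] = dirty_card_val;
      }
    };

    int main() {
      static uint8_t cards[1024];
      std::memset(cards, clean_card_val, sizeof(cards));
      CardTableModel ct = { cards };      // pretend the heap starts at address 0
      ct.store_check(5000);               // 5000 >> 9 == 9
      assert(cards[9] == dirty_card_val);
      assert(cards[8] == clean_card_val);
      return 0;
    }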
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,6 +31,8 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "utilities/preserveException.hpp"
#define __ _masm->
--- a/src/hotspot/cpu/x86/runtime_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/runtime_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/runtime.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/x86/runtime_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/runtime_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,6 @@
#include "code/vmreg.hpp"
#include "interpreter/interpreter.hpp"
#include "opto/runtime.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframeArray.hpp"
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,10 +28,12 @@
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,10 +32,12 @@
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_32.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@@ -668,107 +668,6 @@
return start;
}
- //
- // Generate pre-barrier for array stores
- //
- // Input:
- // start - starting address
- // count - element count
- void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) {
- assert_different_registers(start, count);
- BarrierSet* bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
-#if INCLUDE_ALL_GCS
- case BarrierSet::G1BarrierSet:
- // With G1, don't generate the call if we statically know that the target in uninitialized
- if (!uninitialized_target) {
- Register thread = rax;
- Label filtered;
- __ push(thread);
- __ get_thread(thread);
- Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
- SATBMarkQueue::byte_offset_of_active()));
- // Is marking active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ cmpl(in_progress, 0);
- } else {
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ cmpb(in_progress, 0);
- }
- __ pop(thread);
- __ jcc(Assembler::equal, filtered);
-
- __ pusha(); // push registers
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre),
- start, count);
- __ popa();
-
- __ bind(filtered);
- }
- break;
-#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
- break;
- default :
- ShouldNotReachHere();
-
- }
- }
-
-
- //
- // Generate a post-barrier for an array store
- //
- // start - starting address
- // count - element count
- //
- // The two input registers are overwritten.
- //
- void gen_write_ref_array_post_barrier(Register start, Register count) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert_different_registers(start, count);
- switch (bs->kind()) {
-#if INCLUDE_ALL_GCS
- case BarrierSet::G1BarrierSet:
- {
- __ pusha(); // push registers
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post),
- start, count);
- __ popa();
- }
- break;
-#endif // INCLUDE_ALL_GCS
-
- case BarrierSet::CardTableModRef:
- {
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
- CardTable* ct = ctbs->card_table();
- assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust this code");
-
- Label L_loop;
- const Register end = count; // elements count; end == start+count-1
- assert_different_registers(start, end);
-
- __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
- __ shrptr(start, CardTable::card_shift);
- __ shrptr(end, CardTable::card_shift);
- __ subptr(end, start); // end --> count
- __ BIND(L_loop);
- intptr_t disp = (intptr_t) ct->byte_map_base();
- Address cardtable(start, count, Address::times_1, disp);
- __ movb(cardtable, 0);
- __ decrement(count);
- __ jcc(Assembler::greaterEqual, L_loop);
- }
- break;
- case BarrierSet::ModRef:
- break;
- default :
- ShouldNotReachHere();
-
- }
- }
-
// Copy 64 bytes chunks
//
@@ -936,9 +835,18 @@
if (t == T_OBJECT) {
__ testl(count, count);
__ jcc(Assembler::zero, L_0_count);
- gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
- __ mov(saved_to, to); // save 'to'
+ }
+
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
}
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
__ subptr(to, from); // to --> to_from
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
@@ -1024,10 +932,10 @@
__ BIND(L_copy_2_bytes);
}
+ __ movl(count, Address(rsp, 12+12)); // reread 'count'
+ bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
+
if (t == T_OBJECT) {
- __ movl(count, Address(rsp, 12+12)); // reread 'count'
- __ mov(to, saved_to); // restore 'to'
- gen_write_ref_array_post_barrier(to, count);
__ BIND(L_0_count);
}
inc_copy_counter_np(t);
@@ -1116,8 +1024,18 @@
if (t == T_OBJECT) {
__ testl(count, count);
__ jcc(Assembler::zero, L_0_count);
- gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized);
+ }
+
+ DecoratorSet decorators = 0;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
}
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, t, from, to, count);
// copy from high to low
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
@@ -1216,9 +1134,11 @@
} else {
__ BIND(L_copy_2_bytes);
}
+
+ __ movl2ptr(count, Address(rsp, 12+12)); // reread count
+ bs->arraycopy_epilogue(_masm, decorators, t, from, to, count);
+
if (t == T_OBJECT) {
- __ movl2ptr(count, Address(rsp, 12+12)); // reread count
- gen_write_ref_array_post_barrier(to, count);
__ BIND(L_0_count);
}
inc_copy_counter_np(t);
@@ -1463,8 +1383,16 @@
Address to_element_addr(end_to, count, Address::times_ptr, 0);
Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
+ DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+
+ BasicType type = T_OBJECT;
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
+
// Copy from low to high addresses, indexed from the end of each array.
- gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
__ lea(end_from, end_from_addr);
__ lea(end_to, end_to_addr);
assert(length == count, ""); // else fix next line:
@@ -1521,7 +1449,7 @@
__ BIND(L_post_barrier);
__ movptr(to, to_arg); // reload
- gen_write_ref_array_post_barrier(to, count);
+ bs->arraycopy_epilogue(_masm, decorators, type, from, to, count);
// Common exit point (success or failure).
__ BIND(L_done);
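
Note: the stubs in this file now describe each copy to the barrier set through a DecoratorSet bitmask instead of ad-hoc boolean parameters. A stand-alone model of how the flags compose (the bit positions are illustrative, not the real HotSpot constants):

    #include <cstdio>

    typedef unsigned long long DecoratorSet;  // HotSpot's DecoratorSet is a 64-bit bitmask

    const DecoratorSet ARRAYCOPY_CHECKCAST     = 1ULL << 0;
    const DecoratorSet ARRAYCOPY_DISJOINT      = 1ULL << 1;
    const DecoratorSet ARRAYCOPY_ALIGNED       = 1ULL << 2;
    const DecoratorSet AS_DEST_NOT_INITIALIZED = 1ULL << 3;

    // Mirrors the flag assembly in the hunks above.
    DecoratorSet describe_copy(bool disjoint, bool aligned, bool dest_uninitialized) {
      DecoratorSet decorators = disjoint ? ARRAYCOPY_DISJOINT : 0;
      if (dest_uninitialized) {
        decorators |= AS_DEST_NOT_INITIALIZED;
      }
      if (aligned) {
        decorators |= ARRAYCOPY_ALIGNED;
      }
      return decorators;
    }

    int main() {
      DecoratorSet d = describe_copy(true, false, true);
      if ((d & AS_DEST_NOT_INITIALIZED) != 0) {
        // e.g. a G1-style barrier set can skip its SATB pre barrier here
        std::printf("destination is fresh: pre barrier not needed\n");
      }
      return 0;
    }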
--- a/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/stubGenerator_x86_64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,8 +26,8 @@
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "ci/ciUtilities.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_x86.hpp"
#include "oops/instanceOop.hpp"
@@ -1190,119 +1190,6 @@
#endif
}
- // Generate code for an array write pre barrier
- //
- // addr - starting address
- // count - element count
- // tmp - scratch register
- //
- // Destroy no registers!
- //
- void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- // With G1, don't generate the call if we statically know that the target in uninitialized
- if (!dest_uninitialized) {
- Label filtered;
- Address in_progress(r15_thread, in_bytes(JavaThread::satb_mark_queue_offset() +
- SATBMarkQueue::byte_offset_of_active()));
- // Is marking active?
- if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
- __ cmpl(in_progress, 0);
- } else {
- assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
- __ cmpb(in_progress, 0);
- }
- __ jcc(Assembler::equal, filtered);
-
- __ pusha(); // push registers
- if (count == c_rarg0) {
- if (addr == c_rarg1) {
- // exactly backwards!!
- __ xchgptr(c_rarg1, c_rarg0);
- } else {
- __ movptr(c_rarg1, count);
- __ movptr(c_rarg0, addr);
- }
- } else {
- __ movptr(c_rarg0, addr);
- __ movptr(c_rarg1, count);
- }
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2);
- __ popa();
-
- __ bind(filtered);
- }
- break;
- case BarrierSet::CardTableModRef:
- break;
- default:
- ShouldNotReachHere();
-
- }
- }
-
- //
- // Generate code for an array write post barrier
- //
- // Input:
- // start - register containing starting address of destination array
- // count - elements count
- // scratch - scratch register
- //
- // The input registers are overwritten.
- //
- void gen_write_ref_array_post_barrier(Register start, Register count, Register scratch) {
- assert_different_registers(start, count, scratch);
- BarrierSet* bs = Universe::heap()->barrier_set();
- switch (bs->kind()) {
- case BarrierSet::G1BarrierSet:
- {
- __ pusha(); // push registers (overkill)
- if (c_rarg0 == count) { // On win64 c_rarg0 == rcx
- assert_different_registers(c_rarg1, start);
- __ mov(c_rarg1, count);
- __ mov(c_rarg0, start);
- } else {
- assert_different_registers(c_rarg0, count);
- __ mov(c_rarg0, start);
- __ mov(c_rarg1, count);
- }
- __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post), 2);
- __ popa();
- }
- break;
- case BarrierSet::CardTableModRef:
- {
- Label L_loop, L_done;
- const Register end = count;
-
- __ testl(count, count);
- __ jcc(Assembler::zero, L_done); // zero count - nothing to do
-
- __ leaq(end, Address(start, count, TIMES_OOP, 0)); // end == start+count*oop_size
- __ subptr(end, BytesPerHeapOop); // end - 1 to make inclusive
- __ shrptr(start, CardTable::card_shift);
- __ shrptr(end, CardTable::card_shift);
- __ subptr(end, start); // end --> cards count
-
- int64_t disp = ci_card_table_address_as<int64_t>();
- __ mov64(scratch, disp);
- __ addptr(start, scratch);
- __ BIND(L_loop);
- __ movb(Address(start, count, Address::times_1), 0);
- __ decrement(count);
- __ jcc(Assembler::greaterEqual, L_loop);
- __ BIND(L_done);
- }
- break;
- default:
- ShouldNotReachHere();
-
- }
- }
-
// Copy big chunks forward
//
@@ -1918,7 +1805,6 @@
const Register qword_count = count;
const Register end_from = from; // source array end address
const Register end_to = to; // destination array end address
- const Register saved_to = r11; // saved destination array address
// End pointers are inclusive, and if count is not zero they point
// to the last unit copied: end_to[0] := end_from[0]
@@ -1933,10 +1819,18 @@
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
- if (is_oop) {
- __ movq(saved_to, to);
- gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
+
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
}
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BasicType type = is_oop ? T_OBJECT : T_INT;
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
// 'from', 'to' and 'count' are now valid
__ movptr(dword_count, count);
@@ -1963,9 +1857,7 @@
__ movl(Address(end_to, 8), rax);
__ BIND(L_exit);
- if (is_oop) {
- gen_write_ref_array_post_barrier(saved_to, dword_count, rax);
- }
+ bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ vzeroupper();
@@ -2022,10 +1914,18 @@
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
- if (is_oop) {
- // no registers are destroyed by this call
- gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
+ DecoratorSet decorators = 0;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
}
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BasicType type = is_oop ? T_OBJECT : T_INT;
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ // no registers are destroyed by this call
+ bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
assert_clean_int(count, rax); // Make sure 'count' is clean int.
// 'from', 'to' and 'count' are now valid
@@ -2062,9 +1962,7 @@
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
__ BIND(L_exit);
- if (is_oop) {
- gen_write_ref_array_post_barrier(to, dword_count, rax);
- }
+ bs->arraycopy_epilogue(_masm, decorators, type, from, to, dword_count);
restore_arg_regs();
inc_counter_np(SharedRuntime::_jint_array_copy_ctr); // Update counter after rscratch1 is free
__ xorptr(rax, rax); // return 0
@@ -2102,7 +2000,6 @@
const Register qword_count = rdx; // elements count
const Register end_from = from; // source array end address
const Register end_to = rcx; // destination array end address
- const Register saved_to = to;
const Register saved_count = r11;
// End pointers are inclusive, and if count is not zero they point
// to the last unit copied: end_to[0] := end_from[0]
@@ -2120,12 +2017,18 @@
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'qword_count' are now valid
- if (is_oop) {
- // Save to and count for store barrier
- __ movptr(saved_count, qword_count);
- // no registers are destroyed by this call
- gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized);
+
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
}
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BasicType type = is_oop ? T_OBJECT : T_LONG;
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
// Copy from low to high addresses. Use 'to' as scratch.
__ lea(end_from, Address(from, qword_count, Address::times_8, -8));
@@ -2154,10 +2057,8 @@
// Copy in multi-bytes chunks
copy_bytes_forward(end_from, end_to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
- if (is_oop) {
__ BIND(L_exit);
- gen_write_ref_array_post_barrier(saved_to, saved_count, rax);
- }
+ bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
restore_arg_regs();
if (is_oop) {
inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
@@ -2209,12 +2110,18 @@
setup_arg_regs(); // from => rdi, to => rsi, count => rdx
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'qword_count' are now valid
- if (is_oop) {
- // Save to and count for store barrier
- __ movptr(saved_count, qword_count);
- // No registers are destroyed by this call
- gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized);
+
+ DecoratorSet decorators = ARRAYCOPY_DISJOINT;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
}
+ if (aligned) {
+ decorators |= ARRAYCOPY_ALIGNED;
+ }
+
+ BasicType type = is_oop ? T_OBJECT : T_LONG;
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, type, from, to, qword_count);
__ jmp(L_copy_bytes);
@@ -2239,10 +2146,8 @@
// Copy in multi-bytes chunks
copy_bytes_backward(from, to, qword_count, rax, L_copy_bytes, L_copy_8_bytes);
- if (is_oop) {
__ BIND(L_exit);
- gen_write_ref_array_post_barrier(to, saved_count, rax);
- }
+ bs->arraycopy_epilogue(_masm, decorators, type, from, to, qword_count);
restore_arg_regs();
if (is_oop) {
inc_counter_np(SharedRuntime::_oop_array_copy_ctr); // Update counter after rscratch1 is free
@@ -2389,7 +2294,14 @@
Address from_element_addr(end_from, count, TIMES_OOP, 0);
Address to_element_addr(end_to, count, TIMES_OOP, 0);
- gen_write_ref_array_pre_barrier(to, count, dest_uninitialized);
+ DecoratorSet decorators = ARRAYCOPY_CHECKCAST;
+ if (dest_uninitialized) {
+ decorators |= AS_DEST_NOT_INITIALIZED;
+ }
+
+ BasicType type = T_OBJECT;
+ BarrierSetAssembler *bs = Universe::heap()->barrier_set()->barrier_set_assembler();
+ bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
// Copy from low to high addresses, indexed from the end of each array.
__ lea(end_from, end_from_addr);
@@ -2442,7 +2354,7 @@
__ xorptr(rax, rax); // return 0 on success
__ BIND(L_post_barrier);
- gen_write_ref_array_post_barrier(to, r14_length, rscratch1);
+ bs->arraycopy_epilogue(_masm, decorators, type, from, to, r14_length);
// Common exit point (success or failure).
__ BIND(L_done);
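
Note: every stub rewritten above now shares one shape, condensed from the hunks in this file (names as in the diff, not compilable on its own):

    // DecoratorSet decorators = ...;                    // describe the copy
    // if (dest_uninitialized) decorators |= AS_DEST_NOT_INITIALIZED;
    // BasicType type = is_oop ? T_OBJECT : T_LONG;
    // BarrierSetAssembler* bs =
    //     Universe::heap()->barrier_set()->barrier_set_assembler();
    // bs->arraycopy_prologue(_masm, decorators, type, from, to, count);
    // ...                                               // emit the copy loop
    // bs->arraycopy_epilogue(_masm, decorators, type, from, to, count);

The prologue/epilogue calls are no longer guarded by is_oop: the barrier set assembler inspects the BasicType itself and emits nothing for primitive copies, which is why the saved_to/saved_count bookkeeping could be deleted.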
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -33,6 +33,8 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
@@ -198,7 +200,7 @@
}
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
{
if (val == noreg) {
__ store_heap_oop_null(obj);
--- a/src/hotspot/cpu/x86/x86_32.ad Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/x86_32.ad Sat Mar 24 01:08:35 2018 +0100
@@ -391,7 +391,7 @@
int format) {
#ifdef ASSERT
if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
- assert(oopDesc::is_oop(cast_to_oop(d32)) && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
+ assert(oopDesc::is_oop(cast_to_oop(d32)) && (ScavengeRootsInCode || !Universe::heap()->is_scavengable(cast_to_oop(d32))), "cannot embed scavengable oops in code");
}
#endif
cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -786,7 +786,7 @@
}
if (cbuf) {
MacroAssembler _masm(cbuf);
- // EVEX spills remain EVEX: Compressed displacemement is better than AVX on spill mem operations,
+    // EVEX spills remain EVEX: Compressed displacement is better than AVX on spill mem operations,
// it maps more cases to single byte displacement
_masm.set_managed();
if (reg_lo+1 == reg_hi) { // double move?
@@ -976,7 +976,7 @@
dst_offset_size = (tmp_dst_offset == 0) ? 0 : ((tmp_dst_offset < 0x80) ? 1 : 4);
calc_size += 3+src_offset_size + 3+dst_offset_size;
break;
- }
+ }
case Op_VecX:
case Op_VecY:
case Op_VecZ:
--- a/src/hotspot/cpu/x86/x86_64.ad Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/x86/x86_64.ad Sat Mar 24 01:08:35 2018 +0100
@@ -669,7 +669,7 @@
if (rspec.reloc()->type() == relocInfo::oop_type &&
d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
- assert(oopDesc::is_oop(cast_to_oop((intptr_t)d32)) && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
+ assert(oopDesc::is_oop(cast_to_oop((intptr_t)d32)) && (ScavengeRootsInCode || !Universe::heap()->is_scavengable(cast_to_oop((intptr_t)d32))), "cannot embed scavengable oops in code");
}
#endif
cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -696,7 +696,7 @@
if (rspec.reloc()->type() == relocInfo::oop_type &&
d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
- assert(oopDesc::is_oop(cast_to_oop(d64)) && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()),
+ assert(oopDesc::is_oop(cast_to_oop(d64)) && (ScavengeRootsInCode || !Universe::heap()->is_scavengable(cast_to_oop(d64))),
"cannot embed scavengable oops in code");
}
#endif
--- a/src/hotspot/cpu/zero/assembler_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/assembler_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,13 +25,13 @@
#include "precompiled.hpp"
#include "assembler_zero.inline.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -41,7 +41,7 @@
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/cpu/zero/frame_zero.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/frame_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -75,4 +75,6 @@
char* buf,
int buflen) const;
+ static jint interpreter_frame_expression_stack_direction() { return -1; }
+
#endif // CPU_ZERO_VM_FRAME_ZERO_HPP
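
Note: frame_zero.hpp now carries interpreter_frame_expression_stack_direction() directly (the inline definition is removed from frame_zero.inline.hpp just below), so shared code can query the stack growth direction without pulling frame.inline.hpp into headers. Usage shape in shared interpreter code (names illustrative):

    // int dir = frame::interpreter_frame_expression_stack_direction();
    // // -1 on Zero: the expression stack grows toward lower addresses,
    // // so slot i sits at expression_stack_base + i * dir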
--- a/src/hotspot/cpu/zero/frame_zero.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/frame_zero.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -130,10 +130,6 @@
return monitor_end - 1;
}
-inline jint frame::interpreter_frame_expression_stack_direction() {
- return -1;
-}
-
// Return a unique id for this frame. The id must have a value where
// we can distinguish identity and younger/older relationship. NULL
// represents an invalid (incomparable) frame.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/zero/gc/g1/g1BarrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP
+#define CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP
+
+class G1BarrierSetAssembler;
+
+#endif // CPU_ZERO_GC_G1_G1BARRIERSETASSEMBLER_ZERO_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/zero/gc/shared/barrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ZERO_GC_SHARED_BARRIERSETASSEMBLER_ZERO_HPP
+#define CPU_ZERO_GC_SHARED_BARRIERSETASSEMBLER_ZERO_HPP
+
+class BarrierSetAssembler;
+
+#endif // CPU_ZERO_GC_SHARED_BARRIERSETASSEMBLER_ZERO_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/zero/gc/shared/cardTableBarrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP
+#define CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP
+
+class CardTableBarrierSetAssembler;
+
+#endif // CPU_ZERO_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_ZERO_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/cpu/zero/gc/shared/modRefBarrierSetAssembler_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP
+#define CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP
+
+class ModRefBarrierSetAssembler;
+
+#endif // CPU_ZERO_GC_SHARED_MODREFBARRIERSETASSEMBLER_ZERO_HPP
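
Note: the four empty Zero headers above exist because shared GC code includes the CPU-specific barrier assembler headers unconditionally; Zero generates no code, so a forward declaration satisfies the include. Assumed include mechanism (the CPU_HEADER macro from utilities/macros.hpp; the exact include site is not part of this patch):

    // gc/shared/barrierSetAssembler.hpp (shared), simplified:
    //   #include CPU_HEADER(gc/shared/barrierSetAssembler)
    //   // expands to "gc/shared/barrierSetAssembler_zero.hpp" on Zero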
--- a/src/hotspot/cpu/zero/interpreterRT_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/interpreterRT_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/signature.hpp"
#include "stack_zero.inline.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/cpu/zero/interpreterRT_zero.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/interpreterRT_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -26,7 +26,9 @@
#ifndef CPU_ZERO_VM_INTERPRETERRT_ZERO_HPP
#define CPU_ZERO_VM_INTERPRETERRT_ZERO_HPP
-#include "memory/allocation.hpp"
+// This is included in the middle of class InterpreterRuntime.
+// Do not include files here.
+
class SignatureHandler {
public:
--- a/src/hotspot/cpu/zero/methodHandles_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/methodHandles_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -30,6 +30,7 @@
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/frame.inline.hpp"
#include "prims/methodHandles.hpp"
void MethodHandles::invoke_target(Method* method, TRAPS) {
--- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,6 +31,7 @@
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
#include "vmreg_zero.inline.hpp"
--- a/src/hotspot/cpu/zero/stack_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/cpu/zero/stack_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,10 +24,12 @@
*/
#include "precompiled.hpp"
+#include "interpreter/bytecodeInterpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "runtime/thread.hpp"
#include "stack_zero.hpp"
#include "stack_zero.inline.hpp"
+#include "runtime/frame.inline.hpp"
#include "utilities/align.hpp"
// Inlined causes circular inclusion with thread.hpp
--- a/src/hotspot/os/aix/attachListener_aix.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/aix/attachListener_aix.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
*/
#include "precompiled.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/aix/jvm_aix.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/aix/jvm_aix.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/osThread.hpp"
#include <signal.h>
--- a/src/hotspot/os/aix/os_aix.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/aix/os_aix.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -54,7 +54,7 @@
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os/bsd/attachListener_bsd.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/bsd/attachListener_bsd.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/bsd/jvm_bsd.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/bsd/jvm_bsd.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/osThread.hpp"
#include <signal.h>
--- a/src/hotspot/os/bsd/os_bsd.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/bsd/os_bsd.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -44,7 +44,7 @@
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os/linux/attachListener_linux.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/linux/attachListener_linux.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,7 +23,8 @@
*/
#include "precompiled.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "memory/allocation.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/linux/jvm_linux.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/linux/jvm_linux.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/osThread.hpp"
#include <signal.h>
--- a/src/hotspot/os/linux/os_linux.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/linux/os_linux.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -45,7 +45,7 @@
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
--- a/src/hotspot/os/posix/os_posix.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/posix/os_posix.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/os/solaris/attachListener_solaris.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/solaris/attachListener_solaris.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.inline.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/solaris/jvm_solaris.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/solaris/jvm_solaris.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/osThread.hpp"
#include <signal.h>
--- a/src/hotspot/os/solaris/os_solaris.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/solaris/os_solaris.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -44,7 +44,7 @@
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os/windows/attachListener_windows.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/windows/attachListener_windows.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.hpp"
#include "services/attachListener.hpp"
#include "services/dtraceAttacher.hpp"
--- a/src/hotspot/os/windows/jvm_windows.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/windows/jvm_windows.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jvm.h"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/osThread.hpp"
#include <signal.h>
--- a/src/hotspot/os/windows/os_windows.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os/windows/os_windows.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -47,7 +47,7 @@
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -42,7 +42,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,19 @@
#include "runtime/frame.hpp"
#include "runtime/thread.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+
+ intptr_t* sp = last_Java_sp();
+ address pc = _anchor.last_Java_pc();
+
+  // Last_Java_pc is not set if we come here from compiled code.
+ if (pc == NULL)
+ pc = (address) *(sp + 2);
+
+ return frame(sp, pc);
+}
+
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Aix/PPC.
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
Unimplemented();
--- a/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/aix_ppc/thread_aix_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -32,18 +32,7 @@
}
// The `last' frame is the youngest Java frame on the thread's stack.
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-
- intptr_t* sp = last_Java_sp();
- address pc = _anchor.last_Java_pc();
-
- // Last_Java_pc ist not set, if we come here from compiled code.
- if (pc == NULL)
- pc = (address) *(sp + 2);
-
- return frame(sp, pc);
- }
+ frame pd_last_frame();
public:
void set_base_of_stack_pointer(intptr_t* base_sp) {}
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,7 +39,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,12 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+}
+
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
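
Note: the pd_last_frame bodies move from the thread headers to the os_cpu .cpp files because they construct a frame, and the frame constructor lives in frame.inline.hpp, which headers must no longer pull in. A minimal stand-alone model of the anchor-to-frame reconstruction (heavily simplified; names are illustrative). The AIX/PPC variant earlier in this section additionally recovers a missing pc from the ABI frame, pc = *(sp + 2), when the anchor was set by compiled code:

    #include <cassert>
    #include <cstddef>
    #include <stdint.h>

    typedef unsigned char* address;

    struct JavaFrameAnchorModel {
      intptr_t* last_Java_sp;
      intptr_t* last_Java_fp;
      address   last_Java_pc;
    };

    struct FrameModel {
      intptr_t* sp; intptr_t* fp; address pc;
      FrameModel(intptr_t* sp_, intptr_t* fp_, address pc_)
        : sp(sp_), fp(fp_), pc(pc_) {}
    };

    FrameModel pd_last_frame_model(const JavaFrameAnchorModel& a) {
      assert(a.last_Java_sp != NULL);  // "must have last_Java_sp() when suspended"
      return FrameModel(a.last_Java_sp, a.last_Java_fp, a.last_Java_pc);
    }

    int main() {
      intptr_t stack[4] = { 0, 0, 0, 0 };
      JavaFrameAnchorModel anchor = { stack + 3, stack + 1, (address) 0x1234 };
      FrameModel f = pd_last_frame_model(anchor);
      assert(f.sp == stack + 3 && f.pc == (address) 0x1234);
      return 0;
    }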
--- a/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_x86/thread_bsd_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,11 +30,7 @@
_anchor.clear();
}
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
- }
+ frame pd_last_frame();
public:
// Mutators are highly dangerous....
--- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -45,7 +45,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/bsd_zero/thread_bsd_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/thread_bsd_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,11 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ return frame(last_Java_fp(), last_Java_sp());
+}
+
void JavaThread::cache_global_variables() {
// nothing to do
}
--- a/src/hotspot/os_cpu/bsd_zero/thread_bsd_zero.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/bsd_zero/thread_bsd_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -86,10 +86,7 @@
}
private:
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- return frame(last_Java_fp(), last_Java_sp());
- }
+ frame pd_last_frame();
public:
static ByteSize last_Java_fp_offset() {
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -41,7 +41,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -28,6 +28,11 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+}
+
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
--- a/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_aarch64/thread_linux_aarch64.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -41,10 +41,7 @@
_anchor.clear();
}
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
- }
+ frame pd_last_frame();
public:
// Mutators are highly dangerous....
--- a/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/copy_linux_arm.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,20 +26,11 @@
#define OS_CPU_LINUX_ARM_VM_COPY_LINUX_ARM_INLINE_HPP
static void pd_conjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
-#ifdef AARCH64
_Copy_conjoint_words(from, to, count * HeapWordSize);
-#else
- // NOTE: _Copy_* functions on 32-bit ARM expect "to" and "from" arguments in reversed order
- _Copy_conjoint_words(to, from, count * HeapWordSize);
-#endif
}
static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
-#ifdef AARCH64
_Copy_disjoint_words(from, to, count * HeapWordSize);
-#else
- _Copy_disjoint_words(to, from, count * HeapWordSize);
-#endif // AARCH64
}
static void pd_disjoint_words_atomic(const HeapWord* from, HeapWord* to, size_t count) {
@@ -63,11 +54,7 @@
}
static void pd_conjoint_jshorts_atomic(const jshort* from, jshort* to, size_t count) {
-#ifdef AARCH64
_Copy_conjoint_jshorts_atomic(from, to, count * BytesPerShort);
-#else
- _Copy_conjoint_jshorts_atomic(to, from, count * BytesPerShort);
-#endif
}
static void pd_conjoint_jints_atomic(const jint* from, jint* to, size_t count) {
@@ -85,7 +72,7 @@
assert(HeapWordSize == BytesPerLong, "64-bit architecture");
pd_conjoint_words((const HeapWord*)from, (HeapWord*)to, count);
#else
- _Copy_conjoint_jlongs_atomic(to, from, count * BytesPerLong);
+ _Copy_conjoint_jlongs_atomic(from, to, count * BytesPerLong);
#endif
}
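
The removed #ifdef AARCH64 shims existed because the 32-bit ARM _Copy_* stubs took their pointer arguments in reversed (to, from) order, as the deleted NOTE says; the assembly file below is rewritten to accept the standard (from, to) order, so every shim collapses to one call, and the jlong path's argument order is fixed to match. A sketch of the resulting call shape, assuming the stub signature implied by this hunk:

    extern "C" void _Copy_disjoint_words(const HeapWord* from, HeapWord* to,
                                         size_t count_in_bytes);

    static void pd_disjoint_words(const HeapWord* from, HeapWord* to, size_t count) {
      // identical on 32-bit ARM and AArch64 now; no argument swap
      _Copy_disjoint_words(from, to, count * HeapWordSize);
    }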
--- a/src/hotspot/os_cpu/linux_arm/linux_arm_32.s Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/linux_arm_32.s Sat Mar 24 01:08:35 2018 +0100
@@ -49,6 +49,9 @@
.globl _Copy_arrayof_conjoint_jlongs
.type _Copy_arrayof_conjoint_jlongs, %function
+from .req r0
+to .req r1
+
.text
.globl SpinPause
.type SpinPause, %function
@@ -77,7 +80,7 @@
cmp r2, #0
beq disjoint_words_finish
- pld [r1, #0]
+ pld [from, #0]
cmp r2, #12
ble disjoint_words_small
@@ -85,28 +88,28 @@
dw_f2b_loop_32:
subs r2, #32
blt dw_f2b_loop_32_finish
- ldmia r1!, {r3 - r9, ip}
+ ldmia from!, {r3 - r9, ip}
nop
- pld [r1]
- stmia r0!, {r3 - r9, ip}
+ pld [from]
+ stmia to!, {r3 - r9, ip}
bgt dw_f2b_loop_32
dw_f2b_loop_32_finish:
addlts r2, #32
beq disjoint_words_finish
cmp r2, #16
blt disjoint_words_small
- ldmia r1!, {r3 - r6}
+ ldmia from!, {r3 - r6}
subge r2, r2, #16
- stmia r0!, {r3 - r6}
+ stmia to!, {r3 - r6}
beq disjoint_words_finish
disjoint_words_small:
cmp r2, #8
- ldr r7, [r1], #4
- ldrge r8, [r1], #4
- ldrgt r9, [r1], #4
- str r7, [r0], #4
- strge r8, [r0], #4
- strgt r9, [r0], #4
+ ldr r7, [from], #4
+ ldrge r8, [from], #4
+ ldrgt r9, [from], #4
+ str r7, [to], #4
+ strge r8, [to], #4
+ strgt r9, [to], #4
disjoint_words_finish:
ldmia sp!, {r3 - r9, ip}
@@ -122,72 +125,72 @@
cmp r2, #0
beq conjoint_words_finish
- pld [r1, #0]
+ pld [from, #0]
cmp r2, #12
ble conjoint_words_small
- subs r3, r0, r1
+ subs r3, to, from
cmphi r2, r3
bhi cw_b2f_copy
.align 3
cw_f2b_loop_32:
subs r2, #32
blt cw_f2b_loop_32_finish
- ldmia r1!, {r3 - r9, ip}
+ ldmia from!, {r3 - r9, ip}
nop
- pld [r1]
- stmia r0!, {r3 - r9, ip}
+ pld [from]
+ stmia to!, {r3 - r9, ip}
bgt cw_f2b_loop_32
cw_f2b_loop_32_finish:
addlts r2, #32
beq conjoint_words_finish
cmp r2, #16
blt conjoint_words_small
- ldmia r1!, {r3 - r6}
+ ldmia from!, {r3 - r6}
subge r2, r2, #16
- stmia r0!, {r3 - r6}
+ stmia to!, {r3 - r6}
beq conjoint_words_finish
conjoint_words_small:
cmp r2, #8
- ldr r7, [r1], #4
- ldrge r8, [r1], #4
- ldrgt r9, [r1], #4
- str r7, [r0], #4
- strge r8, [r0], #4
- strgt r9, [r0], #4
+ ldr r7, [from], #4
+ ldrge r8, [from], #4
+ ldrgt r9, [from], #4
+ str r7, [to], #4
+ strge r8, [to], #4
+ strgt r9, [to], #4
b conjoint_words_finish
# Src and dest overlap, copy in descending order
cw_b2f_copy:
- add r1, r2
- pld [r1, #-32]
- add r0, r2
+ add from, r2
+ pld [from, #-32]
+ add to, r2
.align 3
cw_b2f_loop_32:
subs r2, #32
blt cw_b2f_loop_32_finish
- ldmdb r1!, {r3-r9,ip}
+ ldmdb from!, {r3-r9,ip}
nop
- pld [r1, #-32]
- stmdb r0!, {r3-r9,ip}
+ pld [from, #-32]
+ stmdb to!, {r3-r9,ip}
bgt cw_b2f_loop_32
cw_b2f_loop_32_finish:
addlts r2, #32
beq conjoint_words_finish
cmp r2, #16
blt cw_b2f_copy_small
- ldmdb r1!, {r3 - r6}
+ ldmdb from!, {r3 - r6}
subge r2, r2, #16
- stmdb r0!, {r3 - r6}
+ stmdb to!, {r3 - r6}
beq conjoint_words_finish
cw_b2f_copy_small:
cmp r2, #8
- ldr r7, [r1, #-4]!
- ldrge r8, [r1, #-4]!
- ldrgt r9, [r1, #-4]!
- str r7, [r0, #-4]!
- strge r8, [r0, #-4]!
- strgt r9, [r0, #-4]!
+ ldr r7, [from, #-4]!
+ ldrge r8, [from, #-4]!
+ ldrgt r9, [from, #-4]!
+ str r7, [to, #-4]!
+ strge r8, [to, #-4]!
+ strgt r9, [to, #-4]!
conjoint_words_finish:
ldmia sp!, {r3 - r9, ip}
@@ -202,15 +205,15 @@
cmp r2, #0
beq conjoint_shorts_finish
- subs r3, r0, r1
+ subs r3, to, from
cmphi r2, r3
bhi cs_b2f_copy
- pld [r1]
+ pld [from]
- ands r3, r0, #3
+ ands r3, to, #3
bne cs_f2b_dest_u
- ands r3, r1, #3
+ ands r3, from, #3
bne cs_f2b_src_u
# Aligned source address
@@ -218,10 +221,10 @@
cs_f2b_loop_32:
subs r2, #32
blt cs_f2b_loop_32_finish
- ldmia r1!, {r3 - r9, ip}
+ ldmia from!, {r3 - r9, ip}
nop
- pld [r1]
- stmia r0!, {r3 - r9, ip}
+ pld [from]
+ stmia to!, {r3 - r9, ip}
bgt cs_f2b_loop_32
cs_f2b_loop_32_finish:
addlts r2, #32
@@ -230,32 +233,32 @@
.align 3
cs_f2b_8_loop:
beq cs_f2b_4
- ldmia r1!, {r4-r5}
+ ldmia from!, {r4-r5}
subs r6, #1
- stmia r0!, {r4-r5}
+ stmia to!, {r4-r5}
bgt cs_f2b_8_loop
cs_f2b_4:
ands r2, #7
beq conjoint_shorts_finish
cmp r2, #4
- ldrh r3, [r1], #2
- ldrgeh r4, [r1], #2
- ldrgth r5, [r1], #2
- strh r3, [r0], #2
- strgeh r4, [r0], #2
- strgth r5, [r0], #2
+ ldrh r3, [from], #2
+ ldrgeh r4, [from], #2
+ ldrgth r5, [from], #2
+ strh r3, [to], #2
+ strgeh r4, [to], #2
+ strgth r5, [to], #2
b conjoint_shorts_finish
# Destination not aligned
cs_f2b_dest_u:
- ldrh r3, [r1], #2
+ ldrh r3, [from], #2
subs r2, #2
- strh r3, [r0], #2
+ strh r3, [to], #2
beq conjoint_shorts_finish
# Check to see if source is not aligned either
- ands r3, r1, #3
+ ands r3, from, #3
beq cs_f2b_loop_32
cs_f2b_src_u:
@@ -263,153 +266,153 @@
blt cs_f2b_8_u
# Load first 2 bytes into r7 and make src ptr word aligned
- bic r1, #3
- ldr r7, [r1], #4
+ bic from, #3
+ ldr r7, [from], #4
# Destination aligned, source not
mov r8, r2, lsr #4
.align 3
cs_f2b_16_u_loop:
mov r3, r7, lsr #16
- ldmia r1!, {r4 - r7}
+ ldmia from!, {r4 - r7}
orr r3, r3, r4, lsl #16
mov r4, r4, lsr #16
- pld [r1]
+ pld [from]
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
- stmia r0!, {r3 - r6}
+ stmia to!, {r3 - r6}
subs r8, #1
bgt cs_f2b_16_u_loop
ands r2, #0xf
beq conjoint_shorts_finish
- sub r1, #2
+ sub from, #2
cs_f2b_8_u:
cmp r2, #8
blt cs_f2b_4_u
- ldrh r4, [r1], #2
- ldr r5, [r1], #4
- ldrh r6, [r1], #2
+ ldrh r4, [from], #2
+ ldr r5, [from], #4
+ ldrh r6, [from], #2
orr r4, r4, r5, lsl #16
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
subs r2, #8
- stmia r0!, {r4 - r5}
+ stmia to!, {r4 - r5}
cs_f2b_4_u:
beq conjoint_shorts_finish
cmp r2, #4
- ldrh r3, [r1], #2
- ldrgeh r4, [r1], #2
- ldrgth r5, [r1], #2
- strh r3, [r0], #2
- strgeh r4, [r0], #2
- strgth r5, [r0], #2
+ ldrh r3, [from], #2
+ ldrgeh r4, [from], #2
+ ldrgth r5, [from], #2
+ strh r3, [to], #2
+ strgeh r4, [to], #2
+ strgth r5, [to], #2
b conjoint_shorts_finish
# Src and dest overlap, copy in descending order
cs_b2f_copy:
- add r1, r2
- pld [r1, #-32]
- add r0, r2
+ add from, r2
+ pld [from, #-32]
+ add to, r2
- ands r3, r0, #3
+ ands r3, to, #3
bne cs_b2f_dest_u
- ands r3, r1, #3
+ ands r3, from, #3
bne cs_b2f_src_u
.align 3
cs_b2f_loop_32:
subs r2, #32
blt cs_b2f_loop_32_finish
- ldmdb r1!, {r3-r9,ip}
+ ldmdb from!, {r3-r9,ip}
nop
- pld [r1, #-32]
- stmdb r0!, {r3-r9,ip}
+ pld [from, #-32]
+ stmdb to!, {r3-r9,ip}
bgt cs_b2f_loop_32
cs_b2f_loop_32_finish:
addlts r2, #32
beq conjoint_shorts_finish
cmp r2, #24
blt cs_b2f_16
- ldmdb r1!, {r3-r8}
+ ldmdb from!, {r3-r8}
sub r2, #24
- stmdb r0!, {r3-r8}
+ stmdb to!, {r3-r8}
beq conjoint_shorts_finish
cs_b2f_16:
cmp r2, #16
blt cs_b2f_8
- ldmdb r1!, {r3-r6}
+ ldmdb from!, {r3-r6}
sub r2, #16
- stmdb r0!, {r3-r6}
+ stmdb to!, {r3-r6}
beq conjoint_shorts_finish
cs_b2f_8:
cmp r2, #8
blt cs_b2f_all_copy
- ldmdb r1!, {r3-r4}
+ ldmdb from!, {r3-r4}
sub r2, #8
- stmdb r0!, {r3-r4}
+ stmdb to!, {r3-r4}
beq conjoint_shorts_finish
cs_b2f_all_copy:
cmp r2, #4
- ldrh r3, [r1, #-2]!
- ldrgeh r4, [r1, #-2]!
- ldrgth r5, [r1, #-2]!
- strh r3, [r0, #-2]!
- strgeh r4, [r0, #-2]!
- strgth r5, [r0, #-2]!
+ ldrh r3, [from, #-2]!
+ ldrgeh r4, [from, #-2]!
+ ldrgth r5, [from, #-2]!
+ strh r3, [to, #-2]!
+ strgeh r4, [to, #-2]!
+ strgth r5, [to, #-2]!
b conjoint_shorts_finish
# Destination not aligned
cs_b2f_dest_u:
- ldrh r3, [r1, #-2]!
- strh r3, [r0, #-2]!
+ ldrh r3, [from, #-2]!
+ strh r3, [to, #-2]!
sub r2, #2
# Check source alignment as well
- ands r3, r1, #3
+ ands r3, from, #3
beq cs_b2f_loop_32
# Source not aligned
cs_b2f_src_u:
- bic r1, #3
+ bic from, #3
.align 3
cs_b2f_16_loop_u:
subs r2, #16
blt cs_b2f_16_loop_u_finished
- ldr r7, [r1]
+ ldr r7, [from]
mov r3, r7
- ldmdb r1!, {r4 - r7}
+ ldmdb from!, {r4 - r7}
mov r4, r4, lsr #16
orr r4, r4, r5, lsl #16
- pld [r1, #-32]
+ pld [from, #-32]
mov r5, r5, lsr #16
orr r5, r5, r6, lsl #16
mov r6, r6, lsr #16
orr r6, r6, r7, lsl #16
mov r7, r7, lsr #16
orr r7, r7, r3, lsl #16
- stmdb r0!, {r4 - r7}
+ stmdb to!, {r4 - r7}
bgt cs_b2f_16_loop_u
beq conjoint_shorts_finish
cs_b2f_16_loop_u_finished:
addlts r2, #16
- ldr r3, [r1]
+ ldr r3, [from]
cmp r2, #10
blt cs_b2f_2_u_loop
- ldmdb r1!, {r4 - r5}
+ ldmdb from!, {r4 - r5}
mov r6, r4, lsr #16
orr r6, r6, r5, lsl #16
mov r7, r5, lsr #16
orr r7, r7, r3, lsl #16
- stmdb r0!, {r6-r7}
+ stmdb to!, {r6-r7}
sub r2, #8
.align 3
cs_b2f_2_u_loop:
subs r2, #2
- ldrh r3, [r1], #-2
- strh r3, [r0, #-2]!
+ ldrh r3, [from], #-2
+ strh r3, [to, #-2]!
bgt cs_b2f_2_u_loop
conjoint_shorts_finish:
@@ -440,21 +443,21 @@
cmp r2, #0
beq conjoint_longs_finish
- pld [r1, #0]
+ pld [from, #0]
cmp r2, #24
ble conjoint_longs_small
- subs r3, r0, r1
+ subs r3, to, from
cmphi r2, r3
bhi cl_b2f_copy
.align 3
cl_f2b_loop_32:
subs r2, #32
blt cl_f2b_loop_32_finish
- ldmia r1!, {r3 - r9, ip}
+ ldmia from!, {r3 - r9, ip}
nop
- pld [r1]
- stmia r0!, {r3 - r9, ip}
+ pld [from]
+ stmia to!, {r3 - r9, ip}
bgt cl_f2b_loop_32
cl_f2b_loop_32_finish:
addlts r2, #32
@@ -463,31 +466,31 @@
cmp r2, #16
blt cl_f2b_copy_8
bgt cl_f2b_copy_24
- ldmia r1!, {r3 - r6}
- stmia r0!, {r3 - r6}
+ ldmia from!, {r3 - r6}
+ stmia to!, {r3 - r6}
b conjoint_longs_finish
cl_f2b_copy_8:
- ldmia r1!, {r3 - r4}
- stmia r0!, {r3 - r4}
+ ldmia from!, {r3 - r4}
+ stmia to!, {r3 - r4}
b conjoint_longs_finish
cl_f2b_copy_24:
- ldmia r1!, {r3 - r8}
- stmia r0!, {r3 - r8}
+ ldmia from!, {r3 - r8}
+ stmia to!, {r3 - r8}
b conjoint_longs_finish
# Src and dest overlap, copy in descending order
cl_b2f_copy:
- add r1, r2
- pld [r1, #-32]
- add r0, r2
+ add from, r2
+ pld [from, #-32]
+ add to, r2
.align 3
cl_b2f_loop_32:
subs r2, #32
blt cl_b2f_loop_32_finish
- ldmdb r1!, {r3 - r9, ip}
+ ldmdb from!, {r3 - r9, ip}
nop
- pld [r1]
- stmdb r0!, {r3 - r9, ip}
+ pld [from]
+ stmdb to!, {r3 - r9, ip}
bgt cl_b2f_loop_32
cl_b2f_loop_32_finish:
addlts r2, #32
@@ -495,16 +498,16 @@
cmp r2, #16
blt cl_b2f_copy_8
bgt cl_b2f_copy_24
- ldmdb r1!, {r3 - r6}
- stmdb r0!, {r3 - r6}
+ ldmdb from!, {r3 - r6}
+ stmdb to!, {r3 - r6}
b conjoint_longs_finish
cl_b2f_copy_8:
- ldmdb r1!, {r3 - r4}
- stmdb r0!, {r3 - r4}
+ ldmdb from!, {r3 - r4}
+ stmdb to!, {r3 - r4}
b conjoint_longs_finish
cl_b2f_copy_24:
- ldmdb r1!, {r3 - r8}
- stmdb r0!, {r3 - r8}
+ ldmdb from!, {r3 - r8}
+ stmdb to!, {r3 - r8}
conjoint_longs_finish:
ldmia sp!, {r3 - r9, ip}
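
Two mechanical things happen in this file: the GNU assembler aliases `from .req r0` and `to .req r1` name the AAPCS argument registers after the new (from, to) signature, and every r0/r1 use is rewritten through the aliases, which is what makes the argument-order change above reviewable. The `subs r3, to, from` / `cmphi r2, r3` / `bhi *_b2f_copy` sequences choose the copy direction for overlapping regions. In C++ terms, roughly (a self-contained sketch, not HotSpot code):

    #include <stddef.h>

    // Copy count bytes between possibly overlapping regions. Descend
    // (b2f) only when the destination starts inside the source range,
    // which is what the unsigned (to - from) < count test computes; the
    // subtraction wraps when to precedes from, forcing the f2b path.
    static void conjoint_copy(const char* from, char* to, size_t count) {
      if ((size_t)(to - from) < count) {
        for (size_t i = count; i > 0; i--) {   // back-to-front
          to[i - 1] = from[i - 1];
        }
      } else {
        for (size_t i = 0; i < count; i++) {   // front-to-back
          to[i] = from[i];
        }
      }
    }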
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,7 +39,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,13 +23,28 @@
*/
#include "precompiled.hpp"
-#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
+#include "gc/shared/cardTableBarrierSet.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/frame.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+#ifdef AARCH64
+ assert (_anchor.last_Java_pc() != NULL, "pc should be stored");
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+#else
+ if (_anchor.last_Java_pc() != NULL) {
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+ } else {
+ // This will pick up pc from sp
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
+ }
+#endif // AARCH64
+}
+
void JavaThread::cache_global_variables() {
BarrierSet* bs = Universe::heap()->barrier_set();
@@ -42,8 +57,8 @@
_heap_top_addr = NULL;
}
- if (bs->is_a(BarrierSet::CardTableModRef)) {
- _card_table_base = (address) (barrier_set_cast<CardTableModRefBS>(bs)->card_table()->byte_map_base());
+ if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
+ _card_table_base = (address) (barrier_set_cast<CardTableBarrierSet>(bs)->card_table()->byte_map_base());
} else {
_card_table_base = NULL;
}
--- a/src/hotspot/os_cpu/linux_arm/thread_linux_arm.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_arm/thread_linux_arm.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -37,20 +37,7 @@
_in_top_frame_unsafe_section = NULL;
}
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-#ifdef AARCH64
- assert (_anchor.last_Java_pc() != NULL, "pc should be stored");
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
-#else
- if (_anchor.last_Java_pc() != NULL) {
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
- } else {
- // This will pick up pc from sp
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp());
- }
-#endif // AARCH64
- }
+ frame pd_last_frame();
public:
intptr_t* last_Java_fp() { return _anchor.last_Java_fp(); }
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -41,7 +41,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,9 +24,23 @@
*/
#include "precompiled.hpp"
-#include "runtime/frame.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/thread.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+
+ intptr_t* sp = last_Java_sp();
+ address pc = _anchor.last_Java_pc();
+
+ // last_Java_pc is not set if we come here from compiled code.
+ if (pc == NULL) {
+ pc = (address) *(sp + 2);
+ }
+
+ return frame(sp, pc);
+}
+
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/PPC.
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
Unimplemented();
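
When the thread left Java through compiled code the anchor records only sp, so pd_last_frame() reconstructs the pc from the ABI stack frame: on PPC64 the caller's LR save doubleword sits at SP + 16 (back chain at 0, CR save at 8), which is the `*(sp + 2)` read above in word units. The s390 version later in this diff reads `*(sp + 14)` for the same reason, matching the r14 return-address slot at byte offset 112 in the s390x register save area. Condensed, with the offsets treated as ABI facts rather than code to copy:

    intptr_t* sp = last_Java_sp();
    address pc = _anchor.last_Java_pc();
    if (pc == NULL) {
      // came from compiled code: the anchor has no pc, so read the
      // return address from the caller's LR save slot (PPC64: SP + 16)
      pc = (address) *(sp + 2);
    }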
--- a/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_ppc/thread_linux_ppc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -33,19 +33,7 @@
}
// The `last' frame is the youngest Java frame on the thread's stack.
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-
- intptr_t* sp = last_Java_sp();
- address pc = _anchor.last_Java_pc();
-
- // Last_Java_pc ist not set, if we come here from compiled code.
- if (pc == NULL) {
- pc = (address) *(sp + 2);
- }
-
- return frame(sp, pc);
- }
+ frame pd_last_frame();
public:
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -44,7 +44,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -24,9 +24,23 @@
*/
#include "precompiled.hpp"
-#include "runtime/frame.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/thread.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+
+ intptr_t* sp = last_Java_sp();
+ address pc = _anchor.last_Java_pc();
+
+ // last_Java_pc is not set if we come here from compiled code.
+ if (pc == NULL) {
+ pc = (address) *(sp + 14);
+ }
+
+ return frame(sp, pc);
+}
+
// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Linux/S390x.
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
Unimplemented();
--- a/src/hotspot/os_cpu/linux_s390/thread_linux_s390.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_s390/thread_linux_s390.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -33,19 +33,7 @@
}
// The `last' frame is the youngest Java frame on the thread's stack.
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-
- intptr_t* sp = last_Java_sp();
- address pc = _anchor.last_Java_pc();
-
- // Last_Java_pc ist not set if we come here from compiled code.
- if (pc == NULL) {
- pc = (address) *(sp + 14);
- }
-
- return frame(sp, pc);
- }
+ frame pd_last_frame();
public:
void set_base_of_stack_pointer(intptr_t* base_sp) {}
--- a/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/os_linux_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -40,7 +40,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,14 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ assert(_anchor.walkable(), "thread has not dumped its register windows yet");
+
+ assert(_anchor.last_Java_pc() != NULL, "Ack no pc!");
+ return frame(last_Java_sp(), frame::unpatchable, _anchor.last_Java_pc());
+}
+
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
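
The SPARC variant carries two extra constraints that the flat-register ports do not: the youngest frames may still live in unflushed register windows, so _anchor.walkable() must hold (the windows have been spilled to the stack) before anyone reads frames from memory, and the pc is passed as frame::unpatchable because a pc obtained this way must not be used to patch the frame. A hedged sketch of the invariant; spill_register_windows() and set_flushed() are hypothetical stand-ins for the platform mechanism:

    // Walking thread T's stack from another thread is only valid once T
    // has spilled its register windows; until then the frames in memory
    // are stale copies.
    if (!anchor->walkable()) {
      spill_register_windows();   // hypothetical, e.g. via a FLUSHW trap
      anchor->set_flushed(true);  // hypothetical: makes walkable() true
    }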
--- a/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_sparc/thread_linux_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,7 @@
_base_of_stack_pointer = NULL;
}
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- assert(_anchor.walkable(), "thread has not dumped its register windows yet");
-
- assert(_anchor.last_Java_pc() != NULL, "Ack no pc!");
- return frame(last_Java_sp(), frame::unpatchable, _anchor.last_Java_pc());
- }
+ frame pd_last_frame();
// Sometimes the trap handler needs to record both PC and NPC.
// This is a SPARC-specific companion to Thread::set_saved_exception_pc.
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,7 +39,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,12 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+}
+
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
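
All of these pd_last_frame() implementations read the JavaFrameAnchor: on every transition out of Java code the thread publishes its last Java sp (and, depending on the port, fp and pc) there, so a stack walker has a verified starting frame no matter where native code has wandered since. A hypothetical, simplified model of the structure; field names follow the accessors used above, but the real type is platform-specific:

    struct JavaFrameAnchorModel {        // hypothetical sketch only
      intptr_t* _last_Java_sp;           // sp of the last Java frame
      intptr_t* _last_Java_fp;           // fp, on x86-style ports
      address   _last_Java_pc;           // pc, when the transition saved one
      bool has_last_frame() const { return _last_Java_sp != NULL; }
    };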
--- a/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_x86/thread_linux_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,11 +30,7 @@
_anchor.clear();
}
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
- }
+ frame pd_last_frame();
public:
// Mutators are highly dangerous....
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -40,7 +40,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/linux_zero/thread_linux_zero.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_zero/thread_linux_zero.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -27,6 +27,11 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ return frame(last_Java_fp(), last_Java_sp());
+}
+
void JavaThread::cache_global_variables() {
// nothing to do
}
--- a/src/hotspot/os_cpu/linux_zero/thread_linux_zero.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/linux_zero/thread_linux_zero.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -86,10 +86,7 @@
}
private:
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- return frame(last_Java_fp(), last_Java_sp());
- }
+ frame pd_last_frame();
public:
static ByteSize last_Java_fp_offset() {
--- a/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/os_solaris_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -41,7 +41,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/solaris_sparc/thread_solaris_sparc.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/thread_solaris_sparc.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,14 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ assert(_anchor.walkable(), "thread has not dumped its register windows yet");
+
+ assert(_anchor.last_Java_pc() != NULL, "Ack no pc!");
+ return frame(last_Java_sp(), frame::unpatchable, _anchor.last_Java_pc());
+}
+
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
//
--- a/src/hotspot/os_cpu/solaris_sparc/thread_solaris_sparc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_sparc/thread_solaris_sparc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,13 +31,7 @@
_base_of_stack_pointer = NULL;
}
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- assert(_anchor.walkable(), "thread has not dumped its register windows yet");
-
- assert(_anchor.last_Java_pc() != NULL, "Ack no pc!");
- return frame(last_Java_sp(), frame::unpatchable, _anchor.last_Java_pc());
- }
+ frame pd_last_frame();
// Sometimes the trap handler needs to record both PC and NPC.
// This is a SPARC-specific companion to Thread::set_saved_exception_pc.
--- a/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/os_solaris_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -40,7 +40,7 @@
#include "runtime/atomic.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/solaris_x86/thread_solaris_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/thread_solaris_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,12 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+}
+
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
--- a/src/hotspot/os_cpu/solaris_x86/thread_solaris_x86.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/solaris_x86/thread_solaris_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,11 +28,7 @@
private:
void pd_initialize() { _anchor.clear(); }
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
- }
+ frame pd_last_frame();
public:
// Mutators are highly dangerous....
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -40,7 +40,7 @@
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,12 @@
#include "runtime/frame.inline.hpp"
#include "runtime/thread.inline.hpp"
+frame JavaThread::pd_last_frame() {
+ assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
+ vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
+ return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
+}
+
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is
// currently interrupted by SIGPROF
bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
--- a/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/os_cpu/windows_x86/thread_windows_x86.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,11 +30,7 @@
_anchor.clear();
}
- frame pd_last_frame() {
- assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
- vmassert(_anchor.last_Java_pc() != NULL, "not walkable");
- return frame(_anchor.last_Java_sp(), _anchor.last_Java_fp(), _anchor.last_Java_pc());
- }
+ frame pd_last_frame();
public:
// Mutators are highly dangerous....
--- a/src/hotspot/share/adlc/main.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/adlc/main.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -228,6 +228,7 @@
AD.addInclude(AD._CPP_file, "opto/regmask.hpp");
AD.addInclude(AD._CPP_file, "opto/runtime.hpp");
AD.addInclude(AD._CPP_file, "runtime/biasedLocking.hpp");
+ AD.addInclude(AD._CPP_file, "runtime/safepointMechanism.hpp");
AD.addInclude(AD._CPP_file, "runtime/sharedRuntime.hpp");
AD.addInclude(AD._CPP_file, "runtime/stubRoutines.hpp");
AD.addInclude(AD._CPP_file, "utilities/growableArray.hpp");
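
adlc writes the platform .ad file out as C++, so the generated file must include every header its emitted code touches; the new runtime/safepointMechanism.hpp entry covers the safepoint-poll support referenced by generated instructions. A hedged sketch of the distinction that header encapsulates, with the API names assumed from this era's SafepointMechanism:

    // Assumed API, for illustration: with thread-local handshakes the
    // generated poll tests a per-thread polling page rather than the
    // single global page.
    if (SafepointMechanism::uses_thread_local_poll()) {
      // emit: load the polling page from the thread register, then test
    } else {
      // emit: test the global polling page
    }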
--- a/src/hotspot/share/aot/aotCodeHeap.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/aot/aotCodeHeap.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,10 +25,10 @@
#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaAssertions.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/gcLocker.hpp"
#include "interpreter/abstractInterpreter.hpp"
@@ -36,6 +36,7 @@
#include "jvmci/jvmciRuntime.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/method.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vm_operations.hpp"
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,12 +30,13 @@
#include "code/compiledIC.hpp"
#include "code/nativeInst.hpp"
#include "compiler/compilerOracle.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "jvmci/compilerRuntime.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "oops/method.inline.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
@@ -70,6 +71,10 @@
}
#endif
+address* AOTCompiledMethod::orig_pc_addr(const frame* fr) {
+ return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset());
+}
+
bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
return false;
}
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -117,7 +117,7 @@
const int _method_index;
oop _oop; // method()->method_holder()->klass_holder()
- address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset()); }
+ address* orig_pc_addr(const frame* fr);
bool make_not_entrant_helper(int new_state);
public:
--- a/src/hotspot/share/c1/c1_Compiler.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_Compiler.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -38,7 +38,7 @@
#include "memory/resourceArea.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/bitMap.inline.hpp"
--- a/src/hotspot/share/c1/c1_FrameMap.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_FrameMap.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_C1_C1_FRAMEMAP_HPP
#define SHARE_VM_C1_C1_FRAMEMAP_HPP
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_LIR.hpp"
#include "code/vmreg.hpp"
--- a/src/hotspot/share/c1/c1_GraphBuilder.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_GraphBuilder.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,6 +32,7 @@
#include "ci/ciField.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciMemberName.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,6 +30,7 @@
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
+#include "gc/shared/collectedHeap.hpp"
#include "runtime/os.hpp"
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,7 +35,8 @@
#include "ci/ciObjArray.hpp"
#include "ci/ciUtilities.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/collectedHeap.hpp"
#include "runtime/arguments.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
@@ -1460,10 +1461,10 @@
switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1BarrierSet:
- G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
+ G1BarrierSet_pre_barrier(addr_opr, pre_val, do_load, patch, info);
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
// No pre barriers
break;
default :
@@ -1476,11 +1477,11 @@
switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
case BarrierSet::G1BarrierSet:
- G1SATBCardTableModRef_post_barrier(addr, new_val);
+ G1BarrierSet_post_barrier(addr, new_val);
break;
#endif // INCLUDE_ALL_GCS
- case BarrierSet::CardTableModRef:
- CardTableModRef_post_barrier(addr, new_val);
+ case BarrierSet::CardTableBarrierSet:
+ CardTableBarrierSet_post_barrier(addr, new_val);
break;
default :
ShouldNotReachHere();
@@ -1490,8 +1491,8 @@
////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS
-void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
- bool do_load, bool patch, CodeEmitInfo* info) {
+void LIRGenerator::G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+ bool do_load, bool patch, CodeEmitInfo* info) {
// First we test whether marking is in progress.
BasicType flag_type;
if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
@@ -1545,7 +1546,7 @@
__ branch_destination(slow->continuation());
}
-void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
+void LIRGenerator::G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
// If the "new_val" is a constant NULL, no barrier is necessary.
if (new_val->is_constant() &&
new_val->as_constant_ptr()->as_jobject() == NULL) return;
@@ -1609,7 +1610,7 @@
#endif // INCLUDE_ALL_GCS
////////////////////////////////////////////////////////////////////////
-void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
+void LIRGenerator::CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
LIR_Const* card_table_base = new LIR_Const(ci_card_table_address());
if (addr->is_address()) {
LIR_Address* address = addr->as_address_ptr();
@@ -1626,8 +1627,8 @@
}
assert(addr->is_register(), "must be a register at this point");
-#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
- CardTableModRef_post_barrier_helper(addr, card_table_base);
+#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
+ CardTableBarrierSet_post_barrier_helper(addr, card_table_base);
#else
LIR_Opr tmp = new_pointer_register();
if (TwoOperandLIRForm) {
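
Beyond the rename, CardTableBarrierSet_post_barrier still emits the classic card-marking sequence: shift the store address right by the card size and dirty the byte at that index, with byte_map_base pre-biased so the shifted address indexes it directly. A self-contained sketch of the computation the generated LIR performs, assuming HotSpot's usual constants (512-byte cards, dirty value 0):

    #include <stdint.h>

    static void post_barrier_sketch(volatile int8_t* byte_map_base,
                                    void* store_addr) {
      const int card_shift = 9;   // log2 of the 512-byte card size, assumed
      volatile int8_t* card = byte_map_base + ((uintptr_t)store_addr >> card_shift);
      *card = 0;                  // dirty_card_val(): queue card for rescanning
    }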
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -275,15 +275,15 @@
// specific implementations
// pre barriers
- void G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
- bool do_load, bool patch, CodeEmitInfo* info);
+ void G1BarrierSet_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
+ bool do_load, bool patch, CodeEmitInfo* info);
// post barriers
- void G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
- void CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
-#ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
- void CardTableModRef_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
+ void G1BarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
+ void CardTableBarrierSet_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val);
+#ifdef CARDTABLEBARRIERSET_POST_BARRIER_HELPER
+ void CardTableBarrierSet_post_barrier_helper(LIR_OprDesc* addr, LIR_Const* card_table_base);
#endif
--- a/src/hotspot/share/c1/c1_Runtime1.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -53,11 +53,12 @@
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"
@@ -1358,67 +1359,6 @@
JRT_END
-// Array copy return codes.
-enum {
- ac_failed = -1, // arraycopy failed
- ac_ok = 0 // arraycopy succeeded
-};
-
-
-// Below length is the # elements copied.
-template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
- oopDesc* dst, T* dst_addr,
- int length) {
- if (src == dst) {
- // same object, no check
- HeapAccess<>::oop_arraycopy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
- return ac_ok;
- } else {
- Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
- Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
- if (stype == bound || stype->is_subtype_of(bound)) {
- // Elements are guaranteed to be subtypes, so no check necessary
- HeapAccess<ARRAYCOPY_DISJOINT>::oop_arraycopy(arrayOop(src), arrayOop(dst), src_addr, dst_addr, length);
- return ac_ok;
- }
- }
- return ac_failed;
-}
-
-// fast and direct copy of arrays; returning -1, means that an exception may be thrown
-// and we did not copy anything
-JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
-#ifndef PRODUCT
- _generic_arraycopy_cnt++; // Slow-path oop array copy
-#endif
-
- if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
- if (!dst->is_array() || !src->is_array()) return ac_failed;
- if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
- if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
-
- if (length == 0) return ac_ok;
- if (src->is_typeArray()) {
- Klass* klass_oop = src->klass();
- if (klass_oop != dst->klass()) return ac_failed;
- TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
- klass->copy_array(arrayOop(src), src_pos, arrayOop(dst), dst_pos, length, Thread::current());
- return ac_ok;
- } else if (src->is_objArray() && dst->is_objArray()) {
- if (UseCompressedOops) {
- narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
- narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
- return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
- } else {
- oop *src_addr = objArrayOop(src)->obj_at_addr<oop>(src_pos);
- oop *dst_addr = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
- return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
- }
- }
- return ac_failed;
-JRT_END
-
-
JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
// had to return int instead of bool, otherwise there may be a mismatch
// between the C calling convention and the Java one.
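
The deleted Runtime1::arraycopy was C1's generic slow-path leaf. Its one subtle piece was deciding when an oop-array copy needs no per-element store check: only when the source element type is a subtype of the destination element type; otherwise it returned ac_failed and left the checked, exception-throwing path to the full runtime. The removed test, condensed from obj_arraycopy_work above:

    Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
    Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
    if (stype == bound || stype->is_subtype_of(bound)) {
      // every source element is assignable: copy without element checks
    }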
--- a/src/hotspot/share/c1/c1_Runtime1.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/c1/c1_Runtime1.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -186,7 +186,6 @@
#endif
// directly accessible leaf routine
- static int arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length);
static int is_instance_of(oopDesc* mirror, oopDesc* obj);
static void predicate_failed_trap(JavaThread* thread);
--- a/src/hotspot/share/ci/ciArray.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciArray.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "ci/ciArrayKlass.hpp"
#include "ci/ciConstant.hpp"
#include "ci/ciKlass.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
--- a/src/hotspot/share/ci/ciBaseObject.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciBaseObject.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,6 @@
#include "ci/ciClassList.hpp"
#include "memory/allocation.hpp"
-#include "runtime/handles.hpp"
#include "runtime/jniHandles.hpp"
// ciBaseObject
--- a/src/hotspot/share/ci/ciCallSite.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciCallSite.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "ci/ciCallSite.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
// ciCallSite
--- a/src/hotspot/share/ci/ciConstantPoolCache.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciConstantPoolCache.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciConstantPoolCache.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
--- a/src/hotspot/share/ci/ciEnv.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciEnv.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,7 @@
#include "ci/ciMethod.hpp"
#include "ci/ciNullObject.hpp"
#include "ci/ciReplay.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
--- a/src/hotspot/share/ci/ciEnv.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciEnv.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#include "code/dependencies.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "compiler/oopMap.hpp"
+#include "oops/methodData.hpp"
#include "runtime/thread.hpp"
class CompileTask;
--- a/src/hotspot/share/ci/ciExceptionHandler.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciExceptionHandler.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,8 @@
#include "precompiled.hpp"
#include "ci/ciExceptionHandler.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
+#include "runtime/handles.inline.hpp"
// ciExceptionHandler
//
--- a/src/hotspot/share/ci/ciField.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciField.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,13 +25,14 @@
#include "precompiled.hpp"
#include "ci/ciField.hpp"
#include "ci/ciInstanceKlass.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.hpp"
+#include "runtime/handles.inline.hpp"
// ciField
//
--- a/src/hotspot/share/ci/ciField.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciField.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,6 +29,7 @@
#include "ci/ciConstant.hpp"
#include "ci/ciFlags.hpp"
#include "ci/ciInstance.hpp"
+#include "ci/ciUtilities.hpp"
// ciField
//
--- a/src/hotspot/share/ci/ciInstance.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciInstance.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "ci/ciField.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciInstanceKlass.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "ci/ciField.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciInstanceKlass.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
@@ -34,6 +34,7 @@
#include "oops/oop.inline.hpp"
#include "oops/fieldStreams.hpp"
#include "runtime/fieldDescriptor.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
// ciInstanceKlass
--- a/src/hotspot/share/ci/ciKlass.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciKlass.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciKlass.hpp"
#include "ci/ciSymbol.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "oops/oop.inline.hpp"
// ciKlass
--- a/src/hotspot/share/ci/ciMemberName.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciMemberName.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciClassList.hpp"
#include "ci/ciMemberName.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.hpp"
// ------------------------------------------------------------------
--- a/src/hotspot/share/ci/ciMetadata.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciMetadata.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciObject.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
// ------------------------------------------------------------------
--- a/src/hotspot/share/ci/ciMethod.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciMethod.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,7 @@
#include "ci/ciStreams.hpp"
#include "ci/ciSymbol.hpp"
#include "ci/ciReplay.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/methodLiveness.hpp"
--- a/src/hotspot/share/ci/ciMethodData.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciMethodData.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "ci/ciMetadata.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciReplay.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/deoptimization.hpp"
--- a/src/hotspot/share/ci/ciMethodHandle.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciMethodHandle.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciClassList.hpp"
#include "ci/ciMethodHandle.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.hpp"
// ------------------------------------------------------------------
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/ci/ciMethodType.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "ci/ciInstance.hpp"
+#include "ci/ciMethodType.hpp"
+#include "ci/ciUtilities.inline.hpp"
+#include "classfile/javaClasses.hpp"
+
+ciType* ciMethodType::class_to_citype(oop klass_oop) const {
+ if (java_lang_Class::is_primitive(klass_oop)) {
+ BasicType bt = java_lang_Class::primitive_type(klass_oop);
+ return ciType::make(bt);
+ } else {
+ Klass* k = java_lang_Class::as_Klass(klass_oop);
+ return CURRENT_ENV->get_klass(k);
+ }
+}
+
+ciType* ciMethodType::rtype() const {
+ GUARDED_VM_ENTRY(
+ oop rtype = java_lang_invoke_MethodType::rtype(get_oop());
+ return class_to_citype(rtype);
+ )
+}
+
+int ciMethodType::ptype_count() const {
+ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_count(get_oop());)
+}
+
+int ciMethodType::ptype_slot_count() const {
+ GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_slot_count(get_oop());)
+}
+
+ciType* ciMethodType::ptype_at(int index) const {
+ GUARDED_VM_ENTRY(
+ oop ptype = java_lang_invoke_MethodType::ptype(get_oop(), index);
+ return class_to_citype(ptype);
+ )
+}
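
Each accessor in the new ciMethodType.cpp is wrapped in GUARDED_VM_ENTRY because ci code can be reached both from inside the VM and from a compiler thread outside it; the guard performs a VM-entry transition only when one is needed. The idiom has roughly the following shape (a sketch, not a verbatim quote of ciUtilities):

    #define GUARDED_VM_ENTRY(action) \
      { if (IS_IN_VM) { action } else { VM_ENTRY_MARK; action } }

Moving these bodies out of the header is also what lets ciMethodType.hpp drop its ciUtilities.hpp and javaClasses.hpp includes below.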
--- a/src/hotspot/share/ci/ciMethodType.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciMethodType.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,23 +26,13 @@
#define SHARE_VM_CI_CIMETHODTYPE_HPP
#include "ci/ciInstance.hpp"
-#include "ci/ciUtilities.hpp"
-#include "classfile/javaClasses.hpp"
// ciMethodType
//
// The class represents a java.lang.invoke.MethodType object.
class ciMethodType : public ciInstance {
private:
- ciType* class_to_citype(oop klass_oop) const {
- if (java_lang_Class::is_primitive(klass_oop)) {
- BasicType bt = java_lang_Class::primitive_type(klass_oop);
- return ciType::make(bt);
- } else {
- Klass* k = java_lang_Class::as_Klass(klass_oop);
- return CURRENT_ENV->get_klass(k);
- }
- }
+ ciType* class_to_citype(oop klass_oop) const;
public:
ciMethodType(instanceHandle h_i) : ciInstance(h_i) {}
@@ -50,27 +40,12 @@
// What kind of ciObject is this?
bool is_method_type() const { return true; }
- ciType* rtype() const {
- GUARDED_VM_ENTRY(
- oop rtype = java_lang_invoke_MethodType::rtype(get_oop());
- return class_to_citype(rtype);
- )
- }
-
- int ptype_count() const {
- GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_count(get_oop());)
- }
+ ciType* rtype() const;
- int ptype_slot_count() const {
- GUARDED_VM_ENTRY(return java_lang_invoke_MethodType::ptype_slot_count(get_oop());)
- }
+ int ptype_count() const;
+ int ptype_slot_count() const;
- ciType* ptype_at(int index) const {
- GUARDED_VM_ENTRY(
- oop ptype = java_lang_invoke_MethodType::ptype(get_oop(), index);
- return class_to_citype(ptype);
- )
- }
+ ciType* ptype_at(int index) const;
};
#endif // SHARE_VM_CI_CIMETHODTYPE_HPP
--- a/src/hotspot/share/ci/ciNullObject.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciNullObject.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciNullObject.hpp"
+#include "ci/ciUtilities.hpp"
// ciNullObject
//
--- a/src/hotspot/share/ci/ciNullObject.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciNullObject.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,6 @@
#include "ci/ciClassList.hpp"
#include "ci/ciObject.hpp"
-#include "ci/ciUtilities.hpp"
// ciNullObject
//
--- a/src/hotspot/share/ci/ciObjArray.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciObjArray.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciNullObject.hpp"
#include "ci/ciObjArray.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
// ciObjArray
--- a/src/hotspot/share/ci/ciObjArrayKlass.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciObjArrayKlass.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "ci/ciInstanceKlass.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciSymbol.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "oops/objArrayKlass.hpp"
// ciObjArrayKlass
--- a/src/hotspot/share/ci/ciObject.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciObject.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciObject.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
@@ -207,7 +207,7 @@
int flags = 0;
if (x != NULL) {
assert(Universe::heap()->is_in_reserved(x), "must be");
- if (x->is_scavengable())
+ if (Universe::heap()->is_scavengable(x))
flags |= SCAVENGABLE_FLAG;
}
_ident |= flags;
--- a/src/hotspot/share/ci/ciObjectFactory.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciObjectFactory.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,13 +39,14 @@
#include "ci/ciSymbol.hpp"
#include "ci/ciTypeArray.hpp"
#include "ci/ciTypeArrayKlass.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldType.hpp"
+#include "runtime/handles.inline.hpp"
#include "utilities/macros.hpp"
// ciObjectFactory
--- a/src/hotspot/share/ci/ciReplay.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciReplay.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
#include "ci/ciReplay.hpp"
#include "ci/ciSymbol.hpp"
#include "ci/ciKlass.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
--- a/src/hotspot/share/ci/ciSignature.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciSignature.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciMethodType.hpp"
#include "ci/ciSignature.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/ci/ciSignature.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciSignature.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,6 +27,7 @@
#include "ci/ciClassList.hpp"
#include "ci/ciSymbol.hpp"
+#include "interpreter/bytecodes.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
--- a/src/hotspot/share/ci/ciStreams.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciStreams.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,8 @@
#include "ci/ciConstant.hpp"
#include "ci/ciField.hpp"
#include "ci/ciStreams.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
+#include "runtime/handles.inline.hpp"
// ciExceptionHandlerStream
//
--- a/src/hotspot/share/ci/ciSymbol.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciSymbol.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciSymbol.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "memory/oopFactory.hpp"
// ------------------------------------------------------------------
--- a/src/hotspot/share/ci/ciType.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciType.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "ci/ciEnv.hpp"
#include "ci/ciType.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
--- a/src/hotspot/share/ci/ciTypeArray.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciTypeArray.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciTypeArray.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
// ciTypeArray
--- a/src/hotspot/share/ci/ciTypeArrayKlass.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciTypeArrayKlass.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciTypeArrayKlass.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
// ciTypeArrayKlass
//
--- a/src/hotspot/share/ci/ciUtilities.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciUtilities.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,8 +24,9 @@
#include "precompiled.hpp"
#include "ci/ciUtilities.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTable.hpp"
+#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
// ciUtilities
@@ -51,7 +52,7 @@
// card_table_base
jbyte *ci_card_table_address() {
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
assert(sizeof(*ct->byte_map_base()) == sizeof(jbyte), "adjust users of this code");
return ct->byte_map_base();
--- a/src/hotspot/share/ci/ciUtilities.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/ci/ciUtilities.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,45 +26,11 @@
#define SHARE_VM_CI_CIUTILITIES_HPP
#include "ci/ciEnv.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "utilities/globalDefinitions.hpp"
// The following routines and definitions are used internally in the
// compiler interface.
-
-// Add a ci native entry wrapper?
-
-// Bring the compilation thread into the VM state.
-#define VM_ENTRY_MARK \
- CompilerThread* thread=CompilerThread::current(); \
- ThreadInVMfromNative __tiv(thread); \
- ResetNoHandleMark rnhm; \
- HandleMarkCleaner __hm(thread); \
- Thread* THREAD = thread; \
- debug_only(VMNativeEntryWrapper __vew;)
-
-
-
-// Bring the compilation thread into the VM state. No handle mark.
-#define VM_QUICK_ENTRY_MARK \
- CompilerThread* thread=CompilerThread::current(); \
- ThreadInVMfromNative __tiv(thread); \
-/* \
- * [TODO] The NoHandleMark line does nothing but declare a function prototype \
- * The NoHandkeMark constructor is NOT executed. If the ()'s are \
- * removed, causes the NoHandleMark assert to trigger. \
- * debug_only(NoHandleMark __hm();) \
- */ \
- Thread* THREAD = thread; \
- debug_only(VMNativeEntryWrapper __vew;)
-
-
-#define EXCEPTION_CONTEXT \
- CompilerThread* thread=CompilerThread::current(); \
- Thread* THREAD = thread;
-
-
#define CURRENT_ENV \
ciEnv::current()
@@ -78,36 +44,6 @@
#define ASSERT_IN_VM \
assert(IS_IN_VM, "must be in vm state");
-#define GUARDED_VM_ENTRY(action) \
- {if (IS_IN_VM) { action } else { VM_ENTRY_MARK; { action }}}
-
-#define GUARDED_VM_QUICK_ENTRY(action) \
- {if (IS_IN_VM) { action } else { VM_QUICK_ENTRY_MARK; { action }}}
-
-// Redefine this later.
-#define KILL_COMPILE_ON_FATAL_(result) \
- THREAD); \
- if (HAS_PENDING_EXCEPTION) { \
- if (PENDING_EXCEPTION->klass() == \
- SystemDictionary::ThreadDeath_klass()) { \
- /* Kill the compilation. */ \
- fatal("unhandled ci exception"); \
- return (result); \
- } \
- CLEAR_PENDING_EXCEPTION; \
- return (result); \
- } \
- (void)(0
-
-#define KILL_COMPILE_ON_ANY \
- THREAD); \
- if (HAS_PENDING_EXCEPTION) { \
- fatal("unhandled ci exception"); \
- CLEAR_PENDING_EXCEPTION; \
- } \
-(void)(0
-
-
inline const char* bool_to_str(bool b) {
return ((b) ? "true" : "false");
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/ci/ciUtilities.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CI_CIUTILITIES_INLINE_HPP
+#define SHARE_VM_CI_CIUTILITIES_INLINE_HPP
+
+#include "ci/ciUtilities.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+
+// Add a ci native entry wrapper?
+
+// Bring the compilation thread into the VM state.
+#define VM_ENTRY_MARK \
+ CompilerThread* thread=CompilerThread::current(); \
+ ThreadInVMfromNative __tiv(thread); \
+ ResetNoHandleMark rnhm; \
+ HandleMarkCleaner __hm(thread); \
+ Thread* THREAD = thread; \
+ debug_only(VMNativeEntryWrapper __vew;)
+
+
+
+// Bring the compilation thread into the VM state. No handle mark.
+#define VM_QUICK_ENTRY_MARK \
+ CompilerThread* thread=CompilerThread::current(); \
+ ThreadInVMfromNative __tiv(thread); \
+/* \
+ * [TODO] The NoHandleMark line does nothing but declare a function prototype \
+ * The NoHandleMark constructor is NOT executed. If the ()'s are \
+ * removed, the NoHandleMark assert triggers. \
+ * debug_only(NoHandleMark __hm();) \
+ */ \
+ Thread* THREAD = thread; \
+ debug_only(VMNativeEntryWrapper __vew;)
+
+
+#define EXCEPTION_CONTEXT \
+ CompilerThread* thread=CompilerThread::current(); \
+ Thread* THREAD = thread;
+
+
+#define GUARDED_VM_ENTRY(action) \
+ {if (IS_IN_VM) { action } else { VM_ENTRY_MARK; { action }}}
+
+#define GUARDED_VM_QUICK_ENTRY(action) \
+ {if (IS_IN_VM) { action } else { VM_QUICK_ENTRY_MARK; { action }}}
+
+// Redefine this later.
+#define KILL_COMPILE_ON_FATAL_(result) \
+ THREAD); \
+ if (HAS_PENDING_EXCEPTION) { \
+ if (PENDING_EXCEPTION->klass() == \
+ SystemDictionary::ThreadDeath_klass()) { \
+ /* Kill the compilation. */ \
+ fatal("unhandled ci exception"); \
+ return (result); \
+ } \
+ CLEAR_PENDING_EXCEPTION; \
+ return (result); \
+ } \
+ (void)(0
+
+#define KILL_COMPILE_ON_ANY \
+ THREAD); \
+ if (HAS_PENDING_EXCEPTION) { \
+ fatal("unhandled ci exception"); \
+ CLEAR_PENDING_EXCEPTION; \
+ } \
+(void)(0
+
+#endif // SHARE_VM_CI_CIUTILITIES_INLINE_HPP
+
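
The guarded-entry macros above run the action directly when the compiler thread is already in the VM state, and otherwise enter that state through RAII guards for the duration of the action; moving them here means only code that needs the transition pulls in interfaceSupport.inline.hpp. A standalone sketch of that shape — g_in_vm and InVMState are invented stand-ins for HotSpot's thread state and ThreadInVMfromNative, not real JDK API:

    // Illustration only: mirrors the GUARDED_VM_ENTRY shape.
    #include <iostream>

    thread_local bool g_in_vm = false;

    struct InVMState {                 // RAII transition into the "VM" state
      InVMState()  { g_in_vm = true;  }
      ~InVMState() { g_in_vm = false; }
    };

    // Run `action` as-is when already in the VM state, otherwise enter
    // the state just for the duration of `action`.
    #define GUARDED_ENTRY(action) \
      { if (g_in_vm) { action } else { InVMState __guard; { action } } }

    int main() {
      GUARDED_ENTRY(std::cout << "in VM: " << g_in_vm << '\n';)  // prints 1
      std::cout << "in VM: " << g_in_vm << '\n';                 // prints 0
    }
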
--- a/src/hotspot/share/classfile/classFileParser.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/classFileParser.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -58,6 +58,7 @@
#include "oops/symbol.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiThreadState.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfData.hpp"
#include "runtime/reflection.hpp"
@@ -770,6 +771,13 @@
} // end of for
}
+Handle ClassFileParser::clear_cp_patch_at(int index) {
+ Handle patch = cp_patch_at(index);
+ _cp_patches->at_put(index, Handle());
+ assert(!has_cp_patch_at(index), "");
+ return patch;
+}
+
void ClassFileParser::patch_class(ConstantPool* cp, int class_index, Klass* k, Symbol* name) {
int name_index = _orig_cp_size + _num_patched_klasses;
int resolved_klass_index = _first_patched_klass_resolved_index + _num_patched_klasses;
--- a/src/hotspot/share/classfile/classFileParser.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/classFileParser.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#define SHARE_VM_CLASSFILE_CLASSFILEPARSER_HPP
#include "memory/referenceType.hpp"
-#include "runtime/handles.inline.hpp"
+#include "oops/annotations.hpp"
#include "oops/constantPool.hpp"
#include "oops/typeArrayOop.hpp"
#include "utilities/accessFlags.hpp"
@@ -434,12 +434,7 @@
return _cp_patches->at(index);
}
- Handle clear_cp_patch_at(int index) {
- Handle patch = cp_patch_at(index);
- _cp_patches->at_put(index, Handle());
- assert(!has_cp_patch_at(index), "");
- return patch;
- }
+ Handle clear_cp_patch_at(int index);
void patch_class(ConstantPool* cp, int class_index, Klass* k, Symbol* name);
void patch_constant_pool(ConstantPool* cp,
@@ -530,7 +525,7 @@
const GrowableArray<Handle>* cp_patches() const { return _cp_patches; }
ClassLoaderData* loader_data() const { return _loader_data; }
const Symbol* class_name() const { return _class_name; }
- const Klass* super_klass() const { return _super_klass; }
+ const InstanceKlass* super_klass() const { return _super_klass; }
ReferenceType reference_type() const { return _rt; }
AccessFlags access_flags() const { return _access_flags; }
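
The clear_cp_patch_at() change above is one instance of a pattern applied throughout this patch: a method body that needs runtime/handles.inline.hpp moves from the header into the .cpp, and the header drops the heavyweight include (here replaced by the lighter oops/annotations.hpp it actually needs). A single-file sketch of the split, with all names invented for illustration:

    // What stays in the header: a declaration usable with only a
    // lightweight (or forward-declared) Handle type.
    struct Handle { int value; };

    class Parser {
     public:
      Handle clear_patch_at(int index);   // declaration only
     private:
      int _patches[8] = {0};
    };

    // What moves to the .cpp, next to the heavy includes: the body,
    // unchanged, as with ClassFileParser::clear_cp_patch_at.
    Handle Parser::clear_patch_at(int index) {
      Handle patch{_patches[index]};
      _patches[index] = 0;               // clear the slot
      return patch;
    }

    int main() { Parser p; return p.clear_patch_at(3).value; }
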
--- a/src/hotspot/share/classfile/classListParser.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/classListParser.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,8 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/fieldType.hpp"
--- a/src/hotspot/share/classfile/classLoader.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/classLoader.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -61,7 +61,7 @@
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
--- a/src/hotspot/share/classfile/classLoaderData.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -610,6 +610,21 @@
return new Dictionary(this, size, resizable);
}
+// Tell the GC to keep this klass alive while iterating ClassLoaderDataGraph
+oop ClassLoaderData::holder_phantom() {
+ // A klass that was previously considered dead can be looked up in the
+ // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
+ // or a reachable object making it alive again. The SATB part of G1 needs
+ // to get notified about this potential resurrection, otherwise the marking
+ // might not find the object.
+ if (!keep_alive()) {
+ oop* o = is_anonymous() ? _klasses->java_mirror_handle().ptr_raw() : &_class_loader;
+ return RootAccess<ON_PHANTOM_OOP_REF>::oop_load(o);
+ } else {
+ return NULL;
+ }
+}
+
// Unloading support
oop ClassLoaderData::keep_alive_object() const {
assert_locked_or_safepoint(_metaspace_lock);
@@ -1048,26 +1063,34 @@
}
void ClassLoaderDataGraph::classes_do(KlassClosure* klass_closure) {
+ Thread* thread = Thread::current();
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ Handle holder(thread, cld->holder_phantom());
cld->classes_do(klass_closure);
}
}
void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
+ Thread* thread = Thread::current();
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ Handle holder(thread, cld->holder_phantom());
cld->classes_do(f);
}
}
void ClassLoaderDataGraph::methods_do(void f(Method*)) {
+ Thread* thread = Thread::current();
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ Handle holder(thread, cld->holder_phantom());
cld->methods_do(f);
}
}
void ClassLoaderDataGraph::modules_do(void f(ModuleEntry*)) {
assert_locked_or_safepoint(Module_lock);
+ Thread* thread = Thread::current();
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ Handle holder(thread, cld->holder_phantom());
cld->modules_do(f);
}
}
@@ -1084,7 +1107,9 @@
void ClassLoaderDataGraph::packages_do(void f(PackageEntry*)) {
assert_locked_or_safepoint(Module_lock);
+ Thread* thread = Thread::current();
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ Handle holder(thread, cld->holder_phantom());
cld->packages_do(f);
}
}
@@ -1100,7 +1125,9 @@
}
void ClassLoaderDataGraph::loaded_classes_do(KlassClosure* klass_closure) {
+ Thread* thread = Thread::current();
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+ Handle holder(thread, cld->holder_phantom());
cld->loaded_classes_do(klass_closure);
}
}
@@ -1121,21 +1148,27 @@
// Walk classes in the loaded class dictionaries in various forms.
// Only walks the classes defined in this class loader.
void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*)) {
+ Thread* thread = Thread::current();
FOR_ALL_DICTIONARY(cld) {
+ Handle holder(thread, cld->holder_phantom());
cld->dictionary()->classes_do(f);
}
}
// Only walks the classes defined in this class loader.
void ClassLoaderDataGraph::dictionary_classes_do(void f(InstanceKlass*, TRAPS), TRAPS) {
+ Thread* thread = Thread::current();
FOR_ALL_DICTIONARY(cld) {
+ Handle holder(thread, cld->holder_phantom());
cld->dictionary()->classes_do(f, CHECK);
}
}
// Walks all entries in the dictionary including entries initiated by this class loader.
void ClassLoaderDataGraph::dictionary_all_entries_do(void f(InstanceKlass*, ClassLoaderData*)) {
+ Thread* thread = Thread::current();
FOR_ALL_DICTIONARY(cld) {
+ Handle holder(thread, cld->holder_phantom());
cld->dictionary()->all_entries_do(f);
}
}
--- a/src/hotspot/share/classfile/classLoaderData.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/classLoaderData.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -288,6 +288,7 @@
void unload();
bool keep_alive() const { return _keep_alive > 0; }
+ oop holder_phantom();
void classes_do(void f(Klass*));
void loaded_classes_do(KlassClosure* klass_closure);
void classes_do(void f(InstanceKlass*));
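
The holder_phantom() calls threaded through the iteration loops above pin each ClassLoaderData while its classes are walked: loading the holder with phantom strength keeps a concurrently-unloading loader alive and, per the comment, notifies G1's SATB marking of the potential resurrection. As a rough standalone analogy only — std::weak_ptr stands in for the phantom-strength oop load, which is not how HotSpot implements it:

    #include <memory>
    #include <vector>

    struct LoaderData { std::vector<int> klasses; };

    // Promote each weak reference to a strong one for the duration of the
    // walk so the entry cannot be reclaimed mid-iteration.
    void classes_do(const std::vector<std::weak_ptr<LoaderData>>& graph,
                    void (*f)(int)) {
      for (const auto& weak : graph) {
        std::shared_ptr<LoaderData> holder = weak.lock();  // keep alive
        if (holder == nullptr) continue;                   // already dead
        for (int k : holder->klasses) f(k);
      }  // holder released; the entry may now be reclaimed
    }

    int main() {
      auto cld = std::make_shared<LoaderData>();
      cld->klasses = {1, 2, 3};
      std::vector<std::weak_ptr<LoaderData>> graph{cld};
      classes_do(graph, [](int) {});
    }
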
--- a/src/hotspot/share/classfile/javaClasses.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/javaClasses.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -50,14 +50,15 @@
#include "oops/typeArrayOop.inline.hpp"
#include "prims/resolvedMethodTable.hpp"
#include "runtime/fieldDescriptor.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.inline.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/preserveException.hpp"
@@ -1866,12 +1867,8 @@
oop java_lang_Throwable::unassigned_stacktrace() {
InstanceKlass* ik = SystemDictionary::Throwable_klass();
- address addr = ik->static_field_addr(static_unassigned_stacktrace_offset);
- if (UseCompressedOops) {
- return oopDesc::load_decode_heap_oop((narrowOop *)addr);
- } else {
- return oopDesc::load_decode_heap_oop((oop*)addr);
- }
+ oop base = ik->static_field_base_raw();
+ return base->obj_field(static_unassigned_stacktrace_offset);
}
oop java_lang_Throwable::backtrace(oop throwable) {
@@ -3547,14 +3544,14 @@
jlong java_lang_ref_SoftReference::clock() {
InstanceKlass* ik = SystemDictionary::SoftReference_klass();
- jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
- return *offset;
+ oop base = ik->static_field_base_raw();
+ return base->long_field(static_clock_offset);
}
void java_lang_ref_SoftReference::set_clock(jlong value) {
InstanceKlass* ik = SystemDictionary::SoftReference_klass();
- jlong* offset = (jlong*)ik->static_field_addr(static_clock_offset);
- *offset = value;
+ oop base = ik->static_field_base_raw();
+ base->long_field_put(static_clock_offset, value);
}
// Support for java_lang_invoke_DirectMethodHandle
@@ -4133,12 +4130,8 @@
bool java_lang_System::has_security_manager() {
InstanceKlass* ik = SystemDictionary::System_klass();
- address addr = ik->static_field_addr(static_security_offset);
- if (UseCompressedOops) {
- return oopDesc::load_decode_heap_oop((narrowOop *)addr) != NULL;
- } else {
- return oopDesc::load_decode_heap_oop((oop*)addr) != NULL;
- }
+ oop base = ik->static_field_base_raw();
+ return !oopDesc::is_null(base->obj_field(static_security_offset));
}
int java_lang_Class::_klass_offset;
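
The three hunks above make the same substitution: instead of computing a raw address with static_field_addr() and branching on UseCompressedOops at every call site, statics are read through the mirror base object plus a field offset, so one barrier-aware accessor handles the encoding. A plain-memory sketch of the base-plus-offset shape — Mirror is an invented stand-in; HotSpot routes this through oopDesc accessors instead:

    #include <cstdint>
    #include <cstring>

    struct Mirror { unsigned char statics[64]; };  // static field storage

    int64_t long_field(const Mirror* base, int offset) {
      int64_t v;
      std::memcpy(&v, base->statics + offset, sizeof v);
      return v;
    }

    void long_field_put(Mirror* base, int offset, int64_t v) {
      std::memcpy(base->statics + offset, &v, sizeof v);
    }

    int main() {
      Mirror m = {};
      long_field_put(&m, 8, 42);                // set_clock-style write
      return long_field(&m, 8) == 42 ? 0 : 1;   // clock()-style read
    }
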
--- a/src/hotspot/share/classfile/klassFactory.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/klassFactory.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
#include "memory/resourceArea.hpp"
#include "prims/jvmtiEnvBase.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
+#include "runtime/handles.inline.hpp"
#include "trace/traceMacros.hpp"
// called during initial loading of a shared class
--- a/src/hotspot/share/classfile/sharedClassUtil.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/sharedClassUtil.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@
#include "oops/instanceKlass.hpp"
#include "runtime/arguments.hpp"
#include "runtime/java.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
class ManifestStream: public ResourceObj {
private:
--- a/src/hotspot/share/classfile/stackMapTable.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/stackMapTable.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -144,6 +144,20 @@
str->print_cr(" }");
}
+StackMapReader::StackMapReader(ClassVerifier* v, StackMapStream* stream, char* code_data,
+ int32_t code_len, TRAPS) :
+ _verifier(v), _stream(stream),
+ _code_data(code_data), _code_length(code_len) {
+ methodHandle m = v->method();
+ if (m->has_stackmap_table()) {
+ _cp = constantPoolHandle(THREAD, m->constants());
+ _frame_count = _stream->get_u2(CHECK);
+ } else {
+ // There's no stackmap table present. Frame count and size are 0.
+ _frame_count = 0;
+ }
+}
+
int32_t StackMapReader::chop(
VerificationType* locals, int32_t length, int32_t chops) {
if (locals == NULL) return -1;
--- a/src/hotspot/share/classfile/stackMapTable.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/stackMapTable.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -142,18 +142,7 @@
public:
// Constructor
StackMapReader(ClassVerifier* v, StackMapStream* stream, char* code_data,
- int32_t code_len, TRAPS) :
- _verifier(v), _stream(stream),
- _code_data(code_data), _code_length(code_len) {
- methodHandle m = v->method();
- if (m->has_stackmap_table()) {
- _cp = constantPoolHandle(THREAD, m->constants());
- _frame_count = _stream->get_u2(CHECK);
- } else {
- // There's no stackmap table present. Frame count and size are 0.
- _frame_count = 0;
- }
- }
+ int32_t code_len, TRAPS);
inline int32_t get_frame_count() const { return _frame_count; }
StackMapFrame* next(StackMapFrame* pre_frame, bool first,
--- a/src/hotspot/share/classfile/systemDictionary.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/systemDictionary.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -34,7 +34,6 @@
#include "runtime/reflectionUtils.hpp"
#include "runtime/signature.hpp"
#include "utilities/hashtable.hpp"
-#include "utilities/hashtable.inline.hpp"
// The dictionary in each ClassLoaderData stores all loaded classes, either
// initiatied by its class loader or defined by its class loader:
--- a/src/hotspot/share/classfile/verifier.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/classfile/verifier.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -44,7 +44,7 @@
#include "oops/typeArrayOop.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
--- a/src/hotspot/share/code/codeBlob.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/codeBlob.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,7 +35,7 @@
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/share/code/compiledMethod.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,10 +27,12 @@
#include "code/compiledMethod.inline.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
-#include "prims/methodHandles.hpp"
#include "interpreter/bytecode.inline.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/methodData.hpp"
#include "oops/method.inline.hpp"
+#include "prims/methodHandles.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/mutexLocker.hpp"
CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
--- a/src/hotspot/share/code/compiledMethod.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -50,13 +50,15 @@
volatile int _count;
ExceptionCache* _next;
- address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
- void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
- address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
- void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
- int count();
+ inline address pc_at(int index);
+ void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
+
+ inline address handler_at(int index);
+ void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
+
+ inline int count();
// increment_count is only called under lock, but there may be concurrent readers.
- void increment_count();
+ void increment_count();
public:
@@ -306,9 +308,9 @@
virtual address get_original_pc(const frame* fr) = 0;
// Deopt
// Return true is the PC is one would expect if the frame is being deopted.
- bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
+ inline bool is_deopt_pc(address pc);
bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
- bool is_deopt_entry(address pc);
+ inline bool is_deopt_entry(address pc);
virtual bool can_convert_to_zombie() = 0;
virtual const char* compile_kind() const = 0;
--- a/src/hotspot/share/code/compiledMethod.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/compiledMethod.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,6 +29,8 @@
#include "code/nativeInst.hpp"
#include "runtime/frame.hpp"
+inline bool CompiledMethod::is_deopt_pc(address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
+
// When using JVMCI the address might be off by the size of a call instruction.
inline bool CompiledMethod::is_deopt_entry(address pc) {
return pc == deopt_handler_begin()
@@ -64,6 +66,16 @@
inline int ExceptionCache::count() { return OrderAccess::load_acquire(&_count); }
+address ExceptionCache::pc_at(int index) {
+ assert(index >= 0 && index < count(),"");
+ return _pc[index];
+}
+
+address ExceptionCache::handler_at(int index) {
+ assert(index >= 0 && index < count(),"");
+ return _handler[index];
+}
+
// increment_count is only called under lock, but there may be concurrent readers.
inline void ExceptionCache::increment_count() { OrderAccess::release_store(&_count, _count + 1); }
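
The relocated count()/increment_count() pair matches a release store with an acquire load so that a lock-free reader observing a new count also observes the _pc/_handler slots written before the count was published. A standalone sketch of the same publication pattern, using std::atomic in place of OrderAccess:

    #include <atomic>
    #include <cstdint>

    struct CacheSketch {
      static const int cache_size = 16;
      uintptr_t pc[cache_size];
      std::atomic<int> count{0};

      int snapshot() const {                     // reader, lock-free
        return count.load(std::memory_order_acquire);
      }
      void add(uintptr_t p) {                    // writer, holds the lock
        int n = count.load(std::memory_order_relaxed);
        if (n == cache_size) return;             // cache full
        pc[n] = p;                               // fill the slot first...
        count.store(n + 1, std::memory_order_release);  // ...then publish
      }
    };

    int main() {
      CacheSketch c;
      c.add(0x1234);
      return c.snapshot();  // a reader seeing 1 also sees pc[0]
    }
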
--- a/src/hotspot/share/code/debugInfo.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/debugInfo.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
#include "code/nmethod.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.hpp"
@@ -121,6 +121,10 @@
// ObjectValue
+void ObjectValue::set_value(oop value) {
+ _value = Handle(Thread::current(), value);
+}
+
void ObjectValue::read_object(DebugInfoReadStream* stream) {
_klass = read_from(stream);
assert(_klass->is_constant_oop(), "should be constant java mirror oop");
--- a/src/hotspot/share/code/debugInfo.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/debugInfo.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -128,7 +128,7 @@
Handle value() const { return _value; }
bool is_visited() const { return _visited; }
- void set_value(oop value) { _value = Handle(Thread::current(), value); }
+ void set_value(oop value);
void set_visited(bool visited) { _visited = false; }
// Serialization of debugging information
--- a/src/hotspot/share/code/nmethod.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/nmethod.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,7 @@
#include "jvm.h"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
+#include "code/compiledMethod.inline.hpp"
#include "code/dependencies.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
@@ -36,20 +37,25 @@
#include "compiler/compilerDirectives.hpp"
#include "compiler/directivesParser.hpp"
#include "compiler/disassembler.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
+#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
+#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
@@ -380,6 +386,10 @@
nul_chk_table_size();
}
+address* nmethod::orig_pc_addr(const frame* fr) {
+ return (address*) ((address)fr->unextended_sp() + _orig_pc_offset);
+}
+
const char* nmethod::compile_kind() const {
if (is_osr_method()) return "osr";
if (method() != NULL && is_native_method()) return "c2n";
@@ -1682,7 +1692,7 @@
{ NOT_PRODUCT(_print_nm = NULL); }
bool detected_scavenge_root() { return _detected_scavenge_root; }
virtual void do_oop(oop* p) {
- if ((*p) != NULL && (*p)->is_scavengable()) {
+ if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
NOT_PRODUCT(maybe_print(p));
_detected_scavenge_root = true;
}
@@ -2177,7 +2187,7 @@
DebugScavengeRoot(nmethod* nm) : _nm(nm), _ok(true) { }
bool ok() { return _ok; }
virtual void do_oop(oop* p) {
- if ((*p) == NULL || !(*p)->is_scavengable()) return;
+ if ((*p) == NULL || !Universe::heap()->is_scavengable(*p)) return;
if (_ok) {
_nm->print_nmethod(true);
_ok = false;
--- a/src/hotspot/share/code/nmethod.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/nmethod.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -512,7 +512,7 @@
private:
ScopeDesc* scope_desc_in(address begin, address end);
- address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
+ address* orig_pc_addr(const frame* fr);
public:
// copying of debugging information
--- a/src/hotspot/share/code/relocInfo_ext.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/code/relocInfo_ext.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "code/relocInfo.hpp"
#include "code/relocInfo_ext.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "runtime/os.hpp"
@@ -60,7 +60,7 @@
}
case symbolic_Relocation::card_table_reference: {
BarrierSet* bs = Universe::heap()->barrier_set();
- CardTableModRefBS* ctbs = barrier_set_cast<CardTableModRefBS>(bs);
+ CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
CardTable* ct = ctbs->card_table();
return (address)ct->byte_map_base();
}
--- a/src/hotspot/share/compiler/compileBroker.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/compiler/compileBroker.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -47,12 +47,13 @@
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/timerTrace.hpp"
+#include "runtime/vframe.inline.hpp"
#include "trace/tracing.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
@@ -1344,11 +1345,11 @@
#if INCLUDE_JVMCI
// The number of milliseconds to wait before checking if
// JVMCI compilation has made progress.
-static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 500;
+static const long JVMCI_COMPILATION_PROGRESS_WAIT_TIMESLICE = 1000;
// The number of JVMCI compilation progress checks that must fail
// before unblocking a thread waiting for a blocking compilation.
-static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 5;
+static const int JVMCI_COMPILATION_PROGRESS_WAIT_ATTEMPTS = 10;
/**
* Waits for a JVMCI compiler to complete a given task. This thread
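
The retuned constants double both the wait timeslice and the number of failed progress checks tolerated, so a thread blocked on a JVMCI compilation now waits up to 10 * 1000 ms, rather than 5 * 500 ms, before being unblocked. A sketch of how the two constants interact — the predicates and names are invented, not the CompileBroker code:

    #include <chrono>
    #include <thread>

    static const long WAIT_TIMESLICE_MS = 1000;  // per-check wait
    static const int  WAIT_ATTEMPTS     = 10;    // stalled checks tolerated

    // `made_progress` stands in for sampling the compiler's tick counter.
    bool wait_for_completion(bool (*done)(), bool (*made_progress)()) {
      int stalled = 0;
      while (!done()) {
        std::this_thread::sleep_for(
            std::chrono::milliseconds(WAIT_TIMESLICE_MS));
        stalled = made_progress() ? 0 : stalled + 1;
        if (stalled >= WAIT_ATTEMPTS) {
          return false;  // ~10 s with no observed progress: give up
        }
      }
      return true;
    }

    int main() {
      // Trivially-true predicates keep the example terminating at once.
      return wait_for_completion([] { return true; },
                                 [] { return true; }) ? 0 : 1;
    }
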
--- a/src/hotspot/share/compiler/compileTask.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/compiler/compileTask.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
+#include "runtime/handles.inline.hpp"
CompileTask* CompileTask::_task_free_list = NULL;
#ifdef ASSERT
--- a/src/hotspot/share/compiler/compilerDirectives.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/compiler/compilerDirectives.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "ci/ciMethod.hpp"
-#include "ci/ciUtilities.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compilerDirectives.hpp"
#include "compiler/compilerOracle.hpp"
--- a/src/hotspot/share/compiler/compilerDirectives.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/compiler/compilerDirectives.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,6 @@
#include "ci/ciMetadata.hpp"
#include "ci/ciMethod.hpp"
-#include "ci/ciUtilities.hpp"
#include "compiler/methodMatcher.hpp"
#include "compiler/compilerOracle.hpp"
#include "utilities/exceptions.hpp"
--- a/src/hotspot/share/compiler/directivesParser.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/compiler/directivesParser.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
#include "compiler/directivesParser.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
-#include "runtime/os.hpp"
+#include "runtime/os.inline.hpp"
#include <string.h>
void DirectivesParser::push_tmp(CompilerDirectives* dir) {
--- a/src/hotspot/share/compiler/disassembler.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/compiler/disassembler.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
#include "code/codeCache.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
@@ -319,7 +319,7 @@
}
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->is_a(BarrierSet::CardTableModRef) &&
+ if (bs->is_a(BarrierSet::CardTableBarrierSet) &&
adr == ci_card_table_address_as<address>()) {
st->print("word_map_base");
if (WizardMode) st->print(" " INTPTR_FORMAT, p2i(adr));
--- a/src/hotspot/share/compiler/methodMatcher.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/compiler/methodMatcher.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define SHARE_VM_COMPILER_METHODMATCHER_HPP
#include "memory/allocation.hpp"
-#include "runtime/handles.inline.hpp"
+#include "runtime/handles.hpp"
#include "memory/resourceArea.hpp"
class MethodMatcher : public CHeapObj<mtCompiler> {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/cms/cmsCardTable.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/cms/cmsHeap.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTableRS.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/space.inline.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/virtualspace.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/java.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.inline.hpp"
+#include "runtime/vmThread.hpp"
+
+void CardTableRS::
+non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
+ OopsInGenClosure* cl,
+ CardTableRS* ct,
+ uint n_threads) {
+ assert(n_threads > 0, "expected n_threads > 0");
+ assert(n_threads <= ParallelGCThreads,
+ "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
+
+ // Make sure the LNC array is valid for the space.
+ jbyte** lowest_non_clean;
+ uintptr_t lowest_non_clean_base_chunk_index;
+ size_t lowest_non_clean_chunk_size;
+ get_LNC_array_for_space(sp, lowest_non_clean,
+ lowest_non_clean_base_chunk_index,
+ lowest_non_clean_chunk_size);
+
+ uint n_strides = n_threads * ParGCStridesPerThread;
+ SequentialSubTasksDone* pst = sp->par_seq_tasks();
+ // Sets the condition for completion of the subtask (how many threads
+ // need to finish in order to be done).
+ pst->set_n_threads(n_threads);
+ pst->set_n_tasks(n_strides);
+
+ uint stride = 0;
+ while (!pst->is_task_claimed(/* reference */ stride)) {
+ process_stride(sp, mr, stride, n_strides,
+ cl, ct,
+ lowest_non_clean,
+ lowest_non_clean_base_chunk_index,
+ lowest_non_clean_chunk_size);
+ }
+ if (pst->all_tasks_completed()) {
+ // Clear lowest_non_clean array for next time.
+ intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
+ uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
+ for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
+ intptr_t ind = ch - lowest_non_clean_base_chunk_index;
+ assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
+ "Bounds error");
+ lowest_non_clean[ind] = NULL;
+ }
+ }
+}
+
+void
+CardTableRS::
+process_stride(Space* sp,
+ MemRegion used,
+ jint stride, int n_strides,
+ OopsInGenClosure* cl,
+ CardTableRS* ct,
+ jbyte** lowest_non_clean,
+ uintptr_t lowest_non_clean_base_chunk_index,
+ size_t lowest_non_clean_chunk_size) {
+ // We go from higher to lower addresses here; it wouldn't help that much
+ // because of the strided parallelism pattern used here.
+
+ // Find the first card address of the first chunk in the stride that is
+ // at least "bottom" of the used region.
+ jbyte* start_card = byte_for(used.start());
+ jbyte* end_card = byte_after(used.last());
+ uintptr_t start_chunk = addr_to_chunk_index(used.start());
+ uintptr_t start_chunk_stride_num = start_chunk % n_strides;
+ jbyte* chunk_card_start;
+
+ if ((uintptr_t)stride >= start_chunk_stride_num) {
+ chunk_card_start = (jbyte*)(start_card +
+ (stride - start_chunk_stride_num) *
+ ParGCCardsPerStrideChunk);
+ } else {
+ // Go ahead to the next chunk group boundary, then to the requested stride.
+ chunk_card_start = (jbyte*)(start_card +
+ (n_strides - start_chunk_stride_num + stride) *
+ ParGCCardsPerStrideChunk);
+ }
+
+ while (chunk_card_start < end_card) {
+ // Even though we go from lower to higher addresses below, the
+ // strided parallelism can interleave the actual processing of the
+ // dirty pages in various ways. For a specific chunk within this
+ // stride, we take care to avoid double scanning or missing a card
+ // by suitably initializing the "min_done" field in process_chunk_boundaries()
+ // below, together with the dirty region extension accomplished in
+ // DirtyCardToOopClosure::do_MemRegion().
+ jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
+ // Invariant: chunk_mr should be fully contained within the "used" region.
+ MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
+ chunk_card_end >= end_card ?
+ used.end() : addr_for(chunk_card_end));
+ assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
+ assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
+
+ // This function is used by the parallel card table iteration.
+ const bool parallel = true;
+
+ DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
+ cl->gen_boundary(),
+ parallel);
+ ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
+
+
+ // Process the chunk.
+ process_chunk_boundaries(sp,
+ dcto_cl,
+ chunk_mr,
+ used,
+ lowest_non_clean,
+ lowest_non_clean_base_chunk_index,
+ lowest_non_clean_chunk_size);
+
+ // We want the LNC array updates above in process_chunk_boundaries
+ // to be visible before any of the card table value changes as a
+ // result of the dirty card iteration below.
+ OrderAccess::storestore();
+
+ // We want to clear the cards: clear_cl here does the work of finding
+ // contiguous dirty ranges of cards to process and clear.
+ clear_cl.do_MemRegion(chunk_mr);
+
+ // Find the next chunk of the stride.
+ chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
+ }
+}
+
+void
+CardTableRS::
+process_chunk_boundaries(Space* sp,
+ DirtyCardToOopClosure* dcto_cl,
+ MemRegion chunk_mr,
+ MemRegion used,
+ jbyte** lowest_non_clean,
+ uintptr_t lowest_non_clean_base_chunk_index,
+ size_t lowest_non_clean_chunk_size)
+{
+ // We must worry about non-array objects that cross chunk boundaries,
+ // because such objects are both precisely and imprecisely marked:
+ // .. if the head of such an object is dirty, the entire object
+ // needs to be scanned, under the interpretation that this
+ // was an imprecise mark
+ // .. if the head of such an object is not dirty, we can assume
+ // precise marking and it's efficient to scan just the dirty
+ // cards.
+ // In either case, each scanned reference must be scanned precisely
+ // once so as to avoid cloning of a young referent. For efficiency,
+ // our closures depend on this property and do not protect against
+ // double scans.
+
+ uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
+ assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
+ uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;
+
+ // First, set "our" lowest_non_clean entry, which would be
+ // used by the thread scanning an adjoining left chunk with
+ // a non-array object straddling the mutual boundary.
+ // Find the object that spans our boundary, if one exists.
+ // first_block is the block possibly straddling our left boundary.
+ HeapWord* first_block = sp->block_start(chunk_mr.start());
+ assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
+ "First chunk should always have a co-initial block");
+ // Does the block straddle the chunk's left boundary, and is it
+ // a non-array object?
+ if (first_block < chunk_mr.start() // first block straddles left bdry
+ && sp->block_is_obj(first_block) // first block is an object
+ && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied)
+ || oop(first_block)->is_typeArray())) {
+ // Find our least non-clean card, so that a left neighbor
+ // does not scan an object straddling the mutual boundary
+ // too far to the right, and attempt to scan a portion of
+ // that object twice.
+ jbyte* first_dirty_card = NULL;
+ jbyte* last_card_of_first_obj =
+ byte_for(first_block + sp->block_size(first_block) - 1);
+ jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+ jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
+ jbyte* last_card_to_check =
+ (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
+ (intptr_t) last_card_of_first_obj);
+ // Note that this does not need to go beyond our last card
+ // if our first object completely straddles this chunk.
+ for (jbyte* cur = first_card_of_cur_chunk;
+ cur <= last_card_to_check; cur++) {
+ jbyte val = *cur;
+ if (card_will_be_scanned(val)) {
+ first_dirty_card = cur; break;
+ } else {
+ assert(!card_may_have_been_dirty(val), "Error");
+ }
+ }
+ if (first_dirty_card != NULL) {
+ assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
+ assert(lowest_non_clean[cur_chunk_index] == NULL,
+ "Write exactly once : value should be stable hereafter for this round");
+ lowest_non_clean[cur_chunk_index] = first_dirty_card;
+ }
+ } else {
+ // In this case we can help our neighbor by just asking them
+ // to stop at our first card (even though it may not be dirty).
+ assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
+ jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
+ lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
+ }
+
+ // Next, set our own max_to_do, which will strictly/exclusively bound
+ // the highest address that we will scan past the right end of our chunk.
+ HeapWord* max_to_do = NULL;
+ if (chunk_mr.end() < used.end()) {
+ // This is not the last chunk in the used region.
+ // What is our last block? We check the first block of
+ // the next (right) chunk rather than strictly check our last block
+ // because it's potentially more efficient to do so.
+ HeapWord* const last_block = sp->block_start(chunk_mr.end());
+ assert(last_block <= chunk_mr.end(), "In case this property changes.");
+ if ((last_block == chunk_mr.end()) // our last block does not straddle boundary
+ || !sp->block_is_obj(last_block) // last_block isn't an object
+ || oop(last_block)->is_objArray() // last_block is an array (precisely marked)
+ || oop(last_block)->is_typeArray()) {
+ max_to_do = chunk_mr.end();
+ } else {
+ assert(last_block < chunk_mr.end(), "Tautology");
+ // It is a non-array object that straddles the right boundary of this chunk.
+ // last_obj_card is the card corresponding to the start of the last object
+ // in the chunk. Note that the last object may not start in
+ // the chunk.
+ jbyte* const last_obj_card = byte_for(last_block);
+ const jbyte val = *last_obj_card;
+ if (!card_will_be_scanned(val)) {
+ assert(!card_may_have_been_dirty(val), "Error");
+ // The card containing the head is not dirty. Any marks on
+ // subsequent cards still in this chunk must have been made
+ // precisely; we can cap processing at the end of our chunk.
+ max_to_do = chunk_mr.end();
+ } else {
+ // The last object must be considered dirty, and extends onto the
+ // following chunk. Look for a dirty card in that chunk that will
+ // bound our processing.
+ jbyte* limit_card = NULL;
+ const size_t last_block_size = sp->block_size(last_block);
+ jbyte* const last_card_of_last_obj =
+ byte_for(last_block + last_block_size - 1);
+ jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
+ // This search potentially goes a long distance looking
+ // for the next card that will be scanned, terminating
+ // at the end of the last_block, if no earlier dirty card
+ // is found.
+ assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
+ "last card of next chunk may be wrong");
+ for (jbyte* cur = first_card_of_next_chunk;
+ cur <= last_card_of_last_obj; cur++) {
+ const jbyte val = *cur;
+ if (card_will_be_scanned(val)) {
+ limit_card = cur; break;
+ } else {
+ assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
+ }
+ }
+ if (limit_card != NULL) {
+ max_to_do = addr_for(limit_card);
+ assert(limit_card != NULL && max_to_do != NULL, "Error");
+ } else {
+ // The following is a pessimistic value, because it's possible
+ // that a dirty card on a subsequent chunk has been cleared by
+ // the time we get to look at it; we'll correct for that further below,
+ // using the LNC array which records the least non-clean card
+ // before cards were cleared in a particular chunk.
+ limit_card = last_card_of_last_obj;
+ max_to_do = last_block + last_block_size;
+ assert(limit_card != NULL && max_to_do != NULL, "Error");
+ }
+ assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
+ "Bounds error.");
+ // It is possible that a dirty card for the last object may have been
+ // cleared before we had a chance to examine it. In that case, the value
+ // will have been logged in the LNC for that chunk.
+ // We need to examine as many chunks to the right as this object
+ // covers. However, we need to bound this checking to the largest
+ // entry in the LNC array: this is because the heap may expand
+ // after the LNC array has been created but before we reach this point,
+ // and the last block in our chunk may have been expanded to include
+ // the expansion delta (and possibly subsequently allocated from, so
+ // it wouldn't be sufficient to check whether that last block was
+ // or was not an object at this point).
+ uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
+ - lowest_non_clean_base_chunk_index;
+ const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
+ - lowest_non_clean_base_chunk_index;
+ if (last_chunk_index_to_check > last_chunk_index) {
+ assert(last_block + last_block_size > used.end(),
+ "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
+ " does not exceed used.end() = " PTR_FORMAT ","
+ " yet last_chunk_index_to_check " INTPTR_FORMAT
+ " exceeds last_chunk_index " INTPTR_FORMAT,
+ p2i(last_block), p2i(last_block + last_block_size),
+ p2i(used.end()),
+ last_chunk_index_to_check, last_chunk_index);
+ assert(sp->used_region().end() > used.end(),
+ "Expansion did not happen: "
+ "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
+ p2i(sp->used_region().start()), p2i(sp->used_region().end()),
+ p2i(used.start()), p2i(used.end()));
+ last_chunk_index_to_check = last_chunk_index;
+ }
+ for (uintptr_t lnc_index = cur_chunk_index + 1;
+ lnc_index <= last_chunk_index_to_check;
+ lnc_index++) {
+ jbyte* lnc_card = lowest_non_clean[lnc_index];
+ if (lnc_card != NULL) {
+ // we can stop at the first non-NULL entry we find
+ if (lnc_card <= limit_card) {
+ limit_card = lnc_card;
+ max_to_do = addr_for(limit_card);
+ assert(limit_card != NULL && max_to_do != NULL, "Error");
+ }
+ // In any case, we break now
+ break;
+ } // else continue to look for a non-NULL entry if any
+ }
+ assert(limit_card != NULL && max_to_do != NULL, "Error");
+ }
+ assert(max_to_do != NULL, "OOPS 1 !");
+ }
+ assert(max_to_do != NULL, "OOPS 2!");
+ } else {
+ max_to_do = used.end();
+ }
+ assert(max_to_do != NULL, "OOPS 3!");
+ // Now we can set the closure we're using so it doesn't go beyond
+ // max_to_do.
+ dcto_cl->set_min_done(max_to_do);
+#ifndef PRODUCT
+ dcto_cl->set_last_bottom(max_to_do);
+#endif
+}
+
+void
+CardTableRS::
+get_LNC_array_for_space(Space* sp,
+ jbyte**& lowest_non_clean,
+ uintptr_t& lowest_non_clean_base_chunk_index,
+ size_t& lowest_non_clean_chunk_size) {
+
+ int i = find_covering_region_containing(sp->bottom());
+ MemRegion covered = _covered[i];
+ size_t n_chunks = chunks_to_cover(covered);
+
+ // Only the first thread to obtain the lock will resize the
+ // LNC array for the covered region. Any later expansion can't affect
+ // the used_at_save_marks region.
+ // (I observed a bug in which the first thread to execute this would
+ // resize, and then it would cause "expand_and_allocate" that would
+ // increase the number of chunks in the covered region. Then a second
+ // thread would come and execute this, see that the size didn't match,
+ // and free and allocate again. So the first thread would be using a
+ // freed "_lowest_non_clean" array.)
+
+ // Do a dirty read here. If we pass the conditional then take the rare
+ // event lock and do the read again in case some other thread had already
+ // succeeded and done the resize.
+ int cur_collection = CMSHeap::heap()->total_collections();
+ // Updated _last_LNC_resizing_collection[i] must not be visible before
+ // _lowest_non_clean and friends are visible. Therefore use acquire/release
+ // to guarantee this on non-TSO architectures.
+ if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
+ MutexLocker x(ParGCRareEvent_lock);
+ // This load_acquire is here for clarity only. The MutexLocker already fences.
+ if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
+ if (_lowest_non_clean[i] == NULL ||
+ n_chunks != _lowest_non_clean_chunk_size[i]) {
+
+ // Should we delete the old?
+ if (_lowest_non_clean[i] != NULL) {
+ assert(n_chunks != _lowest_non_clean_chunk_size[i],
+ "logical consequence");
+ FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
+ _lowest_non_clean[i] = NULL;
+ }
+ // Now allocate a new one if necessary.
+ if (_lowest_non_clean[i] == NULL) {
+ _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
+ _lowest_non_clean_chunk_size[i] = n_chunks;
+ _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
+ for (int j = 0; j < (int)n_chunks; j++)
+ _lowest_non_clean[i][j] = NULL;
+ }
+ }
+ // Make sure this becomes visible only after _lowest_non_clean* has been initialized
+ OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
+ }
+ }
+ // In any case, now do the initialization.
+ lowest_non_clean = _lowest_non_clean[i];
+ lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
+ lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
+}
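
The resize path above is a double-checked publication pattern: a racy load_acquire of _last_LNC_resizing_collection, the rare-event lock, a second check, and finally a release_store that publishes the counter only after the array is fully initialized. A minimal standalone sketch of the same pattern, using std::atomic and std::mutex in place of HotSpot's OrderAccess and MutexLocker (all names below are illustrative, not part of this patch):

  #include <atomic>
  #include <cstddef>
  #include <mutex>

  struct LncTable {
    std::atomic<int> last_resize_collection{-1};
    std::mutex       rare_event_lock;
    void**           lowest_non_clean = nullptr;
    size_t           chunk_size = 0;

    // (Re)size and publish the array at most once per collection.
    void ensure_sized(int cur_collection, size_t n_chunks) {
      // Racy first check keeps the common path lock-free.
      if (last_resize_collection.load(std::memory_order_acquire) == cur_collection)
        return;
      std::lock_guard<std::mutex> guard(rare_event_lock);
      // Re-check under the lock; another thread may have won the race.
      if (last_resize_collection.load(std::memory_order_acquire) == cur_collection)
        return;
      if (lowest_non_clean == nullptr || n_chunks != chunk_size) {
        delete[] lowest_non_clean;
        lowest_non_clean = new void*[n_chunks]();   // zero-initialized
        chunk_size = n_chunks;
      }
      // Release-store: array contents become visible before the counter does.
      last_resize_collection.store(cur_collection, std::memory_order_release);
    }
  };

The fast-path acquire pairs with the writer's release, so any thread that observes the current collection number also observes the fully initialized array.
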
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepThread.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,6 @@
#include "gc/shared/gcId.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/share/gc/cms/parCardTableModRefBS.cpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,432 +0,0 @@
-/*
- * Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/cms/cmsHeap.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
-#include "gc/shared/cardTableRS.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/virtualspace.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/java.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
-#include "runtime/vmThread.hpp"
-
-void CardTableRS::
-non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
- OopsInGenClosure* cl,
- CardTableRS* ct,
- uint n_threads) {
- assert(n_threads > 0, "expected n_threads > 0");
- assert(n_threads <= ParallelGCThreads,
- "n_threads: %u > ParallelGCThreads: %u", n_threads, ParallelGCThreads);
-
- // Make sure the LNC array is valid for the space.
- jbyte** lowest_non_clean;
- uintptr_t lowest_non_clean_base_chunk_index;
- size_t lowest_non_clean_chunk_size;
- get_LNC_array_for_space(sp, lowest_non_clean,
- lowest_non_clean_base_chunk_index,
- lowest_non_clean_chunk_size);
-
- uint n_strides = n_threads * ParGCStridesPerThread;
- SequentialSubTasksDone* pst = sp->par_seq_tasks();
- // Sets the condition for completion of the subtask (how many threads
- // need to finish in order to be done).
- pst->set_n_threads(n_threads);
- pst->set_n_tasks(n_strides);
-
- uint stride = 0;
- while (!pst->is_task_claimed(/* reference */ stride)) {
- process_stride(sp, mr, stride, n_strides,
- cl, ct,
- lowest_non_clean,
- lowest_non_clean_base_chunk_index,
- lowest_non_clean_chunk_size);
- }
- if (pst->all_tasks_completed()) {
- // Clear lowest_non_clean array for next time.
- intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
- uintptr_t last_chunk_index = addr_to_chunk_index(mr.last());
- for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
- intptr_t ind = ch - lowest_non_clean_base_chunk_index;
- assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
- "Bounds error");
- lowest_non_clean[ind] = NULL;
- }
- }
-}
-
-void
-CardTableRS::
-process_stride(Space* sp,
- MemRegion used,
- jint stride, int n_strides,
- OopsInGenClosure* cl,
- CardTableRS* ct,
- jbyte** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size) {
- // We go from higher to lower addresses here; it wouldn't help that much
- // because of the strided parallelism pattern used here.
-
- // Find the first card address of the first chunk in the stride that is
- // at least "bottom" of the used region.
- jbyte* start_card = byte_for(used.start());
- jbyte* end_card = byte_after(used.last());
- uintptr_t start_chunk = addr_to_chunk_index(used.start());
- uintptr_t start_chunk_stride_num = start_chunk % n_strides;
- jbyte* chunk_card_start;
-
- if ((uintptr_t)stride >= start_chunk_stride_num) {
- chunk_card_start = (jbyte*)(start_card +
- (stride - start_chunk_stride_num) *
- ParGCCardsPerStrideChunk);
- } else {
- // Go ahead to the next chunk group boundary, then to the requested stride.
- chunk_card_start = (jbyte*)(start_card +
- (n_strides - start_chunk_stride_num + stride) *
- ParGCCardsPerStrideChunk);
- }
-
- while (chunk_card_start < end_card) {
- // Even though we go from lower to higher addresses below, the
- // strided parallelism can interleave the actual processing of the
- // dirty pages in various ways. For a specific chunk within this
- // stride, we take care to avoid double scanning or missing a card
- // by suitably initializing the "min_done" field in process_chunk_boundaries()
- // below, together with the dirty region extension accomplished in
- // DirtyCardToOopClosure::do_MemRegion().
- jbyte* chunk_card_end = chunk_card_start + ParGCCardsPerStrideChunk;
- // Invariant: chunk_mr should be fully contained within the "used" region.
- MemRegion chunk_mr = MemRegion(addr_for(chunk_card_start),
- chunk_card_end >= end_card ?
- used.end() : addr_for(chunk_card_end));
- assert(chunk_mr.word_size() > 0, "[chunk_card_start > used_end)");
- assert(used.contains(chunk_mr), "chunk_mr should be subset of used");
-
- // This function is used by the parallel card table iteration.
- const bool parallel = true;
-
- DirtyCardToOopClosure* dcto_cl = sp->new_dcto_cl(cl, precision(),
- cl->gen_boundary(),
- parallel);
- ClearNoncleanCardWrapper clear_cl(dcto_cl, ct, parallel);
-
-
- // Process the chunk.
- process_chunk_boundaries(sp,
- dcto_cl,
- chunk_mr,
- used,
- lowest_non_clean,
- lowest_non_clean_base_chunk_index,
- lowest_non_clean_chunk_size);
-
- // We want the LNC array updates above in process_chunk_boundaries
- // to be visible before any of the card table value changes as a
- // result of the dirty card iteration below.
- OrderAccess::storestore();
-
- // We want to clear the cards: clear_cl here does the work of finding
- // contiguous dirty ranges of cards to process and clear.
- clear_cl.do_MemRegion(chunk_mr);
-
- // Find the next chunk of the stride.
- chunk_card_start += ParGCCardsPerStrideChunk * n_strides;
- }
-}
-
-void
-CardTableRS::
-process_chunk_boundaries(Space* sp,
- DirtyCardToOopClosure* dcto_cl,
- MemRegion chunk_mr,
- MemRegion used,
- jbyte** lowest_non_clean,
- uintptr_t lowest_non_clean_base_chunk_index,
- size_t lowest_non_clean_chunk_size)
-{
- // We must worry about non-array objects that cross chunk boundaries,
- // because such objects are both precisely and imprecisely marked:
- // .. if the head of such an object is dirty, the entire object
- // needs to be scanned, under the interpretation that this
- // was an imprecise mark
- // .. if the head of such an object is not dirty, we can assume
- // precise marking and it's efficient to scan just the dirty
- // cards.
- // In either case, each scanned reference must be scanned precisely
- // once so as to avoid cloning of a young referent. For efficiency,
- // our closures depend on this property and do not protect against
- // double scans.
-
- uintptr_t start_chunk_index = addr_to_chunk_index(chunk_mr.start());
- assert(start_chunk_index >= lowest_non_clean_base_chunk_index, "Bounds error.");
- uintptr_t cur_chunk_index = start_chunk_index - lowest_non_clean_base_chunk_index;
-
- // First, set "our" lowest_non_clean entry, which would be
- // used by the thread scanning an adjoining left chunk with
- // a non-array object straddling the mutual boundary.
- // Find the object that spans our boundary, if one exists.
- // first_block is the block possibly straddling our left boundary.
- HeapWord* first_block = sp->block_start(chunk_mr.start());
- assert((chunk_mr.start() != used.start()) || (first_block == chunk_mr.start()),
- "First chunk should always have a co-initial block");
- // Does the block straddle the chunk's left boundary, and is it
- // a non-array object?
- if (first_block < chunk_mr.start() // first block straddles left bdry
- && sp->block_is_obj(first_block) // first block is an object
- && !(oop(first_block)->is_objArray() // first block is not an array (arrays are precisely dirtied)
- || oop(first_block)->is_typeArray())) {
- // Find our least non-clean card, so that a left neighbor
- // does not scan an object straddling the mutual boundary
- // too far to the right, and attempt to scan a portion of
- // that object twice.
- jbyte* first_dirty_card = NULL;
- jbyte* last_card_of_first_obj =
- byte_for(first_block + sp->block_size(first_block) - 1);
- jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
- jbyte* last_card_of_cur_chunk = byte_for(chunk_mr.last());
- jbyte* last_card_to_check =
- (jbyte*) MIN2((intptr_t) last_card_of_cur_chunk,
- (intptr_t) last_card_of_first_obj);
- // Note that this does not need to go beyond our last card
- // if our first object completely straddles this chunk.
- for (jbyte* cur = first_card_of_cur_chunk;
- cur <= last_card_to_check; cur++) {
- jbyte val = *cur;
- if (card_will_be_scanned(val)) {
- first_dirty_card = cur; break;
- } else {
- assert(!card_may_have_been_dirty(val), "Error");
- }
- }
- if (first_dirty_card != NULL) {
- assert(cur_chunk_index < lowest_non_clean_chunk_size, "Bounds error.");
- assert(lowest_non_clean[cur_chunk_index] == NULL,
- "Write exactly once : value should be stable hereafter for this round");
- lowest_non_clean[cur_chunk_index] = first_dirty_card;
- }
- } else {
- // In this case we can help our neighbor by just asking them
- // to stop at our first card (even though it may not be dirty).
- assert(lowest_non_clean[cur_chunk_index] == NULL, "Write once : value should be stable hereafter");
- jbyte* first_card_of_cur_chunk = byte_for(chunk_mr.start());
- lowest_non_clean[cur_chunk_index] = first_card_of_cur_chunk;
- }
-
- // Next, set our own max_to_do, which will strictly/exclusively bound
- // the highest address that we will scan past the right end of our chunk.
- HeapWord* max_to_do = NULL;
- if (chunk_mr.end() < used.end()) {
- // This is not the last chunk in the used region.
- // What is our last block? We check the first block of
- // the next (right) chunk rather than strictly check our last block
- // because it's potentially more efficient to do so.
- HeapWord* const last_block = sp->block_start(chunk_mr.end());
- assert(last_block <= chunk_mr.end(), "In case this property changes.");
- if ((last_block == chunk_mr.end()) // our last block does not straddle boundary
- || !sp->block_is_obj(last_block) // last_block isn't an object
- || oop(last_block)->is_objArray() // last_block is an array (precisely marked)
- || oop(last_block)->is_typeArray()) {
- max_to_do = chunk_mr.end();
- } else {
- assert(last_block < chunk_mr.end(), "Tautology");
- // It is a non-array object that straddles the right boundary of this chunk.
- // last_obj_card is the card corresponding to the start of the last object
- // in the chunk. Note that the last object may not start in
- // the chunk.
- jbyte* const last_obj_card = byte_for(last_block);
- const jbyte val = *last_obj_card;
- if (!card_will_be_scanned(val)) {
- assert(!card_may_have_been_dirty(val), "Error");
- // The card containing the head is not dirty. Any marks on
- // subsequent cards still in this chunk must have been made
- // precisely; we can cap processing at the end of our chunk.
- max_to_do = chunk_mr.end();
- } else {
- // The last object must be considered dirty, and extends onto the
- // following chunk. Look for a dirty card in that chunk that will
- // bound our processing.
- jbyte* limit_card = NULL;
- const size_t last_block_size = sp->block_size(last_block);
- jbyte* const last_card_of_last_obj =
- byte_for(last_block + last_block_size - 1);
- jbyte* const first_card_of_next_chunk = byte_for(chunk_mr.end());
- // This search potentially goes a long distance looking
- // for the next card that will be scanned, terminating
- // at the end of the last_block, if no earlier dirty card
- // is found.
- assert(byte_for(chunk_mr.end()) - byte_for(chunk_mr.start()) == ParGCCardsPerStrideChunk,
- "last card of next chunk may be wrong");
- for (jbyte* cur = first_card_of_next_chunk;
- cur <= last_card_of_last_obj; cur++) {
- const jbyte val = *cur;
- if (card_will_be_scanned(val)) {
- limit_card = cur; break;
- } else {
- assert(!card_may_have_been_dirty(val), "Error: card can't be skipped");
- }
- }
- if (limit_card != NULL) {
- max_to_do = addr_for(limit_card);
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- } else {
- // The following is a pessimistic value, because it's possible
- // that a dirty card on a subsequent chunk has been cleared by
- // the time we get to look at it; we'll correct for that further below,
- // using the LNC array which records the least non-clean card
- // before cards were cleared in a particular chunk.
- limit_card = last_card_of_last_obj;
- max_to_do = last_block + last_block_size;
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- }
- assert(0 < cur_chunk_index+1 && cur_chunk_index+1 < lowest_non_clean_chunk_size,
- "Bounds error.");
- // It is possible that a dirty card for the last object may have been
- // cleared before we had a chance to examine it. In that case, the value
- // will have been logged in the LNC for that chunk.
- // We need to examine as many chunks to the right as this object
- // covers. However, we need to bound this checking to the largest
- // entry in the LNC array: this is because the heap may expand
- // after the LNC array has been created but before we reach this point,
- // and the last block in our chunk may have been expanded to include
- // the expansion delta (and possibly subsequently allocated from, so
- // it wouldn't be sufficient to check whether that last block was
- // or was not an object at this point).
- uintptr_t last_chunk_index_to_check = addr_to_chunk_index(last_block + last_block_size - 1)
- - lowest_non_clean_base_chunk_index;
- const uintptr_t last_chunk_index = addr_to_chunk_index(used.last())
- - lowest_non_clean_base_chunk_index;
- if (last_chunk_index_to_check > last_chunk_index) {
- assert(last_block + last_block_size > used.end(),
- "Inconsistency detected: last_block [" PTR_FORMAT "," PTR_FORMAT "]"
- " does not exceed used.end() = " PTR_FORMAT ","
- " yet last_chunk_index_to_check " INTPTR_FORMAT
- " exceeds last_chunk_index " INTPTR_FORMAT,
- p2i(last_block), p2i(last_block + last_block_size),
- p2i(used.end()),
- last_chunk_index_to_check, last_chunk_index);
- assert(sp->used_region().end() > used.end(),
- "Expansion did not happen: "
- "[" PTR_FORMAT "," PTR_FORMAT ") -> [" PTR_FORMAT "," PTR_FORMAT ")",
- p2i(sp->used_region().start()), p2i(sp->used_region().end()),
- p2i(used.start()), p2i(used.end()));
- last_chunk_index_to_check = last_chunk_index;
- }
- for (uintptr_t lnc_index = cur_chunk_index + 1;
- lnc_index <= last_chunk_index_to_check;
- lnc_index++) {
- jbyte* lnc_card = lowest_non_clean[lnc_index];
- if (lnc_card != NULL) {
- // we can stop at the first non-NULL entry we find
- if (lnc_card <= limit_card) {
- limit_card = lnc_card;
- max_to_do = addr_for(limit_card);
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- }
- // In any case, we break now
- break;
- } // else continue to look for a non-NULL entry if any
- }
- assert(limit_card != NULL && max_to_do != NULL, "Error");
- }
- assert(max_to_do != NULL, "OOPS 1 !");
- }
- assert(max_to_do != NULL, "OOPS 2!");
- } else {
- max_to_do = used.end();
- }
- assert(max_to_do != NULL, "OOPS 3!");
- // Now we can set the closure we're using so it doesn't to beyond
- // max_to_do.
- dcto_cl->set_min_done(max_to_do);
-#ifndef PRODUCT
- dcto_cl->set_last_bottom(max_to_do);
-#endif
-}
-
-void
-CardTableRS::
-get_LNC_array_for_space(Space* sp,
- jbyte**& lowest_non_clean,
- uintptr_t& lowest_non_clean_base_chunk_index,
- size_t& lowest_non_clean_chunk_size) {
-
- int i = find_covering_region_containing(sp->bottom());
- MemRegion covered = _covered[i];
- size_t n_chunks = chunks_to_cover(covered);
-
- // Only the first thread to obtain the lock will resize the
- // LNC array for the covered region. Any later expansion can't affect
- // the used_at_save_marks region.
- // (I observed a bug in which the first thread to execute this would
- // resize, and then it would cause "expand_and_allocate" that would
- // increase the number of chunks in the covered region. Then a second
- // thread would come and execute this, see that the size didn't match,
- // and free and allocate again. So the first thread would be using a
- // freed "_lowest_non_clean" array.)
-
- // Do a dirty read here. If we pass the conditional then take the rare
- // event lock and do the read again in case some other thread had already
- // succeeded and done the resize.
- int cur_collection = CMSHeap::heap()->total_collections();
- // Updated _last_LNC_resizing_collection[i] must not be visible before
- // _lowest_non_clean and friends are visible. Therefore use acquire/release
- // to guarantee this on non TSO architecures.
- if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
- MutexLocker x(ParGCRareEvent_lock);
- // This load_acquire is here for clarity only. The MutexLocker already fences.
- if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
- if (_lowest_non_clean[i] == NULL ||
- n_chunks != _lowest_non_clean_chunk_size[i]) {
-
- // Should we delete the old?
- if (_lowest_non_clean[i] != NULL) {
- assert(n_chunks != _lowest_non_clean_chunk_size[i],
- "logical consequence");
- FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
- _lowest_non_clean[i] = NULL;
- }
- // Now allocate a new one if necessary.
- if (_lowest_non_clean[i] == NULL) {
- _lowest_non_clean[i] = NEW_C_HEAP_ARRAY(CardPtr, n_chunks, mtGC);
- _lowest_non_clean_chunk_size[i] = n_chunks;
- _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
- for (int j = 0; j < (int)n_chunks; j++)
- _lowest_non_clean[i][j] = NULL;
- }
- }
- // Make sure this gets visible only after _lowest_non_clean* was initialized
- OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
- }
- }
- // In any case, now do the initialization.
- lowest_non_clean = _lowest_non_clean[i];
- lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
- lowest_non_clean_chunk_size = _lowest_non_clean_chunk_size[i];
-}
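
The removed process_stride (re-homed into cardTableRS.cpp by the additions earlier in this patch) assigns every n_strides-th chunk of cards to a stride, starting from an offset that rotates the stride index against the stride of the used region's first chunk. A standalone sketch of just that offset computation (cards_per_chunk stands in for ParGCCardsPerStrideChunk):

  #include <cstddef>
  #include <cstdint>

  // Card offset at which `stride` begins scanning, given that the first chunk
  // of the used region belongs to stride (start_chunk % n_strides).
  size_t first_card_offset_for_stride(uintptr_t start_chunk,
                                      unsigned stride, unsigned n_strides,
                                      size_t cards_per_chunk) {
    const unsigned start_stride = (unsigned)(start_chunk % n_strides);
    const unsigned chunks_ahead = (stride >= start_stride)
        ? stride - start_stride                  // later in the same group
        : n_strides - start_stride + stride;     // wrap to the next group
    return (size_t)chunks_ahead * cards_per_chunk;
  }

Each stride then advances by n_strides * cards_per_chunk cards per iteration, exactly as the removed while-loop does.
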
--- a/src/hotspot/share/gc/cms/vmCMSOperations.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/cms/vmCMSOperations.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,8 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/dtrace.hpp"
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1BarrierSet.inline.hpp"
+#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1CardTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/heapRegion.hpp"
@@ -32,9 +33,12 @@
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.inline.hpp"
+#include "utilities/macros.hpp"
G1BarrierSet::G1BarrierSet(G1CardTable* card_table) :
- CardTableModRefBS(card_table, BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)),
+ CardTableBarrierSet(make_barrier_set_assembler<G1BarrierSetAssembler>(),
+ card_table,
+ BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)),
_dcqs(JavaThread::dirty_card_queue_set())
{ }
@@ -53,11 +57,26 @@
}
}
+void G1BarrierSet::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
+ G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+ bs->write_ref_array_pre(dst, length, false);
+}
+
+void G1BarrierSet::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
+ G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+ bs->write_ref_array_pre(dst, length, false);
+}
+
+void G1BarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
+ G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+ bs->G1BarrierSet::write_ref_array(dst, length);
+}
+
template <class T> void
-G1BarrierSet::write_ref_array_pre_work(T* dst, int count) {
+G1BarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
if (!JavaThread::satb_mark_queue_set().is_active()) return;
T* elem_ptr = dst;
- for (int i = 0; i < count; i++, elem_ptr++) {
+ for (size_t i = 0; i < count; i++, elem_ptr++) {
T heap_oop = oopDesc::load_heap_oop(elem_ptr);
if (!oopDesc::is_null(heap_oop)) {
enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
@@ -65,13 +84,13 @@
}
}
-void G1BarrierSet::write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) {
+void G1BarrierSet::write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized) {
if (!dest_uninitialized) {
write_ref_array_pre_work(dst, count);
}
}
-void G1BarrierSet::write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) {
+void G1BarrierSet::write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized) {
if (!dest_uninitialized) {
write_ref_array_pre_work(dst, count);
}
@@ -167,7 +186,7 @@
void G1BarrierSet::on_thread_detach(JavaThread* thread) {
// Flush any deferred card marks, SATB buffers and dirty card queue buffers
- CardTableModRefBS::on_thread_detach(thread);
+ CardTableBarrierSet::on_thread_detach(thread);
thread->satb_mark_queue().flush();
thread->dirty_card_queue().flush();
}
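
Besides the CardTableBarrierSet rename, the hunk above widens the pre-barrier count from int to size_t, removing a potential truncation for very large arrays; the templated loop is then instantiated identically for both oop* and narrowOop*. A simplified standalone analogue of that shape (element and enqueue types below are stand-ins, not HotSpot's):

  #include <cstddef>

  // Stand-ins for HotSpot's oop / narrowOop element widths.
  using FakeOop       = void*;
  using FakeNarrowOop = unsigned int;

  // SATB-style pre-barrier sketch: enqueue every non-null old value in the
  // destination range before it is overwritten. size_t count cannot truncate.
  template <class T, class EnqueueFn>
  void write_ref_array_pre_work(T* dst, size_t count, EnqueueFn enqueue) {
    T* elem_ptr = dst;
    for (size_t i = 0; i < count; i++, elem_ptr++) {
      T old_value = *elem_ptr;
      if (old_value != T()) {   // skip null/zero entries
        enqueue(old_value);
      }
    }
  }

Both write_ref_array_pre overloads then reduce to calls of this one template, one per element width.
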
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#ifndef SHARE_VM_GC_G1_G1BARRIERSET_HPP
#define SHARE_VM_GC_G1_G1BARRIERSET_HPP
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
class DirtyCardQueueSet;
class CardTable;
@@ -34,7 +34,7 @@
// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.
-class G1BarrierSet: public CardTableModRefBS {
+class G1BarrierSet: public CardTableBarrierSet {
friend class VMStructs;
private:
DirtyCardQueueSet& _dcqs;
@@ -49,9 +49,13 @@
static void enqueue_if_weak_or_archive(DecoratorSet decorators, oop value);
- template <class T> void write_ref_array_pre_work(T* dst, int count);
- virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
- virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
+ template <class T> void write_ref_array_pre_work(T* dst, size_t count);
+ virtual void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized);
+ virtual void write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized);
+
+ static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
+ static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
+ static void write_ref_array_post_entry(HeapWord* dst, size_t length);
template <DecoratorSet decorators, typename T>
void write_ref_field_pre(T* field);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1BarrierSetAssembler.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_G1BARRIERSETASSEMBLER_HPP
+#define SHARE_GC_SHARED_G1BARRIERSETASSEMBLER_HPP
+
+#include "utilities/macros.hpp"
+
+#include CPU_HEADER(gc/g1/g1BarrierSetAssembler)
+
+#endif // SHARE_GC_SHARED_G1BARRIERSETASSEMBLER_HPP
--- a/src/hotspot/share/gc/g1/g1CardCounts.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardCounts.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "gc/g1/g1CardCounts.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
--- a/src/hotspot/share/gc/g1/g1CardCounts.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1CardCounts.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"
-class CardTableModRefBS;
+class CardTableBarrierSet;
class G1CardCounts;
class G1CollectedHeap;
class G1RegionToSpaceMapper;
--- a/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1FullGCMarker.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -107,6 +107,11 @@
const int stride = MIN2(len - beg_index, (int) ObjArrayMarkingStride);
const int end_index = beg_index + stride;
+ // Push the continuation first to allow more efficient work stealing.
+ if (end_index < len) {
+ push_objarray(array, end_index);
+ }
+
array->oop_iterate_range(mark_closure(), beg_index, end_index);
if (VerifyDuringGC) {
@@ -117,10 +122,6 @@
assert(false, "Failed");
}
}
-
- if (end_index < len) {
- push_objarray(array, end_index); // Push the continuation.
- }
}
inline void G1FullGCMarker::follow_object(oop obj) {
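
Moving the push_objarray call ahead of oop_iterate_range means the continuation of a long object array becomes stealable while this worker is still scanning the current stride, instead of only afterwards. A standalone sketch of the reordering, with a plain callback standing in for the marker's task queue:

  #include <algorithm>
  #include <cstddef>

  struct ObjArrayTask { const int* array; size_t len; size_t from; };

  const size_t kStride = 128;   // illustrative; mirrors ObjArrayMarkingStride

  // Scan one stride of an array task, publishing the continuation first.
  template <class PushFn, class ScanFn>
  void follow_array_chunk(const ObjArrayTask& t, PushFn push, ScanFn scan) {
    const size_t beg = t.from;
    const size_t end = std::min(t.len, beg + kStride);
    // Push the continuation *before* scanning, so idle workers can steal
    // [end, len) while this worker is still busy with [beg, end).
    if (end < t.len) {
      push(ObjArrayTask{t.array, t.len, end});
    }
    for (size_t i = beg; i < end; i++) {
      scan(t.array[i]);
    }
  }
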
--- a/src/hotspot/share/gc/g1/g1Policy.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -161,7 +161,7 @@
double accum_yg_surv_rate_pred(int age) const;
-protected:
+private:
G1CollectionSet* _collection_set;
double average_time_ms(G1GCPhaseTimes::GCParPhases phase) const;
double other_time_ms(double pause_time_ms) const;
@@ -171,7 +171,6 @@
double constant_other_time_ms(double pause_time_ms) const;
CollectionSetChooser* cset_chooser() const;
-private:
// The number of bytes copied during the GC.
size_t _bytes_copied_during_gc;
@@ -399,7 +398,6 @@
AgeTable _survivors_age_table;
-protected:
size_t desired_survivor_size() const;
public:
uint tenuring_threshold() const { return _tenuring_threshold; }
--- a/src/hotspot/share/gc/g1/g1RemSet.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/g1RemSet.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -37,7 +37,7 @@
// collection set.
class BitMap;
-class CardTableModRefBS;
+class CardTableBarrierSet;
class G1BlockOffsetTable;
class CodeBlobClosure;
class G1CollectedHeap;
--- a/src/hotspot/share/gc/g1/sparsePRT.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/sparsePRT.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "gc/g1/heapRegionBounds.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/sparsePRT.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
--- a/src/hotspot/share/gc/g1/sparsePRT.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/sparsePRT.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/heapRegion.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/allocation.hpp"
#include "runtime/mutex.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/share/gc/g1/vmStructs_g1.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/vmStructs_g1.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,8 +73,13 @@
#define VM_INT_CONSTANTS_G1(declare_constant, declare_constant_with_value) \
declare_constant(HeapRegionType::FreeTag) \
declare_constant(HeapRegionType::YoungMask) \
+ declare_constant(HeapRegionType::EdenTag) \
+ declare_constant(HeapRegionType::SurvTag) \
declare_constant(HeapRegionType::HumongousMask) \
declare_constant(HeapRegionType::PinnedMask) \
+ declare_constant(HeapRegionType::ArchiveMask) \
+ declare_constant(HeapRegionType::StartsHumongousTag) \
+ declare_constant(HeapRegionType::ContinuesHumongousTag) \
declare_constant(HeapRegionType::OldMask)
--- a/src/hotspot/share/gc/g1/vm_operations_g1.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/g1/vm_operations_g1.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/isGCActiveMark.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
void VM_G1CollectFull::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
--- a/src/hotspot/share/gc/parallel/asPSOldGen.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/parallel/asPSOldGen.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psAdaptiveSizePolicy.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/share/gc/parallel/objectStartArray.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "gc/parallel/objectStartArray.inline.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -71,7 +71,7 @@
PSCardTable* card_table = new PSCardTable(reserved_region());
card_table->initialize();
- CardTableModRefBS* const barrier_set = new CardTableModRefBS(card_table);
+ CardTableBarrierSet* const barrier_set = new CardTableBarrierSet(card_table);
barrier_set->initialize();
set_barrier_set(barrier_set);
@@ -626,8 +626,8 @@
return (ParallelScavengeHeap*)heap;
}
-CardTableModRefBS* ParallelScavengeHeap::barrier_set() {
- return barrier_set_cast<CardTableModRefBS>(CollectedHeap::barrier_set());
+CardTableBarrierSet* ParallelScavengeHeap::barrier_set() {
+ return barrier_set_cast<CardTableBarrierSet>(CollectedHeap::barrier_set());
}
PSCardTable* ParallelScavengeHeap::card_table() {
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
#include "gc/parallel/psGCAdaptivePolicyCounters.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psYoungGen.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectorPolicy.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
@@ -127,7 +127,7 @@
static GCTaskManager* const gc_task_manager() { return _gc_task_manager; }
- CardTableModRefBS* barrier_set();
+ CardTableBarrierSet* barrier_set();
PSCardTable* card_table();
AdjoiningGenerations* gens() { return _gens; }
--- a/src/hotspot/share/gc/parallel/psCardTable.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psCardTable.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -392,10 +392,10 @@
// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized. The
-// CardTableModRefBS::resize_covered_region() is used for the normal case
+// CardTable::resize_covered_region() is used for the normal case
// where the covered regions are growing or shrinking at the high end.
// The method resize_covered_region_by_end() is analogous to
-// CardTableModRefBS::resize_covered_region() but
+// CardTable::resize_covered_region() but
// for regions that grow or shrink at the low end.
void PSCardTable::resize_covered_region(MemRegion new_region) {
for (int i = 0; i < _cur_covered_regions; i++) {
@@ -463,7 +463,7 @@
resize_update_covered_table(changed_region, new_region);
int ind = changed_region;
- log_trace(gc, barrier)("CardTableModRefBS::resize_covered_region: ");
+ log_trace(gc, barrier)("CardTable::resize_covered_region: ");
log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
--- a/src/hotspot/share/gc/parallel/psOldGen.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psOldGen.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,7 +29,7 @@
#include "gc/parallel/psCardTable.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psOldGen.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
--- a/src/hotspot/share/gc/parallel/psParallelCompact.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -950,19 +950,7 @@
ParCompactionManager* _cm;
};
- class AdjustKlassClosure : public KlassClosure {
- public:
- AdjustKlassClosure(ParCompactionManager* cm) {
- assert(cm != NULL, "associate ParCompactionManage should not be NULL");
- _cm = cm;
- }
- void do_klass(Klass* klass);
- private:
- ParCompactionManager* _cm;
- };
-
friend class AdjustPointerClosure;
- friend class AdjustKlassClosure;
friend class RefProcTaskProxy;
friend class PSParallelCompactTest;
--- a/src/hotspot/share/gc/shared/barrierSet.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,24 +23,6 @@
*/
#include "precompiled.hpp"
-#include "gc/shared/barrierSet.inline.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "memory/universe.hpp"
+#include "gc/shared/barrierSet.hpp"
BarrierSet* BarrierSet::_bs = NULL;
-
-// count is number of array elements being written
-void BarrierSet::static_write_ref_array_pre(HeapWord* start, size_t count) {
- assert(count <= (size_t)max_intx, "count too large");
- if (UseCompressedOops) {
- Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count, false);
- } else {
- Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count, false);
- }
-}
-
-// count is number of array elements being written
-void BarrierSet::static_write_ref_array_post(HeapWord* start, size_t count) {
- // simply delegate to instance method
- Universe::heap()->barrier_set()->write_ref_array(start, count);
-}
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,8 +31,10 @@
#include "oops/accessBackend.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/fakeRttiSupport.hpp"
+#include "utilities/macros.hpp"
class JavaThread;
+class BarrierSetAssembler;
// This class provides the interface between a barrier implementation and
// the rest of the system.
@@ -67,6 +69,7 @@
private:
FakeRtti _fake_rtti;
+ BarrierSetAssembler* _barrier_set_assembler;
public:
// Metafunction mapping a class derived from BarrierSet to the
@@ -87,28 +90,17 @@
// End of fake RTTI support.
protected:
- BarrierSet(const FakeRtti& fake_rtti) : _fake_rtti(fake_rtti) { }
+ BarrierSet(BarrierSetAssembler* barrier_set_assembler, const FakeRtti& fake_rtti) :
+ _fake_rtti(fake_rtti),
+ _barrier_set_assembler(barrier_set_assembler) { }
~BarrierSet() { }
+ template <class BarrierSetAssemblerT>
+ BarrierSetAssembler* make_barrier_set_assembler() {
+ return NOT_ZERO(new BarrierSetAssemblerT()) ZERO_ONLY(NULL);
+ }
+
public:
- // Operations on arrays, or general regions (e.g., for "clone") may be
- // optimized by some barriers.
-
- // Below length is the # array elements being written
- virtual void write_ref_array_pre(oop* dst, int length,
- bool dest_uninitialized = false) {}
- virtual void write_ref_array_pre(narrowOop* dst, int length,
- bool dest_uninitialized = false) {}
- // Below count is the # array elements being written, starting
- // at the address "start", which may not necessarily be HeapWord-aligned
- inline void write_ref_array(HeapWord* start, size_t count);
-
- // Static versions, suitable for calling from generated code;
- // count is # array elements being written, starting with "start",
- // which may not necessarily be HeapWord-aligned.
- static void static_write_ref_array_pre(HeapWord* start, size_t count);
- static void static_write_ref_array_post(HeapWord* start, size_t count);
-
// Support for optimizing compilers to call the barrier set on slow path allocations
// that did not enter a TLAB. Used for e.g. ReduceInitialCardMarks.
// The allocation is safe to use iff it returns true. If not, the slow-path allocation
@@ -119,15 +111,17 @@
virtual void on_thread_detach(JavaThread* thread) {}
virtual void make_parsable(JavaThread* thread) {}
-protected:
- virtual void write_ref_array_work(MemRegion mr) = 0;
-
public:
// Print a description of the memory for the barrier set
virtual void print_on(outputStream* st) const = 0;
static void set_bs(BarrierSet* bs) { _bs = bs; }
+ BarrierSetAssembler* barrier_set_assembler() {
+ assert(_barrier_set_assembler != NULL, "should be set");
+ return _barrier_set_assembler;
+ }
+
// The AccessBarrier of a BarrierSet subclass is called by the Access API
// (cf. oops/access.hpp) to perform decorated accesses. GC implementations
// may override these default access operations by declaring an
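
make_barrier_set_assembler, added above, yields NULL on Zero (interpreter-only) builds, where no assembler exists, and a freshly allocated CPU-specific assembler everywhere else. A reduced sketch of the macro dispatch it relies on (ZERO_BUILD here is a stand-in for HotSpot's Zero build configuration):

  // Stand-ins for HotSpot's NOT_ZERO / ZERO_ONLY macros (utilities/macros.hpp):
  // on a Zero build the first expands to nothing and the second to its argument.
  #ifdef ZERO_BUILD
    #define NOT_ZERO(code)
    #define ZERO_ONLY(code) code
  #else
    #define NOT_ZERO(code)  code
    #define ZERO_ONLY(code)
  #endif

  struct BarrierSetAssembler {};                           // shared base type
  struct X86BarrierSetAssembler : BarrierSetAssembler {};  // hypothetical CPU type

  template <class AssemblerT>
  BarrierSetAssembler* make_barrier_set_assembler() {
    // Expands to `return new AssemblerT();` normally, `return nullptr;` on Zero.
    return NOT_ZERO(new AssemblerT()) ZERO_ONLY(nullptr);
  }
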
--- a/src/hotspot/share/gc/shared/barrierSet.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
-#define SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
-
-#include "gc/shared/barrierSet.hpp"
-#include "utilities/align.hpp"
-
-// count is number of array elements being written
-void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
- assert(count <= (size_t)max_intx, "count too large");
- HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
- // In the case of compressed oops, start and end may potentially be misaligned;
- // so we need to conservatively align the first downward (this is not
- // strictly necessary for current uses, but a case of good hygiene and,
- // if you will, aesthetics) and the second upward (this is essential for
- // current uses) to a HeapWord boundary, so we mark all cards overlapping
- // this write. If this evolves in the future to calling a
- // logging barrier of narrow oop granularity, like the pre-barrier for G1
- // (mentioned here merely by way of example), we will need to change this
- // interface, so it is "exactly precise" (if i may be allowed the adverbial
- // redundancy for emphasis) and does not include narrow oop slots not
- // included in the original write interval.
- HeapWord* aligned_start = align_down(start, HeapWordSize);
- HeapWord* aligned_end = align_up (end, HeapWordSize);
- // If compressed oops were not being used, these should already be aligned
- assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
- "Expected heap word alignment of start and end");
- write_ref_array_work(MemRegion(aligned_start, aligned_end));
-}
-
-#endif // SHARE_VM_GC_SHARED_BARRIERSET_INLINE_HPP
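
The deleted inline helper conservatively widens a possibly misaligned oop interval to HeapWord boundaries before dirtying cards, so a 4-byte narrow-oop write near a word boundary still marks every overlapping card. The rounding is plain align-down/align-up arithmetic; a standalone sketch (the helpers below are local stand-ins for HotSpot's align_down/align_up):

  #include <cstddef>
  #include <cstdint>

  const size_t kHeapWordSize = sizeof(void*);  // stand-in for HeapWordSize

  static uintptr_t align_down(uintptr_t p, size_t alignment) {
    return p & ~(uintptr_t)(alignment - 1);
  }
  static uintptr_t align_up(uintptr_t p, size_t alignment) {
    return (p + alignment - 1) & ~(uintptr_t)(alignment - 1);
  }

  // Widen [start, start + count * elem_size) to whole heap words, so every
  // card overlapping the write is covered even for 4-byte narrow oops.
  void aligned_write_interval(uintptr_t start, size_t count, size_t elem_size,
                              uintptr_t* aligned_start, uintptr_t* aligned_end) {
    const uintptr_t end = start + count * elem_size;
    *aligned_start = align_down(start, kHeapWordSize);
    *aligned_end   = align_up(end, kHeapWordSize);
  }
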
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/barrierSetAssembler.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_BARRIERSETASSEMBLER_HPP
+#define SHARE_GC_SHARED_BARRIERSETASSEMBLER_HPP
+
+#include "utilities/macros.hpp"
+
+#include CPU_HEADER(gc/shared/barrierSetAssembler)
+
+#endif // SHARE_GC_SHARED_BARRIERSETASSEMBLER_HPP
--- a/src/hotspot/share/gc/shared/barrierSetConfig.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -36,7 +36,7 @@
// Do something for each concrete barrier set part of the build.
#define FOR_EACH_CONCRETE_BARRIER_SET_DO(f) \
- f(CardTableModRef) \
+ f(CardTableBarrierSet) \
FOR_EACH_CONCRETE_INCLUDE_ALL_GC_BARRIER_SET_DO(f)
#define FOR_EACH_ABSTRACT_BARRIER_SET_DO(f) \
--- a/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/barrierSetConfig.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
#include "gc/shared/barrierSetConfig.hpp"
#include "gc/shared/modRefBarrierSet.inline.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
+#include "gc/shared/cardTableBarrierSet.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1BarrierSet.inline.hpp" // G1 support
--- a/src/hotspot/share/gc/shared/cardTable.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTable.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -87,7 +87,7 @@
MemRegion committed_unique_to_self(int self, MemRegion mr) const;
// Some barrier sets create tables whose elements correspond to parts of
- // the heap; the CardTableModRefBS is an example. Such barrier sets will
+ // the heap; the CardTableBarrierSet is an example. Such barrier sets will
// normally reserve space for such tables, and commit parts of the table
// "covering" parts of the heap that are committed. At most one covered
// region per generation is needed.
@@ -114,7 +114,7 @@
virtual ~CardTable();
virtual void initialize();
- // The kinds of precision a CardTableModRefBS may offer.
+ // The kinds of precision a CardTable may offer.
enum PrecisionStyle {
Precise,
ObjHeadPreciseArray
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/shared/cardTableBarrierSetAssembler.hpp"
+#include "gc/shared/cardTableBarrierSet.inline.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/genCollectedHeap.hpp"
+#include "gc/shared/space.inline.hpp"
+#include "logging/log.hpp"
+#include "memory/virtualspace.hpp"
+#include "oops/oop.inline.hpp"
+#include "runtime/thread.hpp"
+#include "services/memTracker.hpp"
+#include "utilities/align.hpp"
+#include "utilities/macros.hpp"
+
+// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
+// enumerate ref fields that have been modified (since the last
+// enumeration).
+
+CardTableBarrierSet::CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
+ CardTable* card_table,
+ const BarrierSet::FakeRtti& fake_rtti) :
+ ModRefBarrierSet(barrier_set_assembler,
+ fake_rtti.add_tag(BarrierSet::CardTableBarrierSet)),
+ _defer_initial_card_mark(false),
+ _card_table(card_table)
+{}
+
+CardTableBarrierSet::CardTableBarrierSet(CardTable* card_table) :
+ ModRefBarrierSet(make_barrier_set_assembler<CardTableBarrierSetAssembler>(),
+ BarrierSet::FakeRtti(BarrierSet::CardTableBarrierSet)),
+ _defer_initial_card_mark(false),
+ _card_table(card_table)
+{}
+
+void CardTableBarrierSet::initialize() {
+ initialize_deferred_card_mark_barriers();
+}
+
+CardTableBarrierSet::~CardTableBarrierSet() {
+ delete _card_table;
+}
+
+void CardTableBarrierSet::write_ref_array_work(MemRegion mr) {
+ _card_table->dirty_MemRegion(mr);
+}
+
+void CardTableBarrierSet::invalidate(MemRegion mr) {
+ _card_table->invalidate(mr);
+}
+
+void CardTableBarrierSet::print_on(outputStream* st) const {
+ _card_table->print_on(st);
+}
+
+// Helper for ReduceInitialCardMarks. For performance,
+// compiled code may elide card-marks for initializing stores
+// to a newly allocated object along the fast-path. We
+// compensate for such elided card-marks as follows:
+// (a) Generational, non-concurrent collectors, such as
+// GenCollectedHeap(ParNew,DefNew,Tenured) and
+// ParallelScavengeHeap(ParallelGC, ParallelOldGC)
+// need the card-mark if and only if the region is
+// in the old gen, and do not care if the card-mark
+// succeeds or precedes the initializing stores themselves,
+// so long as the card-mark is completed before the next
+// scavenge. For all these cases, we can do a card mark
+// at the point at which we do a slow path allocation
+// in the old gen, i.e. in this call.
+// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
+// in addition that the card-mark for an old gen allocated
+// object strictly follow any associated initializing stores.
+// In these cases, the memRegion remembered below is
+// used to card-mark the entire region either just before the next
+// slow-path allocation by this thread or just before the next scavenge or
+// CMS-associated safepoint, whichever of these events happens first.
+// (The implicit assumption is that the object has been fully
+// initialized by this point, a fact that we assert when doing the
+// card-mark.)
+// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
+// G1 concurrent marking is in progress an SATB (pre-write-)barrier
+// is used to remember the pre-value of any store. Initializing
+// stores will not need this barrier, so we need not worry about
+// compensating for the missing pre-barrier here. Turning now
+// to the post-barrier, we note that G1 needs a RS update barrier
+// which simply enqueues a (sequence of) dirty cards which may
+// optionally be refined by the concurrent update threads. Note
+// that this barrier need only be applied to a non-young write,
+// but, like in CMS, because of the presence of concurrent refinement
+// (much like CMS' precleaning), must strictly follow the oop-store.
+// Thus, using the same protocol for maintaining the intended
+// invariants turns out, serendipitously, to be the same for both
+// G1 and CMS.
+//
+// For any future collector, this code should be reexamined with
+// that specific collector in mind, and the documentation above suitably
+// extended and updated.
+void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
+#if defined(COMPILER2) || INCLUDE_JVMCI
+ if (!ReduceInitialCardMarks) {
+ return;
+ }
+ // If a previous card-mark was deferred, flush it now.
+ flush_deferred_card_mark_barrier(thread);
+ if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
+ // Arrays of non-references don't need a post-barrier.
+ // The deferred_card_mark region should be empty
+ // following the flush above.
+ assert(thread->deferred_card_mark().is_empty(), "Error");
+ } else {
+ MemRegion mr((HeapWord*)new_obj, new_obj->size());
+ assert(!mr.is_empty(), "Error");
+ if (_defer_initial_card_mark) {
+ // Defer the card mark
+ thread->set_deferred_card_mark(mr);
+ } else {
+ // Do the card mark
+ invalidate(mr);
+ }
+ }
+#endif // COMPILER2 || JVMCI
+}
+
+void CardTableBarrierSet::initialize_deferred_card_mark_barriers() {
+ // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
+ // otherwise remains unused.
+#if defined(COMPILER2) || INCLUDE_JVMCI
+ _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
+ && (DeferInitialCardMark || card_mark_must_follow_store());
+#else
+ assert(_defer_initial_card_mark == false, "Who would set it?");
+#endif
+}
+
+void CardTableBarrierSet::flush_deferred_card_mark_barrier(JavaThread* thread) {
+#if defined(COMPILER2) || INCLUDE_JVMCI
+ MemRegion deferred = thread->deferred_card_mark();
+ if (!deferred.is_empty()) {
+ assert(_defer_initial_card_mark, "Otherwise should be empty");
+ {
+ // Verify that the storage points to a parsable object in heap
+ DEBUG_ONLY(oop old_obj = oop(deferred.start());)
+ assert(!_card_table->is_in_young(old_obj),
+ "Else should have been filtered in on_slowpath_allocation_exit()");
+ assert(oopDesc::is_oop(old_obj, true), "Not an oop");
+ assert(deferred.word_size() == (size_t)(old_obj->size()),
+ "Mismatch: multiple objects?");
+ }
+ write_region(deferred);
+ // "Clear" the deferred_card_mark field
+ thread->set_deferred_card_mark(MemRegion());
+ }
+ assert(thread->deferred_card_mark().is_empty(), "invariant");
+#else
+ assert(!_defer_initial_card_mark, "Should be false");
+ assert(thread->deferred_card_mark().is_empty(), "Should be empty");
+#endif
+}
+
+void CardTableBarrierSet::on_thread_detach(JavaThread* thread) {
+ // The deferred store barriers must all have been flushed to the
+ // card-table (or other remembered set structure) before GC starts
+ // processing the card-table (or other remembered set).
+ flush_deferred_card_mark_barrier(thread);
+}
+
+bool CardTableBarrierSet::card_mark_must_follow_store() const {
+ return _card_table->scanned_concurrently();
+}
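
The protocol in the comment block above is easiest to see as a small state machine: defer the card mark when the collector needs it to strictly follow the initializing stores, and flush the pending mark at the next slow-path allocation, safepoint, or thread detach. The following is a minimal standalone C++ sketch of that shape; MemRegionModel, ThreadModel, and BarrierSetModel are simplified stand-ins, not the HotSpot types.

```c++
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Simplified stand-ins for MemRegion, JavaThread, and the barrier set.
struct MemRegionModel {
  uintptr_t start = 0;
  size_t words = 0;
  bool is_empty() const { return words == 0; }
};

struct ThreadModel {
  MemRegionModel deferred_card_mark;   // models JavaThread::deferred_card_mark()
};

struct BarrierSetModel {
  bool defer_initial_card_mark;        // models _defer_initial_card_mark
  std::vector<MemRegionModel> dirtied; // regions invalidated in the card table

  void invalidate(MemRegionModel mr) { dirtied.push_back(mr); }

  // Models flush_deferred_card_mark_barrier(): emit any pending mark now.
  void flush(ThreadModel& t) {
    if (!t.deferred_card_mark.is_empty()) {
      invalidate(t.deferred_card_mark);
      t.deferred_card_mark = MemRegionModel();
    }
  }

  // Models on_slowpath_allocation_exit(): either card-mark immediately, or
  // remember the region so the mark strictly follows the initializing stores.
  void on_slowpath_alloc(ThreadModel& t, MemRegionModel obj,
                         bool young, bool type_array) {
    flush(t);                         // a previously deferred mark goes first
    if (type_array || young) return;  // no post-barrier needed for these
    if (defer_initial_card_mark) {
      t.deferred_card_mark = obj;     // marked at the next flush point
    } else {
      invalidate(obj);                // marked right here
    }
  }
};

int main() {
  BarrierSetModel bs{true, {}};
  ThreadModel t;
  bs.on_slowpath_alloc(t, {0x1000, 8}, /*young=*/false, /*type_array=*/false);
  assert(!t.deferred_card_mark.is_empty());  // mark was deferred
  bs.on_slowpath_alloc(t, {0x2000, 4}, /*young=*/true, /*type_array=*/false);
  assert(t.deferred_card_mark.is_empty());   // old mark flushed; young obj skipped
  assert(bs.dirtied.size() == 1);
  return 0;
}
```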
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP
+#define SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP
+
+#include "gc/shared/modRefBarrierSet.hpp"
+#include "utilities/align.hpp"
+
+class CardTable;
+
+// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
+// enumerate ref fields that have been modified (since the last
+// enumeration.)
+
+// As it currently stands, this barrier is *imprecise*: when a ref field in
+// an object "o" is modified, the card table entry for the card containing
+// the head of "o" is dirtied, not necessarily the card containing the
+// modified field itself. For object arrays, however, the barrier *is*
+// precise; only the card containing the modified element is dirtied.
+// Closures used to scan dirty cards should take these
+// considerations into account.
+
+class CardTableBarrierSet: public ModRefBarrierSet {
+ // Some classes get to look at some private stuff.
+ friend class VMStructs;
+ protected:
+
+ // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
+ // or INCLUDE_JVMCI is being used
+ bool _defer_initial_card_mark;
+ CardTable* _card_table;
+
+ CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
+ CardTable* card_table,
+ const BarrierSet::FakeRtti& fake_rtti);
+
+ public:
+ CardTableBarrierSet(CardTable* card_table);
+ ~CardTableBarrierSet();
+
+ CardTable* card_table() const { return _card_table; }
+
+ virtual void initialize();
+
+ void write_region(MemRegion mr) {
+ invalidate(mr);
+ }
+
+ void write_ref_array_work(MemRegion mr);
+
+ public:
+ // Record a reference update. Note that these versions are precise!
+ // The scanning code has to handle the fact that the write barrier may be
+ // either precise or imprecise. We make non-virtual inline variants of
+ // these functions here for performance.
+ template <DecoratorSet decorators, typename T>
+ void write_ref_field_post(T* field, oop newVal);
+
+ virtual void invalidate(MemRegion mr);
+
+ // ReduceInitialCardMarks
+ void initialize_deferred_card_mark_barriers();
+
+ // If the CollectedHeap was asked to defer a store barrier above,
+ // this informs it to flush such a deferred store barrier to the
+ // remembered set.
+ void flush_deferred_card_mark_barrier(JavaThread* thread);
+
+ // Can a compiler initialize a new object without store barriers?
+ // This permission only extends from the creation of a new object
+ // via a TLAB up to the first subsequent safepoint. If such permission
+ // is granted for this heap type, the compiler promises to call
+ // defer_store_barrier() below on any slow path allocation of
+ // a new object for which such initializing store barriers will
+ // have been elided. G1, like CMS, allows this, but should be
+ // ready to provide a compensating write barrier as necessary
+ // if that storage came out of a non-young region. The efficiency
+ // of this implementation depends crucially on being able to
+ // answer very efficiently in constant time whether a piece of
+ // storage in the heap comes from a young region or not.
+ // See ReduceInitialCardMarks.
+ virtual bool can_elide_tlab_store_barriers() const {
+ return true;
+ }
+
+ // If a compiler is eliding store barriers for TLAB-allocated objects,
+ // we will be informed of a slow-path allocation by a call
+ // to on_slowpath_allocation_exit() below. Such a call precedes the
+ // initialization of the object itself, and no post-store-barriers will
+ // be issued. Some heap types require that the barrier strictly follows
+ // the initializing stores. (This is currently implemented by deferring the
+ // barrier until the next slow-path allocation or gc-related safepoint.)
+ // This interface answers whether a particular barrier type needs the card
+ // mark to be thus strictly sequenced after the stores.
+ virtual bool card_mark_must_follow_store() const;
+
+ virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
+ virtual void on_thread_detach(JavaThread* thread);
+
+ virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
+
+ virtual void print_on(outputStream* st) const;
+
+ template <DecoratorSet decorators, typename BarrierSetT = CardTableBarrierSet>
+ class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
+};
+
+template<>
+struct BarrierSet::GetName<CardTableBarrierSet> {
+ static const BarrierSet::Name value = BarrierSet::CardTableBarrierSet;
+};
+
+template<>
+struct BarrierSet::GetType<BarrierSet::CardTableBarrierSet> {
+ typedef ::CardTableBarrierSet type;
+};
+
+#endif // SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
+#define SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
+
+#include "gc/shared/cardTableBarrierSet.hpp"
+#include "gc/shared/cardTable.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+template <DecoratorSet decorators, typename T>
+inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
+ volatile jbyte* byte = _card_table->byte_for(field);
+ if (UseConcMarkSweepGC) {
+ // Perform a releasing store if using CMS so that it may
+ // scan and clear the cards concurrently during pre-cleaning.
+ OrderAccess::release_store(byte, CardTable::dirty_card_val());
+ } else {
+ *byte = CardTable::dirty_card_val();
+ }
+}
+
+#endif // SHARE_VM_GC_SHARED_CARDTABLEBARRIERSET_INLINE_HPP
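
The releasing store above is the whole point of this inline variant: it guarantees the preceding reference store is visible to a concurrent precleaning scanner by the time the scanner observes the dirty card. Below is a standalone model of byte_for() and the post-barrier; the 512-byte card size is the usual CardTable default, assumed here for illustration, and the relaxed store models the plain byte store in the non-CMS branch.

```c++
#include <atomic>
#include <cstdint>
#include <cstdio>
#include <vector>

const int kCardShift = 9;        // assumption: 512-byte cards, CardTable's default
const uint8_t kDirtyCard = 0;    // models CardTable::dirty_card_val()

class CardTableModel {
  uintptr_t _heap_base;
  std::vector<std::atomic<uint8_t>> _cards;
 public:
  CardTableModel(uintptr_t heap_base, size_t ncards)
    : _heap_base(heap_base), _cards(ncards) {}

  // Models CardTable::byte_for(): one card byte per 512 bytes of heap.
  std::atomic<uint8_t>* byte_for(uintptr_t field_addr) {
    return &_cards[(field_addr - _heap_base) >> kCardShift];
  }

  // Models write_ref_field_post(): dirty the card covering the field; use a
  // releasing store when a concurrent scanner (CMS precleaning) may race.
  void post_barrier(uintptr_t field_addr, bool concurrent_scanner) {
    std::atomic<uint8_t>* byte = byte_for(field_addr);
    byte->store(kDirtyCard, concurrent_scanner ? std::memory_order_release
                                               : std::memory_order_relaxed);
  }
};

int main() {
  CardTableModel ct(0x100000, 1024);
  ct.post_barrier(0x100000 + 4096, /*concurrent_scanner=*/true);
  std::printf("card %d dirtied\n", 4096 >> kCardShift);  // card 8
  return 0;
}
```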
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSetAssembler.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_HPP
+#define SHARE_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_HPP
+
+#include "utilities/macros.hpp"
+
+#include CPU_HEADER(gc/shared/cardTableBarrierSetAssembler)
+
+#endif // SHARE_GC_SHARED_CARDTABLEBARRIERSETASSEMBLER_HPP
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.cpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,189 +0,0 @@
-/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "gc/shared/cardTableModRefBS.inline.hpp"
-#include "gc/shared/collectedHeap.hpp"
-#include "gc/shared/genCollectedHeap.hpp"
-#include "gc/shared/space.inline.hpp"
-#include "logging/log.hpp"
-#include "memory/virtualspace.hpp"
-#include "oops/oop.inline.hpp"
-#include "runtime/thread.hpp"
-#include "services/memTracker.hpp"
-#include "utilities/align.hpp"
-#include "utilities/macros.hpp"
-
-// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
-// enumerate ref fields that have been modified (since the last
-// enumeration.)
-
-CardTableModRefBS::CardTableModRefBS(
- CardTable* card_table,
- const BarrierSet::FakeRtti& fake_rtti) :
- ModRefBarrierSet(fake_rtti.add_tag(BarrierSet::CardTableModRef)),
- _defer_initial_card_mark(false),
- _card_table(card_table)
-{}
-
-CardTableModRefBS::CardTableModRefBS(CardTable* card_table) :
- ModRefBarrierSet(BarrierSet::FakeRtti(BarrierSet::CardTableModRef)),
- _defer_initial_card_mark(false),
- _card_table(card_table)
-{}
-
-void CardTableModRefBS::initialize() {
- initialize_deferred_card_mark_barriers();
-}
-
-CardTableModRefBS::~CardTableModRefBS() {
- delete _card_table;
-}
-
-void CardTableModRefBS::write_ref_array_work(MemRegion mr) {
- _card_table->dirty_MemRegion(mr);
-}
-
-void CardTableModRefBS::invalidate(MemRegion mr) {
- _card_table->invalidate(mr);
-}
-
-void CardTableModRefBS::print_on(outputStream* st) const {
- _card_table->print_on(st);
-}
-
-// Helper for ReduceInitialCardMarks. For performance,
-// compiled code may elide card-marks for initializing stores
-// to a newly allocated object along the fast-path. We
-// compensate for such elided card-marks as follows:
-// (a) Generational, non-concurrent collectors, such as
-// GenCollectedHeap(ParNew,DefNew,Tenured) and
-// ParallelScavengeHeap(ParallelGC, ParallelOldGC)
-// need the card-mark if and only if the region is
-// in the old gen, and do not care if the card-mark
-// succeeds or precedes the initializing stores themselves,
-// so long as the card-mark is completed before the next
-// scavenge. For all these cases, we can do a card mark
-// at the point at which we do a slow path allocation
-// in the old gen, i.e. in this call.
-// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
-// in addition that the card-mark for an old gen allocated
-// object strictly follow any associated initializing stores.
-// In these cases, the memRegion remembered below is
-// used to card-mark the entire region either just before the next
-// slow-path allocation by this thread or just before the next scavenge or
-// CMS-associated safepoint, whichever of these events happens first.
-// (The implicit assumption is that the object has been fully
-// initialized by this point, a fact that we assert when doing the
-// card-mark.)
-// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
-// G1 concurrent marking is in progress an SATB (pre-write-)barrier
-// is used to remember the pre-value of any store. Initializing
-// stores will not need this barrier, so we need not worry about
-// compensating for the missing pre-barrier here. Turning now
-// to the post-barrier, we note that G1 needs a RS update barrier
-// which simply enqueues a (sequence of) dirty cards which may
-// optionally be refined by the concurrent update threads. Note
-// that this barrier need only be applied to a non-young write,
-// but, like in CMS, because of the presence of concurrent refinement
-// (much like CMS' precleaning), must strictly follow the oop-store.
-// Thus, using the same protocol for maintaining the intended
-// invariants turns out, serendepitously, to be the same for both
-// G1 and CMS.
-//
-// For any future collector, this code should be reexamined with
-// that specific collector in mind, and the documentation above suitably
-// extended and updated.
-void CardTableModRefBS::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
-#if defined(COMPILER2) || INCLUDE_JVMCI
- if (!ReduceInitialCardMarks) {
- return;
- }
- // If a previous card-mark was deferred, flush it now.
- flush_deferred_card_mark_barrier(thread);
- if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
- // Arrays of non-references don't need a post-barrier.
- // The deferred_card_mark region should be empty
- // following the flush above.
- assert(thread->deferred_card_mark().is_empty(), "Error");
- } else {
- MemRegion mr((HeapWord*)new_obj, new_obj->size());
- assert(!mr.is_empty(), "Error");
- if (_defer_initial_card_mark) {
- // Defer the card mark
- thread->set_deferred_card_mark(mr);
- } else {
- // Do the card mark
- invalidate(mr);
- }
- }
-#endif // COMPILER2 || JVMCI
-}
-
-void CardTableModRefBS::initialize_deferred_card_mark_barriers() {
- // Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
- // otherwise remains unused.
-#if defined(COMPILER2) || INCLUDE_JVMCI
- _defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks && can_elide_tlab_store_barriers()
- && (DeferInitialCardMark || card_mark_must_follow_store());
-#else
- assert(_defer_initial_card_mark == false, "Who would set it?");
-#endif
-}
-
-void CardTableModRefBS::flush_deferred_card_mark_barrier(JavaThread* thread) {
-#if defined(COMPILER2) || INCLUDE_JVMCI
- MemRegion deferred = thread->deferred_card_mark();
- if (!deferred.is_empty()) {
- assert(_defer_initial_card_mark, "Otherwise should be empty");
- {
- // Verify that the storage points to a parsable object in heap
- DEBUG_ONLY(oop old_obj = oop(deferred.start());)
- assert(!_card_table->is_in_young(old_obj),
- "Else should have been filtered in on_slowpath_allocation_exit()");
- assert(oopDesc::is_oop(old_obj, true), "Not an oop");
- assert(deferred.word_size() == (size_t)(old_obj->size()),
- "Mismatch: multiple objects?");
- }
- write_region(deferred);
- // "Clear" the deferred_card_mark field
- thread->set_deferred_card_mark(MemRegion());
- }
- assert(thread->deferred_card_mark().is_empty(), "invariant");
-#else
- assert(!_defer_initial_card_mark, "Should be false");
- assert(thread->deferred_card_mark().is_empty(), "Should be empty");
-#endif
-}
-
-void CardTableModRefBS::on_thread_detach(JavaThread* thread) {
- // The deferred store barriers must all have been flushed to the
- // card-table (or other remembered set structure) before GC starts
- // processing the card-table (or other remembered set).
- flush_deferred_card_mark_barrier(thread);
-}
-
-bool CardTableModRefBS::card_mark_must_follow_store() const {
- return _card_table->scanned_concurrently();
-}
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.hpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,139 +0,0 @@
-/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
-#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
-
-#include "gc/shared/modRefBarrierSet.hpp"
-#include "utilities/align.hpp"
-
-class CardTable;
-
-// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
-// enumerate ref fields that have been modified (since the last
-// enumeration.)
-
-// As it currently stands, this barrier is *imprecise*: when a ref field in
-// an object "o" is modified, the card table entry for the card containing
-// the head of "o" is dirtied, not necessarily the card containing the
-// modified field itself. For object arrays, however, the barrier *is*
-// precise; only the card containing the modified element is dirtied.
-// Closures used to scan dirty cards should take these
-// considerations into account.
-
-class CardTableModRefBS: public ModRefBarrierSet {
- // Some classes get to look at some private stuff.
- friend class VMStructs;
- protected:
-
- // Used in support of ReduceInitialCardMarks; only consulted if COMPILER2
- // or INCLUDE_JVMCI is being used
- bool _defer_initial_card_mark;
- CardTable* _card_table;
-
- CardTableModRefBS(CardTable* card_table, const BarrierSet::FakeRtti& fake_rtti);
-
- public:
- CardTableModRefBS(CardTable* card_table);
- ~CardTableModRefBS();
-
- CardTable* card_table() const { return _card_table; }
-
- virtual void initialize();
-
- void write_region(MemRegion mr) {
- invalidate(mr);
- }
-
- protected:
- void write_ref_array_work(MemRegion mr);
-
- public:
- // Record a reference update. Note that these versions are precise!
- // The scanning code has to handle the fact that the write barrier may be
- // either precise or imprecise. We make non-virtual inline variants of
- // these functions here for performance.
- template <DecoratorSet decorators, typename T>
- void write_ref_field_post(T* field, oop newVal);
-
- virtual void invalidate(MemRegion mr);
-
- // ReduceInitialCardMarks
- void initialize_deferred_card_mark_barriers();
-
- // If the CollectedHeap was asked to defer a store barrier above,
- // this informs it to flush such a deferred store barrier to the
- // remembered set.
- void flush_deferred_card_mark_barrier(JavaThread* thread);
-
- // Can a compiler initialize a new object without store barriers?
- // This permission only extends from the creation of a new object
- // via a TLAB up to the first subsequent safepoint. If such permission
- // is granted for this heap type, the compiler promises to call
- // defer_store_barrier() below on any slow path allocation of
- // a new object for which such initializing store barriers will
- // have been elided. G1, like CMS, allows this, but should be
- // ready to provide a compensating write barrier as necessary
- // if that storage came out of a non-young region. The efficiency
- // of this implementation depends crucially on being able to
- // answer very efficiently in constant time whether a piece of
- // storage in the heap comes from a young region or not.
- // See ReduceInitialCardMarks.
- virtual bool can_elide_tlab_store_barriers() const {
- return true;
- }
-
- // If a compiler is eliding store barriers for TLAB-allocated objects,
- // we will be informed of a slow-path allocation by a call
- // to on_slowpath_allocation_exit() below. Such a call precedes the
- // initialization of the object itself, and no post-store-barriers will
- // be issued. Some heap types require that the barrier strictly follows
- // the initializing stores. (This is currently implemented by deferring the
- // barrier until the next slow-path allocation or gc-related safepoint.)
- // This interface answers whether a particular barrier type needs the card
- // mark to be thus strictly sequenced after the stores.
- virtual bool card_mark_must_follow_store() const;
-
- virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);
- virtual void on_thread_detach(JavaThread* thread);
-
- virtual void make_parsable(JavaThread* thread) { flush_deferred_card_mark_barrier(thread); }
-
- virtual void print_on(outputStream* st) const;
-
- template <DecoratorSet decorators, typename BarrierSetT = CardTableModRefBS>
- class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {};
-};
-
-template<>
-struct BarrierSet::GetName<CardTableModRefBS> {
- static const BarrierSet::Name value = BarrierSet::CardTableModRef;
-};
-
-template<>
-struct BarrierSet::GetType<BarrierSet::CardTableModRef> {
- typedef CardTableModRefBS type;
-};
-
-#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_HPP
--- a/src/hotspot/share/gc/shared/cardTableModRefBS.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
-#define SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
-
-#include "gc/shared/cardTableModRefBS.hpp"
-#include "gc/shared/cardTable.hpp"
-#include "runtime/orderAccess.inline.hpp"
-
-template <DecoratorSet decorators, typename T>
-inline void CardTableModRefBS::write_ref_field_post(T* field, oop newVal) {
- volatile jbyte* byte = _card_table->byte_for(field);
- if (UseConcMarkSweepGC) {
- // Perform a releasing store if using CMS so that it may
- // scan and clear the cards concurrently during pre-cleaning.
- OrderAccess::release_store(byte, CardTable::dirty_card_val());
- } else {
- *byte = CardTable::dirty_card_val();
- }
-}
-
-#endif // SHARE_VM_GC_SHARED_CARDTABLEMODREFBS_INLINE_HPP
--- a/src/hotspot/share/gc/shared/cardTableRS.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/cardTableRS.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -571,7 +571,7 @@
// [End Case 3]
//
// (Please refer to the code in the helper class
- // ClearNonCleanCardWrapper and in CardTableModRefBS for details.)
+ // ClearNonCleanCardWrapper and in CardTable for details.)
//
// The informal arguments above can be tightened into a formal
// correctness proof and it behooves us to write up such a proof,
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/allocTracer.hpp"
-#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
@@ -585,3 +585,13 @@
void CollectedHeap::post_initialize() {
initialize_serviceability();
}
+
+oop CollectedHeap::pin_object(JavaThread* thread, oop o) {
+ Handle handle(thread, o);
+ GCLocker::lock_critical(thread);
+ return handle();
+}
+
+void CollectedHeap::unpin_object(JavaThread* thread, oop o) {
+ GCLocker::unlock_critical(thread);
+}
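
The default implementation above does not pin at all: it keeps the oop alive in a Handle and enters a GCLocker critical section, so a moving GC simply cannot start while any thread holds a "pin". A standalone model of that protocol follows; GCLockerModel and its counter are stand-ins for the real GCLocker state.

```c++
#include <cassert>
#include <cstdio>

// Standalone model of the GCLocker-based pinning protocol. pin_object() and
// unpin_object() in this patch map to lock_critical()/unlock_critical().
struct GCLockerModel {
  int critical_depth = 0;               // models the critical-section count
  void lock_critical()   { ++critical_depth; }
  void unlock_critical() { assert(critical_depth > 0); --critical_depth; }
  bool gc_may_move_objects() const { return critical_depth == 0; }
};

int main() {
  GCLockerModel gc;
  // JNI GetPrimitiveArrayCritical -> pin_object -> lock_critical
  gc.lock_critical();
  assert(!gc.gc_may_move_objects());    // a moving GC pause must wait
  // ... native code works on the raw array body here ...
  // JNI ReleasePrimitiveArrayCritical -> unpin_object -> unlock_critical
  gc.unlock_critical();
  assert(gc.gc_may_move_objects());
  std::puts("critical section balanced");
  return 0;
}
```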
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -588,6 +588,15 @@
// perform cleanup tasks serially in the VMThread.
virtual WorkGang* get_safepoint_workers() { return NULL; }
+ // Support for object pinning. This is used by JNI's Get*Critical() and
+ // Release*Critical() family of functions. A GC may either use the GCLocker
+ // protocol to ensure no critical arrays are in use when entering
+ // a GC pause, or it can implement pinning, which must guarantee that
+ // the object does not move while pinned.
+ virtual oop pin_object(JavaThread* thread, oop o);
+
+ virtual void unpin_object(JavaThread* thread, oop o);
+
// Non product verification and debugging.
#ifndef PRODUCT
// Support for PromotionFailureALot. Return true if it's time to cause a
--- a/src/hotspot/share/gc/shared/concurrentGCThread.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/concurrentGCThread.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,6 @@
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/adaptiveSizePolicy.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/collectorCounters.hpp"
@@ -112,7 +112,7 @@
_rem_set = new CardTableRS(reserved_region());
_rem_set->initialize();
- CardTableModRefBS *bs = new CardTableModRefBS(_rem_set);
+ CardTableBarrierSet *bs = new CardTableBarrierSet(_rem_set);
bs->initialize();
set_barrier_set(bs);
--- a/src/hotspot/share/gc/shared/genOopClosures.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/genOopClosures.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
class Generation;
class HeapWord;
class CardTableRS;
-class CardTableModRefBS;
+class CardTableBarrierSet;
class DefNewGeneration;
class KlassRemSet;
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,8 +32,10 @@
class ModRefBarrierSet: public BarrierSet {
protected:
- ModRefBarrierSet(const BarrierSet::FakeRtti& fake_rtti)
- : BarrierSet(fake_rtti.add_tag(BarrierSet::ModRef)) { }
+ ModRefBarrierSet(BarrierSetAssembler* barrier_set_assembler,
+ const BarrierSet::FakeRtti& fake_rtti)
+ : BarrierSet(barrier_set_assembler,
+ fake_rtti.add_tag(BarrierSet::ModRef)) { }
~ModRefBarrierSet() { }
public:
@@ -47,6 +49,22 @@
virtual void invalidate(MemRegion mr) = 0;
virtual void write_region(MemRegion mr) = 0;
+ // Operations on arrays, or general regions (e.g., for "clone") may be
+ // optimized by some barriers.
+
+ // Below length is the # array elements being written
+ virtual void write_ref_array_pre(oop* dst, size_t length,
+ bool dest_uninitialized = false) {}
+ virtual void write_ref_array_pre(narrowOop* dst, size_t length,
+ bool dest_uninitialized = false) {}
+ // Below count is the # array elements being written, starting
+ // at the address "start", which may not necessarily be HeapWord-aligned
+ inline void write_ref_array(HeapWord* start, size_t count);
+
+ protected:
+ virtual void write_ref_array_work(MemRegion mr) = 0;
+
+ public:
// The ModRef abstraction introduces pre and post barriers
template <DecoratorSet decorators, typename BarrierSetT>
class AccessBarrier: public BarrierSet::AccessBarrier<decorators, BarrierSetT> {
--- a/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/modRefBarrierSet.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,12 +25,34 @@
#ifndef SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
#define SHARE_VM_GC_SHARED_MODREFBARRIERSET_INLINE_HPP
-#include "gc/shared/barrierSet.inline.hpp"
+#include "gc/shared/barrierSet.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.hpp"
+// count is number of array elements being written
+void ModRefBarrierSet::write_ref_array(HeapWord* start, size_t count) {
+ HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
+ // In the case of compressed oops, start and end may potentially be misaligned;
+ // so we need to conservatively align the first downward (this is not
+ // strictly necessary for current uses, but a case of good hygiene and,
+ // if you will, aesthetics) and the second upward (this is essential for
+ // current uses) to a HeapWord boundary, so we mark all cards overlapping
+ // this write. If this evolves in the future to calling a
+ // logging barrier of narrow oop granularity, like the pre-barrier for G1
+ // (mentioned here merely by way of example), we will need to change this
+ // interface, so it is "exactly precise" (if I may be allowed the adverbial
+ // redundancy for emphasis) and does not include narrow oop slots not
+ // included in the original write interval.
+ HeapWord* aligned_start = align_down(start, HeapWordSize);
+ HeapWord* aligned_end = align_up (end, HeapWordSize);
+ // If compressed oops were not being used, these should already be aligned
+ assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
+ "Expected heap word alignment of start and end");
+ write_ref_array_work(MemRegion(aligned_start, aligned_end));
+}
+
template <DecoratorSet decorators, typename BarrierSetT>
template <typename T>
inline void ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT>::
@@ -73,7 +95,7 @@
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// Optimized covariant case
- bs->write_ref_array_pre(dst, (int)length,
+ bs->write_ref_array_pre(dst, length,
HasDecorator<decorators, AS_DEST_NOT_INITIALIZED>::value);
Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
bs->write_ref_array((HeapWord*)dst, length);
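
The widening performed by write_ref_array() above is plain alignment arithmetic; here is a standalone sketch of it, using 4-byte oops to model compressed oops and the usual 8-byte HeapWord as assumed constants.

```c++
#include <cassert>
#include <cstdint>

const uintptr_t kHeapWordSize = 8;  // 64-bit HeapWord
const uintptr_t kHeapOopSize  = 4;  // compressed oops: a narrowOop is 4 bytes

uintptr_t align_down(uintptr_t p, uintptr_t a) { return p & ~(a - 1); }
uintptr_t align_up  (uintptr_t p, uintptr_t a) { return (p + a - 1) & ~(a - 1); }

int main() {
  // A 3-element narrowOop write starting on an odd (non-HeapWord) slot.
  uintptr_t start = 0x1000 + kHeapOopSize;             // 0x1004: misaligned
  uintptr_t end   = start + 3 * kHeapOopSize;          // 0x1010

  uintptr_t aligned_start = align_down(start, kHeapWordSize);  // 0x1000
  uintptr_t aligned_end   = align_up(end, kHeapWordSize);      // 0x1010

  // The widened region covers every card the write could have touched.
  assert(aligned_start <= start && end <= aligned_end);
  assert(aligned_start % kHeapWordSize == 0 && aligned_end % kHeapWordSize == 0);
  return 0;
}
```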
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/shared/modRefBarrierSetAssembler.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_SHARED_MODREFBARRIERSETASSEMBLER_HPP
+#define SHARE_GC_SHARED_MODREFBARRIERSETASSEMBLER_HPP
+
+#include "utilities/macros.hpp"
+
+#include CPU_HEADER(gc/shared/modRefBarrierSetAssembler)
+
+#endif // SHARE_GC_SHARED_MODREFBARRIERSETASSEMBLER_HPP
--- a/src/hotspot/share/gc/shared/vmGCOperations.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/gc/shared/vmGCOperations.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,7 +35,6 @@
#include "memory/oopFactory.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
--- a/src/hotspot/share/interpreter/abstractInterpreter.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/abstractInterpreter.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
#include "asm/macroAssembler.hpp"
#include "code/stubs.hpp"
#include "interpreter/bytecodes.hpp"
+#include "runtime/frame.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
--- a/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/bytecodeInterpreter.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -47,7 +47,7 @@
#include "runtime/biasedLocking.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
--- a/src/hotspot/share/interpreter/bytecodeStream.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/bytecodeStream.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"
+#include "runtime/handles.inline.hpp"
Bytecodes::Code RawBytecodeStream::raw_next_special(Bytecodes::Code code) {
assert(!is_last_bytecode(), "should have been checked");
@@ -53,6 +54,11 @@
return code;
}
+BaseBytecodeStream::BaseBytecodeStream(const methodHandle& method) : _method(method) {
+ set_interval(0, _method->code_size());
+ _is_raw = false;
+}
+
#ifdef ASSERT
void BaseBytecodeStream::assert_raw_index_size(int size) const {
if (raw_code() == Bytecodes::_invokedynamic && is_raw()) {
--- a/src/hotspot/share/interpreter/bytecodeStream.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/bytecodeStream.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,6 @@
#include "interpreter/bytecode.hpp"
#include "memory/allocation.hpp"
#include "oops/method.hpp"
-#include "runtime/handles.inline.hpp"
#include "utilities/bytes.hpp"
// A BytecodeStream is used for fast iteration over the bytecodes
@@ -63,10 +62,7 @@
bool _is_raw; // false in 'cooked' BytecodeStream
// Construction
- BaseBytecodeStream(const methodHandle& method) : _method(method) {
- set_interval(0, _method->code_size());
- _is_raw = false;
- }
+ BaseBytecodeStream(const methodHandle& method);
public:
// Iteration control
--- a/src/hotspot/share/interpreter/interpreterRuntime.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/interpreterRuntime.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -53,9 +53,10 @@
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fieldDescriptor.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/osThread.hpp"
@@ -84,6 +85,58 @@
}
};
+// Helper class to access current interpreter state
+class LastFrameAccessor : public StackObj {
+ frame _last_frame;
+public:
+ LastFrameAccessor(JavaThread* thread) {
+ assert(thread == Thread::current(), "sanity");
+ _last_frame = thread->last_frame();
+ }
+ bool is_interpreted_frame() const { return _last_frame.is_interpreted_frame(); }
+ Method* method() const { return _last_frame.interpreter_frame_method(); }
+ address bcp() const { return _last_frame.interpreter_frame_bcp(); }
+ int bci() const { return _last_frame.interpreter_frame_bci(); }
+ address mdp() const { return _last_frame.interpreter_frame_mdp(); }
+
+ void set_bcp(address bcp) { _last_frame.interpreter_frame_set_bcp(bcp); }
+ void set_mdp(address dp) { _last_frame.interpreter_frame_set_mdp(dp); }
+
+ // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
+ Bytecodes::Code code() const { return Bytecodes::code_at(method(), bcp()); }
+
+ Bytecode bytecode() const { return Bytecode(method(), bcp()); }
+ int get_index_u1(Bytecodes::Code bc) const { return bytecode().get_index_u1(bc); }
+ int get_index_u2(Bytecodes::Code bc) const { return bytecode().get_index_u2(bc); }
+ int get_index_u2_cpcache(Bytecodes::Code bc) const
+ { return bytecode().get_index_u2_cpcache(bc); }
+ int get_index_u4(Bytecodes::Code bc) const { return bytecode().get_index_u4(bc); }
+ int number_of_dimensions() const { return bcp()[3]; }
+ ConstantPoolCacheEntry* cache_entry_at(int i) const
+ { return method()->constants()->cache()->entry_at(i); }
+ ConstantPoolCacheEntry* cache_entry() const { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }
+
+ oop callee_receiver(Symbol* signature) {
+ return _last_frame.interpreter_callee_receiver(signature);
+ }
+ BasicObjectLock* monitor_begin() const {
+ return _last_frame.interpreter_frame_monitor_begin();
+ }
+ BasicObjectLock* monitor_end() const {
+ return _last_frame.interpreter_frame_monitor_end();
+ }
+ BasicObjectLock* next_monitor(BasicObjectLock* current) const {
+ return _last_frame.next_monitor_in_interpreter_frame(current);
+ }
+
+ frame& get_frame() { return _last_frame; }
+};
+
+
+bool InterpreterRuntime::is_breakpoint(JavaThread *thread) {
+ return Bytecodes::code_or_bp_at(LastFrameAccessor(thread).bcp()) == Bytecodes::_breakpoint;
+}
+
//------------------------------------------------------------------------------------------------------------------------
// State accessors
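
Moving LastFrameAccessor out of the header also makes its contract easier to see: copy the top frame once, then answer every query from that snapshot instead of re-walking the stack. The following standalone model illustrates the idea; FrameModel and these names are simplified stand-ins, not the HotSpot types.

```c++
#include <cstdint>
#include <cstdio>

// Stand-in for an interpreter frame: a method plus its bytecode pointer.
struct FrameModel {
  const char* method_name;
  const uint8_t* bcp;        // current bytecode pointer
  const uint8_t* code_base;  // start of the method's bytecodes
};

class LastFrameAccessorModel {
  FrameModel _last_frame;    // copied once, like thread->last_frame()
 public:
  explicit LastFrameAccessorModel(const FrameModel& top) : _last_frame(top) {}
  const char* method() const { return _last_frame.method_name; }
  int bci() const { return (int)(_last_frame.bcp - _last_frame.code_base); }
  unsigned code() const { return *_last_frame.bcp; }
};

int main() {
  // aload_0, invokevirtual #0x0010, return
  const uint8_t bytecodes[] = {0x2a, 0xb6, 0x00, 0x10, 0xb1};
  FrameModel top{"Example.run", &bytecodes[1], &bytecodes[0]};
  LastFrameAccessorModel last_frame(top);
  std::printf("%s: opcode 0x%02x at bci %d\n",
              last_frame.method(), last_frame.code(), last_frame.bci());
  return 0;
}
```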
--- a/src/hotspot/share/interpreter/interpreterRuntime.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/interpreterRuntime.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
#include "interpreter/linkResolver.hpp"
#include "memory/universe.hpp"
#include "oops/method.hpp"
-#include "runtime/frame.inline.hpp"
+#include "runtime/frame.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.hpp"
#include "utilities/macros.hpp"
@@ -42,52 +42,6 @@
friend class PrintingClosure; // for method and bcp
private:
- // Helper class to access current interpreter state
- class LastFrameAccessor : public StackObj {
- frame _last_frame;
- public:
- LastFrameAccessor(JavaThread* thread) {
- assert(thread == Thread::current(), "sanity");
- _last_frame = thread->last_frame();
- }
- bool is_interpreted_frame() const { return _last_frame.is_interpreted_frame(); }
- Method* method() const { return _last_frame.interpreter_frame_method(); }
- address bcp() const { return _last_frame.interpreter_frame_bcp(); }
- int bci() const { return _last_frame.interpreter_frame_bci(); }
- address mdp() const { return _last_frame.interpreter_frame_mdp(); }
-
- void set_bcp(address bcp) { _last_frame.interpreter_frame_set_bcp(bcp); }
- void set_mdp(address dp) { _last_frame.interpreter_frame_set_mdp(dp); }
-
- // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272)
- Bytecodes::Code code() const { return Bytecodes::code_at(method(), bcp()); }
-
- Bytecode bytecode() const { return Bytecode(method(), bcp()); }
- int get_index_u1(Bytecodes::Code bc) const { return bytecode().get_index_u1(bc); }
- int get_index_u2(Bytecodes::Code bc) const { return bytecode().get_index_u2(bc); }
- int get_index_u2_cpcache(Bytecodes::Code bc) const
- { return bytecode().get_index_u2_cpcache(bc); }
- int get_index_u4(Bytecodes::Code bc) const { return bytecode().get_index_u4(bc); }
- int number_of_dimensions() const { return bcp()[3]; }
- ConstantPoolCacheEntry* cache_entry_at(int i) const
- { return method()->constants()->cache()->entry_at(i); }
- ConstantPoolCacheEntry* cache_entry() const { return cache_entry_at(Bytes::get_native_u2(bcp() + 1)); }
-
- oop callee_receiver(Symbol* signature) {
- return _last_frame.interpreter_callee_receiver(signature);
- }
- BasicObjectLock* monitor_begin() const {
- return _last_frame.interpreter_frame_monitor_begin();
- }
- BasicObjectLock* monitor_end() const {
- return _last_frame.interpreter_frame_monitor_end();
- }
- BasicObjectLock* next_monitor(BasicObjectLock* current) const {
- return _last_frame.next_monitor_in_interpreter_frame(current);
- }
-
- frame& get_frame() { return _last_frame; }
- };
static void set_bcp_and_mdp(address bcp, JavaThread*thread);
static void note_trap_inner(JavaThread* thread, int reason,
@@ -172,7 +126,7 @@
static void _breakpoint(JavaThread* thread, Method* method, address bcp);
static Bytecodes::Code get_original_bytecode_at(JavaThread* thread, Method* method, address bcp);
static void set_original_bytecode_at(JavaThread* thread, Method* method, address bcp, Bytecodes::Code new_code);
- static bool is_breakpoint(JavaThread *thread) { return Bytecodes::code_or_bp_at(LastFrameAccessor(thread).bcp()) == Bytecodes::_breakpoint; }
+ static bool is_breakpoint(JavaThread *thread);
// Safepoints
static void at_safepoint(JavaThread* thread);
--- a/src/hotspot/share/interpreter/linkResolver.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/linkResolver.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,6 +32,7 @@
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/linkResolver.hpp"
--- a/src/hotspot/share/interpreter/rewriter.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/rewriter.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,6 +31,7 @@
#include "memory/resourceArea.hpp"
#include "oops/generateOopMap.hpp"
#include "prims/methodHandles.hpp"
+#include "runtime/handles.inline.hpp"
// Computes a CPC map (new_index -> original_index) for constant pool entries
// that are referred to by the interpreter at runtime via the constant pool cache.
--- a/src/hotspot/share/interpreter/rewriter.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/rewriter.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#define SHARE_VM_INTERPRETER_REWRITER_HPP
#include "memory/allocation.hpp"
-#include "runtime/handles.inline.hpp"
#include "utilities/growableArray.hpp"
// The Rewriter adds caches to the constant pool and rewrites bytecode indices
--- a/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/interpreter/templateInterpreterGenerator.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateInterpreterGenerator.hpp"
#include "interpreter/templateTable.hpp"
+#include "oops/methodData.hpp"
#ifndef CC_INTERP
--- a/src/hotspot/share/jvmci/compilerRuntime.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/compilerRuntime.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,9 +29,10 @@
#include "oops/cpCache.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/compilationPolicy.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/deoptimization.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/vframe.inline.hpp"
#include "aot/aotLoader.hpp"
// Resolve and allocate String
--- a/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCodeInstaller.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,6 +39,7 @@
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/safepointMechanism.inline.hpp"
--- a/src/hotspot/share/jvmci/jvmciCompiler.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompiler.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
#include "jvmci/jvmciRuntime.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/globals_extension.hpp"
+#include "runtime/handles.inline.hpp"
JVMCICompiler* JVMCICompiler::_instance = NULL;
elapsedTimer JVMCICompiler::_codeInstallTimer;
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/scopeDesc.hpp"
#include "memory/oopFactory.hpp"
@@ -35,6 +36,8 @@
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/jvmciRuntime.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/vframe_hp.hpp"
@@ -98,6 +101,12 @@
return NULL;
}
+Handle JavaArgumentUnboxer::next_arg(BasicType expectedType) {
+ assert(_index < _args->length(), "out of bounds");
+ oop arg = ((objArrayOop) (_args))->obj_at(_index++);
+ assert(expectedType == T_OBJECT || java_lang_boxing_object::is_instance(arg, expectedType), "arg type mismatch");
+ return Handle(Thread::current(), arg);
+}
jobjectArray readConfiguration0(JNIEnv *env, TRAPS);
@@ -472,6 +481,9 @@
C2V_VMENTRY(jobject, resolveTypeInPool, (JNIEnv*, jobject, jobject jvmci_constant_pool, jint index))
constantPoolHandle cp = CompilerToVM::asConstantPool(jvmci_constant_pool);
Klass* resolved_klass = cp->klass_at(index, CHECK_NULL);
+ if (resolved_klass->is_instance_klass()) {
+ InstanceKlass::cast(resolved_klass)->link_class_or_fail(THREAD);
+ }
oop klass = CompilerToVM::get_jvmci_type(resolved_klass, CHECK_NULL);
return JNIHandles::make_local(THREAD, klass);
C2V_END
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -170,12 +170,7 @@
arrayOop _args;
int _index;
- Handle next_arg(BasicType expectedType) {
- assert(_index < _args->length(), "out of bounds");
- oop arg=((objArrayOop) (_args))->obj_at(_index++);
- assert(expectedType == T_OBJECT || java_lang_boxing_object::is_instance(arg, expectedType), "arg type mismatch");
- return Handle(Thread::current(), arg);
- }
+ Handle next_arg(BasicType expectedType);
public:
JavaArgumentUnboxer(Symbol* signature, JavaCallArguments* jca, arrayOop args, bool is_static) : SignatureIterator(signature) {
--- a/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciCompilerToVMInit.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,6 +28,7 @@
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/vmStructs_jvmci.hpp"
+#include "runtime/handles.inline.hpp"
#include "utilities/resourceHash.hpp"
@@ -119,7 +120,7 @@
symbol_clinit = (address) vmSymbols::class_initializer_name();
BarrierSet* bs = Universe::heap()->barrier_set();
- if (bs->is_a(BarrierSet::CardTableModRef)) {
+ if (bs->is_a(BarrierSet::CardTableBarrierSet)) {
jbyte* base = ci_card_table_address();
assert(base != NULL, "unexpected byte_map_base");
cardtable_start_address = base;
@@ -420,4 +421,3 @@
#undef ADD_UINTX_FLAG
#undef CHECK_FLAG
}
-
--- a/src/hotspot/share/jvmci/jvmciEnv.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciEnv.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,7 +29,6 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
-#include "runtime/sweeper.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
@@ -45,9 +44,11 @@
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
+#include "runtime/sweeper.hpp"
#include "utilities/dtrace.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/jvmciJavaClasses.hpp"
@@ -368,12 +369,6 @@
if (holder_is_accessible) { // Our declared holder is loaded.
constantTag tag = cpool->tag_ref_at(index);
methodHandle m = lookup_method(accessor, holder, name_sym, sig_sym, bc, tag);
- if (!m.is_null() &&
- (bc == Bytecodes::_invokestatic
- ? InstanceKlass::cast(m->method_holder())->is_not_initialized()
- : !InstanceKlass::cast(m->method_holder())->is_loaded())) {
- m = NULL;
- }
if (!m.is_null()) {
// We found the method.
return m;
--- a/src/hotspot/share/jvmci/jvmciJavaClasses.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciJavaClasses.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -353,30 +353,30 @@
static type name() { \
assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
InstanceKlass* ik = klassName::klass(); \
- address addr = ik->static_field_addr(_##name##_offset); \
- oop result = HeapAccess<>::oop_load((HeapWord*)addr); \
+ oop base = ik->static_field_base_raw(); \
+ oop result = HeapAccess<>::oop_load_at(base, _##name##_offset); \
return type(result); \
} \
static void set_##name(type x) { \
assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
assert(klassName::klass() != NULL, "Class not yet loaded: " #klassName); \
InstanceKlass* ik = klassName::klass(); \
- address addr = ik->static_field_addr(_##name##_offset); \
- HeapAccess<>::oop_store((HeapWord*)addr, x); \
+ oop base = ik->static_field_base_raw(); \
+ HeapAccess<>::oop_store_at(base, _##name##_offset, x); \
}
#define STATIC_PRIMITIVE_FIELD(klassName, name, jtypename) \
static int _##name##_offset; \
static jtypename name() { \
assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
InstanceKlass* ik = klassName::klass(); \
- address addr = ik->static_field_addr(_##name##_offset); \
- return HeapAccess<>::load((jtypename*)addr); \
+ oop base = ik->static_field_base_raw(); \
+ return HeapAccess<>::load_at(base, _##name##_offset); \
} \
static void set_##name(jtypename x) { \
assert(klassName::klass() != NULL && klassName::klass()->is_linked(), "Class not yet linked: " #klassName); \
InstanceKlass* ik = klassName::klass(); \
- address addr = ik->static_field_addr(_##name##_offset); \
- HeapAccess<>::store((jtypename*)addr, x); \
+ oop base = ik->static_field_base_raw(); \
+ HeapAccess<>::store_at(base, _##name##_offset, x); \
}
#define STATIC_INT_FIELD(klassName, name) STATIC_PRIMITIVE_FIELD(klassName, name, jint)
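
The rewritten macros above switch every static-field access from a raw address to an (oop base, offset) pair, which is the shape the Access API needs in order to dispatch through a GC's barriers. A standalone sketch of that shape follows; HeapAccessModel and FakeOop are stand-ins, not the real HeapAccess machinery.

```c++
#include <cstdint>
#include <cstdio>
#include <cstring>

// Stand-in for an object whose fields live at byte offsets from its base.
struct FakeOop { unsigned char storage[64]; };

// Models the shape of HeapAccess<>::load_at/store_at: every access names the
// base object plus an offset, so a barrier set can be interposed uniformly.
struct HeapAccessModel {
  template <typename T>
  static T load_at(FakeOop* base, int offset) {
    // A real barrier set would run its pre-access hooks here.
    T v;
    std::memcpy(&v, base->storage + offset, sizeof(T));
    return v;
  }
  template <typename T>
  static void store_at(FakeOop* base, int offset, T v) {
    std::memcpy(base->storage + offset, &v, sizeof(T));
    // ...and a post-write barrier (e.g. a card mark) would run here.
  }
};

int main() {
  FakeOop mirror = {};                 // models a class mirror holding statics
  HeapAccessModel::store_at<int32_t>(&mirror, 16, 42);
  std::printf("%d\n", HeapAccessModel::load_at<int32_t>(&mirror, 16));  // 42
  return 0;
}
```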
--- a/src/hotspot/share/jvmci/jvmciRuntime.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/jvmci/jvmciRuntime.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,7 @@
#include "asm/codeBuffer.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "code/codeCache.hpp"
+#include "code/compiledMethod.inline.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
#include "jvmci/jvmciRuntime.hpp"
@@ -40,7 +41,8 @@
#include "oops/oop.inline.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "runtime/biasedLocking.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/reflection.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/share/libadt/vectset.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/libadt/vectset.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
+#include "memory/arena.hpp"
// Vector Sets - An Abstract Data Type
--- a/src/hotspot/share/memory/allocation.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/allocation.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -37,6 +37,39 @@
#include "services/memTracker.hpp"
#include "utilities/ostream.hpp"
+// allocate using malloc; will fail if no memory available
+char* AllocateHeap(size_t size,
+ MEMFLAGS flags,
+ const NativeCallStack& stack,
+ AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
+ char* p = (char*) os::malloc(size, flags, stack);
+ if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+ vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
+ }
+ return p;
+}
+
+char* AllocateHeap(size_t size,
+ MEMFLAGS flags,
+ AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
+ return AllocateHeap(size, flags, CALLER_PC, alloc_failmode);
+}
+
+char* ReallocateHeap(char *old,
+ size_t size,
+ MEMFLAGS flag,
+ AllocFailType alloc_failmode) {
+ char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
+ if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+ vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
+ }
+ return p;
+}
+
+void FreeHeap(void* p) {
+ os::free(p);
+}
+
void* MetaspaceObj::_shared_metaspace_base = NULL;
void* MetaspaceObj::_shared_metaspace_top = NULL;
--- a/src/hotspot/share/memory/allocation.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/allocation.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -154,22 +154,61 @@
class NativeCallStack;
+char* AllocateHeap(size_t size,
+ MEMFLAGS flags,
+ const NativeCallStack& stack,
+ AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+char* AllocateHeap(size_t size,
+ MEMFLAGS flags,
+ AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+
+char* ReallocateHeap(char *old,
+ size_t size,
+ MEMFLAGS flag,
+ AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+
+void FreeHeap(void* p);
+
template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
public:
- NOINLINE void* operator new(size_t size, const NativeCallStack& stack) throw();
- NOINLINE void* operator new(size_t size) throw();
- NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant,
- const NativeCallStack& stack) throw();
- NOINLINE void* operator new (size_t size, const std::nothrow_t& nothrow_constant)
- throw();
- NOINLINE void* operator new [](size_t size, const NativeCallStack& stack) throw();
- NOINLINE void* operator new [](size_t size) throw();
- NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
- const NativeCallStack& stack) throw();
- NOINLINE void* operator new [](size_t size, const std::nothrow_t& nothrow_constant)
- throw();
- void operator delete(void* p);
- void operator delete [] (void* p);
+ ALWAYSINLINE void* operator new(size_t size) throw() {
+ return (void*)AllocateHeap(size, F);
+ }
+
+ ALWAYSINLINE void* operator new(size_t size,
+ const NativeCallStack& stack) throw() {
+ return (void*)AllocateHeap(size, F, stack);
+ }
+
+ ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t&,
+ const NativeCallStack& stack) throw() {
+ return (void*)AllocateHeap(size, F, stack, AllocFailStrategy::RETURN_NULL);
+ }
+
+ ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t&) throw() {
+ return (void*)AllocateHeap(size, F, AllocFailStrategy::RETURN_NULL);
+ }
+
+ ALWAYSINLINE void* operator new[](size_t size) throw() {
+ return (void*)AllocateHeap(size, F);
+ }
+
+ ALWAYSINLINE void* operator new[](size_t size,
+ const NativeCallStack& stack) throw() {
+ return (void*)AllocateHeap(size, F, stack);
+ }
+
+ ALWAYSINLINE void* operator new[](size_t size, const std::nothrow_t&,
+ const NativeCallStack& stack) throw() {
+ return (void*)AllocateHeap(size, F, stack, AllocFailStrategy::RETURN_NULL);
+ }
+
+ ALWAYSINLINE void* operator new[](size_t size, const std::nothrow_t&) throw() {
+ return (void*)AllocateHeap(size, F, AllocFailStrategy::RETURN_NULL);
+ }
+
+ void operator delete(void* p) { FreeHeap(p); }
+ void operator delete [] (void* p) { FreeHeap(p); }
};
// Base class for objects allocated on the stack only.
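
Since the operators are now defined in the header, any CHeapObj subclass picks them up without a template instantiation in allocation.inline.hpp. A hedged usage sketch (class name and MEMFLAGS choice are illustrative):

  class SampleBuffer : public CHeapObj<mtInternal> {  // mtInternal: existing MEMFLAGS value
    char _data[64];
  };

  SampleBuffer* b = new (std::nothrow) SampleBuffer();  // returns NULL on OOM, no VM exit
  delete b;                                             // routes through FreeHeap()/os::free()
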
--- a/src/hotspot/share/memory/allocation.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/allocation.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -48,83 +48,6 @@
}
#endif
-// allocate using malloc; will fail if no memory available
-inline char* AllocateHeap(size_t size, MEMFLAGS flags,
- const NativeCallStack& stack,
- AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
- char* p = (char*) os::malloc(size, flags, stack);
- if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
- vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
- }
- return p;
-}
-
-ALWAYSINLINE char* AllocateHeap(size_t size, MEMFLAGS flags,
- AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
- return AllocateHeap(size, flags, CURRENT_PC, alloc_failmode);
-}
-
-ALWAYSINLINE char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag,
- AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
- char* p = (char*) os::realloc(old, size, flag, CURRENT_PC);
- if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
- vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
- }
- return p;
-}
-
-inline void FreeHeap(void* p) {
- os::free(p);
-}
-
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size,
- const NativeCallStack& stack) throw() {
- return (void*)AllocateHeap(size, F, stack);
-}
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new(size_t size) throw() {
- return CHeapObj<F>::operator new(size, CALLER_PC);
-}
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
- const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
- return (void*)AllocateHeap(size, F, stack, AllocFailStrategy::RETURN_NULL);
-}
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new (size_t size,
- const std::nothrow_t& nothrow_constant) throw() {
- return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
-}
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
- const NativeCallStack& stack) throw() {
- return CHeapObj<F>::operator new(size, stack);
-}
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size)
- throw() {
- return CHeapObj<F>::operator new(size, CALLER_PC);
-}
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
- const std::nothrow_t& nothrow_constant, const NativeCallStack& stack) throw() {
- return CHeapObj<F>::operator new(size, nothrow_constant, stack);
-}
-
-template <MEMFLAGS F> void* CHeapObj<F>::operator new [](size_t size,
- const std::nothrow_t& nothrow_constant) throw() {
- return CHeapObj<F>::operator new(size, nothrow_constant, CALLER_PC);
-}
-
-template <MEMFLAGS F> void CHeapObj<F>::operator delete(void* p){
- FreeHeap(p);
-}
-
-template <MEMFLAGS F> void CHeapObj<F>::operator delete [](void* p){
- FreeHeap(p);
-}
-
template <class E>
size_t MmapArrayAllocator<E>::size_for(size_t length) {
size_t size = length * sizeof(E);
--- a/src/hotspot/share/memory/memRegion.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/memRegion.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,7 +35,7 @@
// Note that MemRegions are passed by value, not by reference.
// The intent is that they remain very small and contain no
// objects. These should never be allocated in heap but we do
-// create MemRegions (in CardTableModRefBS) in heap so operator
+// create MemRegions (in CardTableBarrierSet) in heap so operator
// new and operator new [] added for this special case.
class MetaWord;
--- a/src/hotspot/share/memory/metaspace.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/metaspace.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -113,6 +113,7 @@
} else if (size == ClassMediumChunk) {
return MediumIndex;
} else if (size > ClassMediumChunk) {
+ // A valid humongous chunk size is a multiple of the smallest chunk size.
assert(is_aligned(size, ClassSpecializedChunk), "Invalid chunk size");
return HumongousIndex;
}
@@ -124,6 +125,7 @@
} else if (size == MediumChunk) {
return MediumIndex;
} else if (size > MediumChunk) {
+ // A valid humongous chunk size is a multiple of the smallest chunk size.
assert(is_aligned(size, SpecializedChunk), "Invalid chunk size");
return HumongousIndex;
}
@@ -299,10 +301,7 @@
Metachunk* free_chunks_get(size_t chunk_word_size);
#define index_bounds_check(index) \
- assert(index == SpecializedIndex || \
- index == SmallIndex || \
- index == MediumIndex || \
- index == HumongousIndex, "Bad index: %d", (int) index)
+ assert(is_valid_chunktype(index), "Bad index: %d", (int) index)
size_t num_free_chunks(ChunkIndex index) const {
index_bounds_check(index);
@@ -2648,25 +2647,13 @@
}
ChunkIndex ChunkManager::list_index(size_t size) {
- if (size_by_index(SpecializedIndex) == size) {
- return SpecializedIndex;
- }
- if (size_by_index(SmallIndex) == size) {
- return SmallIndex;
- }
- const size_t med_size = size_by_index(MediumIndex);
- if (med_size == size) {
- return MediumIndex;
- }
-
- assert(size > med_size, "Not a humongous chunk");
- return HumongousIndex;
+ return get_chunk_type_by_size(size, is_class());
}
size_t ChunkManager::size_by_index(ChunkIndex index) const {
index_bounds_check(index);
assert(index != HumongousIndex, "Do not call for humongous chunks.");
- return _free_chunks[index].size();
+ return get_size_for_nonhumongous_chunktype(index, is_class());
}
void ChunkManager::locked_verify_free_chunks_total() {
@@ -5244,37 +5231,39 @@
// The following test is placed here instead of a gtest / unittest file
// because the ChunkManager class is only available in this file.
void ChunkManager_test_list_index() {
- ChunkManager manager(true);
-
- // Test previous bug where a query for a humongous class metachunk,
- // incorrectly matched the non-class medium metachunk size.
{
- assert(MediumChunk > ClassMediumChunk, "Precondition for test");
-
- ChunkIndex index = manager.list_index(MediumChunk);
-
- assert(index == HumongousIndex,
- "Requested size is larger than ClassMediumChunk,"
- " so should return HumongousIndex. Got index: %d", (int)index);
- }
-
- // Check the specified sizes as well.
- {
- ChunkIndex index = manager.list_index(ClassSpecializedChunk);
- assert(index == SpecializedIndex, "Wrong index returned. Got index: %d", (int)index);
- }
- {
- ChunkIndex index = manager.list_index(ClassSmallChunk);
- assert(index == SmallIndex, "Wrong index returned. Got index: %d", (int)index);
- }
- {
- ChunkIndex index = manager.list_index(ClassMediumChunk);
- assert(index == MediumIndex, "Wrong index returned. Got index: %d", (int)index);
- }
- {
- ChunkIndex index = manager.list_index(ClassMediumChunk + 1);
- assert(index == HumongousIndex, "Wrong index returned. Got index: %d", (int)index);
- }
+ // Test previous bug where a query for a humongous class metachunk,
+ // incorrectly matched the non-class medium metachunk size.
+ {
+ ChunkManager manager(true);
+
+ assert(MediumChunk > ClassMediumChunk, "Precondition for test");
+
+ ChunkIndex index = manager.list_index(MediumChunk);
+
+ assert(index == HumongousIndex,
+ "Requested size is larger than ClassMediumChunk,"
+ " so should return HumongousIndex. Got index: %d", (int)index);
+ }
+
+ // Check the specified sizes as well.
+ {
+ ChunkManager manager(true);
+ assert(manager.list_index(ClassSpecializedChunk) == SpecializedIndex, "sanity");
+ assert(manager.list_index(ClassSmallChunk) == SmallIndex, "sanity");
+ assert(manager.list_index(ClassMediumChunk) == MediumIndex, "sanity");
+ assert(manager.list_index(ClassMediumChunk + ClassSpecializedChunk) == HumongousIndex, "sanity");
+ }
+ {
+ ChunkManager manager(false);
+ assert(manager.list_index(SpecializedChunk) == SpecializedIndex, "sanity");
+ assert(manager.list_index(SmallChunk) == SmallIndex, "sanity");
+ assert(manager.list_index(MediumChunk) == MediumIndex, "sanity");
+ assert(manager.list_index(MediumChunk + SpecializedChunk) == HumongousIndex, "sanity");
+ }
+
+ }
+
}
#endif // !PRODUCT
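
list_index() now delegates to get_chunk_type_by_size(), which is introduced elsewhere in this patch. Roughly, the mapping it has to implement looks like the following sketch (the real implementation may differ):

  ChunkIndex get_chunk_type_by_size_sketch(size_t size, bool is_class) {
    if (size == (is_class ? ClassSpecializedChunk : SpecializedChunk)) return SpecializedIndex;
    if (size == (is_class ? ClassSmallChunk       : SmallChunk))       return SmallIndex;
    if (size == (is_class ? ClassMediumChunk      : MediumChunk))      return MediumIndex;
    // Anything larger is humongous; callers assert alignment to the
    // smallest chunk size, as the comments added above note.
    return HumongousIndex;
  }
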
--- a/src/hotspot/share/memory/metaspace.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/metaspace.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -66,6 +66,7 @@
class PrintCLDMetaspaceInfoClosure;
class SpaceManager;
class VirtualSpaceList;
+class CollectedHeap;
// Metaspaces each have a SpaceManager and allocations
// are done by the SpaceManager. Allocations are done
--- a/src/hotspot/share/memory/metaspaceShared.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/metaspaceShared.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -46,6 +46,7 @@
#include "logging/logMessage.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
+#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
@@ -56,15 +57,15 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
-#include "runtime/timerTrace.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/signature.hpp"
+#include "runtime/timerTrace.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
-#include "memory/metaspaceClosure.hpp"
ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;
@@ -233,6 +234,7 @@
// with the archived ones, so it must be done after all encodings are determined.
mapinfo->map_heap_regions();
}
+ Universe::set_narrow_klass_range(CompressedClassSpaceSize);
#endif // _LP64
} else {
assert(!mapinfo->is_open() && !UseSharedSpaces,
@@ -298,6 +300,8 @@
// Set narrow_klass_shift to be LogKlassAlignmentInBytes. This is consistent
// with AOT.
Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
+ // Set the range of klass addresses to 4GB.
+ Universe::set_narrow_klass_range(cds_total);
Metaspace::initialize_class_space(tmp_class_space);
tty->print_cr("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
@@ -1820,6 +1824,13 @@
os::vm_allocation_granularity());
}
+unsigned MetaspaceShared::obj_hash(oop const& p) {
+ assert(!p->mark()->has_bias_pattern(),
+ "this object should never have been locked"); // so identity_hash won't safepoin
+ unsigned hash = (unsigned)p->identity_hash();
+ return hash;
+}
+
MetaspaceShared::ArchivedObjectCache* MetaspaceShared::_archive_object_cache = NULL;
oop MetaspaceShared::find_archived_heap_object(oop obj) {
assert(DumpSharedSpaces, "dump-time only");
--- a/src/hotspot/share/memory/metaspaceShared.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/metaspaceShared.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,7 +29,7 @@
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"
@@ -96,12 +96,8 @@
static bool obj_equals(oop const& p1, oop const& p2) {
return p1 == p2;
}
- static unsigned obj_hash(oop const& p) {
- assert(!p->mark()->has_bias_pattern(),
- "this object should never have been locked"); // so identity_hash won't safepoin
- unsigned hash = (unsigned)p->identity_hash();
- return hash;
- }
+ static unsigned obj_hash(oop const& p);
+
typedef ResourceHashtable<oop, oop,
MetaspaceShared::obj_hash,
MetaspaceShared::obj_equals,
--- a/src/hotspot/share/memory/universe.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/universe.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/gcLocker.inline.hpp"
@@ -165,6 +165,7 @@
NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;
+uint64_t Universe::_narrow_klass_range = (uint64_t(max_juint)+1);
void Universe::basic_type_classes_do(void f(Klass*)) {
f(boolArrayKlassObj());
--- a/src/hotspot/share/memory/universe.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/memory/universe.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -194,7 +194,8 @@
// For UseCompressedClassPointers.
static struct NarrowPtrStruct _narrow_klass;
static address _narrow_ptrs_base;
-
+ // CompressedClassSpaceSize is set to 1GB, but it may appear 3GB away from _narrow_ptrs_base during a CDS dump.
+ static uint64_t _narrow_klass_range;
// array of dummy objects used with +FullGCAlot
debug_only(static objArrayOop _fullgc_alot_dummy_array;)
// index of next entry to clear
@@ -244,6 +245,10 @@
assert(UseCompressedClassPointers, "no compressed klass ptrs?");
_narrow_klass._base = base;
}
+ static void set_narrow_klass_range(uint64_t range) {
+ assert(UseCompressedClassPointers, "no compressed klass ptrs?");
+ _narrow_klass_range = range;
+ }
static void set_narrow_oop_use_implicit_null_checks(bool use) {
assert(UseCompressedOops, "no compressed ptrs?");
_narrow_oop._use_implicit_null_checks = use;
@@ -429,6 +434,7 @@
// For UseCompressedClassPointers
static address narrow_klass_base() { return _narrow_klass._base; }
static bool is_narrow_klass_base(void* addr) { return (narrow_klass_base() == (address)addr); }
+ static uint64_t narrow_klass_range() { return _narrow_klass_range; }
static int narrow_klass_shift() { return _narrow_klass._shift; }
static bool narrow_klass_use_implicit_null_checks() { return _narrow_klass._use_implicit_null_checks; }
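
With a base, a shift and now a range, a consumer can bounds-check a decoded klass pointer. A minimal sketch, assuming the check is against [base, base + range) (the helper name is hypothetical):

  bool is_in_narrow_klass_range_sketch(address decoded) {
    address base = Universe::narrow_klass_base();
    return decoded >= base &&
           (uint64_t)(decoded - base) < Universe::narrow_klass_range();
  }
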
--- a/src/hotspot/share/oops/access.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/access.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -675,7 +675,6 @@
inline static typename EnableIf<
!HasDecorator<decorators, AS_RAW>::value, T>::type
atomic_cmpxchg(T new_value, void* addr, T compare_value) {
- typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
if (is_hardwired_primitive<decorators>()) {
const DecoratorSet expanded_decorators = decorators | AS_RAW;
return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
@@ -761,17 +760,33 @@
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
- HasDecorator<decorators, AS_RAW>::value, bool>::type
- arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
+ HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
+ arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
- return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+ if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
+ return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+ } else {
+ return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+ }
+ }
+
+ template <DecoratorSet decorators, typename T>
+ inline static typename EnableIf<
+ HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
+ arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+ if (UseCompressedOops) {
+ const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
+ return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+ } else {
+ const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
+ return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+ }
}
template <DecoratorSet decorators, typename T>
inline static typename EnableIf<
!HasDecorator<decorators, AS_RAW>::value, bool>::type
- arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
- typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
+ arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
if (is_hardwired_primitive<decorators>()) {
const DecoratorSet expanded_decorators = decorators | AS_RAW;
return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
@@ -947,6 +962,24 @@
return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
}
+ template <DecoratorSet decorators, typename T>
+ inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
+ return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
+ }
+
+ template <DecoratorSet decorators>
+ inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
+ const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
+ return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+ }
+
+ template <DecoratorSet decorators>
+ inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
+ const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
+ INTERNAL_RT_USE_COMPRESSED_OOPS;
+ return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
+ }
+
// Step 1: Set default decorators. This step remembers if a type was volatile
// and then sets the MO_VOLATILE decorator by default. Otherwise, a default
// memory ordering is set for the access, and the implied decorator rules
@@ -1077,18 +1110,16 @@
}
template <DecoratorSet decorators, typename T>
- inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
+ inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
(IsSame<T, void>::value || IsIntegral<T>::value) ||
IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
typedef typename Decay<T>::type DecayedT;
- const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP |
- (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
- INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
- return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj,
- const_cast<DecayedT*>(src),
- const_cast<DecayedT*>(dst),
- length);
+ const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
+ return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
+ const_cast<DecayedT*>(src),
+ const_cast<DecayedT*>(dst),
+ length);
}
template <DecoratorSet decorators>
--- a/src/hotspot/share/oops/accessBackend.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/accessBackend.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -384,7 +384,6 @@
template <typename T>
static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
- static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length);
static void clone(oop src, oop dst, size_t size);
--- a/src/hotspot/share/oops/accessBackend.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/accessBackend.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -122,17 +122,6 @@
}
template <DecoratorSet decorators>
-inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
- bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
- HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
- if (needs_oop_compress) {
- return arraycopy(reinterpret_cast<narrowOop*>(src), reinterpret_cast<narrowOop*>(dst), length);
- } else {
- return arraycopy(reinterpret_cast<oop*>(src), reinterpret_cast<oop*>(dst), length);
- }
-}
-
-template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
HasDecorator<ds, MO_SEQ_CST>::value, T>::type
--- a/src/hotspot/share/oops/annotations.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/annotations.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,6 @@
#include "oops/array.hpp"
#include "oops/metadata.hpp"
-#include "runtime/handles.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
--- a/src/hotspot/share/oops/constantPool.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/constantPool.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -47,10 +47,11 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/fieldType.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/signature.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "utilities/copy.hpp"
constantTag ConstantPool::tag_at(int which) const { return (constantTag)tags()->at_acquire(which); }
--- a/src/hotspot/share/oops/instanceKlass.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/instanceKlass.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -2255,12 +2255,6 @@
}
}
-address InstanceKlass::static_field_addr(int offset) {
- assert(offset >= InstanceMirrorKlass::offset_of_static_fields(), "has already been adjusted");
- return (address)(offset + cast_from_oop<intptr_t>(java_mirror()));
-}
-
-
const char* InstanceKlass::signature_name() const {
int hash_len = 0;
char hash_buf[40];
--- a/src/hotspot/share/oops/instanceKlass.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/instanceKlass.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1070,7 +1070,7 @@
int itable_offset_in_words() const { return start_of_itable() - (intptr_t*)this; }
- address static_field_addr(int offset);
+ oop static_field_base_raw() { return java_mirror(); }
OopMapBlock* start_of_nonstatic_oop_maps() const {
return (OopMapBlock*)(start_of_itable() + itable_length());
--- a/src/hotspot/share/oops/method.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/method.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#define SHARE_VM_OOPS_METHOD_INLINE_HPP
#include "oops/method.hpp"
-#include "runtime/orderAccess.hpp"
+#include "runtime/orderAccess.inline.hpp"
inline address Method::from_compiled_entry() const {
return OrderAccess::load_acquire(&_from_compiled_entry);
--- a/src/hotspot/share/oops/methodData.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/methodData.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "compiler/compilerOracle.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/linkResolver.hpp"
--- a/src/hotspot/share/oops/methodData.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/methodData.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,6 +27,7 @@
#include "interpreter/bytecodes.hpp"
#include "memory/universe.hpp"
+#include "oops/metadata.hpp"
#include "oops/method.hpp"
#include "oops/oop.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/share/oops/oop.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/oop.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -284,8 +284,6 @@
// garbage collection
inline bool is_gc_marked() const;
- inline bool is_scavengable() const;
-
// Forward pointer operations for scavenge
inline bool is_forwarded() const;
--- a/src/hotspot/share/oops/oop.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/oop.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -363,10 +363,6 @@
return mark()->is_marked();
}
-bool oopDesc::is_scavengable() const {
- return Universe::heap()->is_scavengable(oop(const_cast<oopDesc*>(this)));
-}
-
// Used by scavengers
bool oopDesc::is_forwarded() const {
// The extra heap check is needed since the obj might be locked, in which case the
--- a/src/hotspot/share/oops/typeArrayOop.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/oops/typeArrayOop.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,6 @@
#include "oops/arrayOop.hpp"
#include "oops/typeArrayKlass.hpp"
-#include "runtime/orderAccess.inline.hpp"
// A typeArrayOop is an array containing basic types (non oop elements).
// It is used for arrays of {characters, singles, doubles, bytes, shorts, integers, longs}
--- a/src/hotspot/share/opto/callnode.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/callnode.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,7 @@
#include "compiler/compileLog.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
+#include "interpreter/interpreter.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/castnode.hpp"
--- a/src/hotspot/share/opto/compile.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/compile.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -397,13 +397,20 @@
remove_range_check_cast(cast);
}
}
- // Remove useless expensive node
+ // Remove useless expensive nodes
for (int i = C->expensive_count()-1; i >= 0; i--) {
Node* n = C->expensive_node(i);
if (!useful.member(n)) {
remove_expensive_node(n);
}
}
+ // Remove useless Opaque4 nodes
+ for (int i = opaque4_count() - 1; i >= 0; i--) {
+ Node* opaq = opaque4_node(i);
+ if (!useful.member(opaq)) {
+ remove_opaque4_node(opaq);
+ }
+ }
// clean up the late inline lists
remove_useless_late_inlines(&_string_late_inlines, useful);
remove_useless_late_inlines(&_boxing_late_inlines, useful);
@@ -1179,6 +1186,7 @@
_predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+ _opaque4_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
register_library_intrinsics();
}
@@ -1957,6 +1965,22 @@
assert(range_check_cast_count() == 0, "should be empty");
}
+void Compile::add_opaque4_node(Node* n) {
+ assert(n->Opcode() == Op_Opaque4, "Opaque4 only");
+ assert(!_opaque4_nodes->contains(n), "duplicate entry in Opaque4 list");
+ _opaque4_nodes->append(n);
+}
+
+// Remove all Opaque4 nodes.
+void Compile::remove_opaque4_nodes(PhaseIterGVN &igvn) {
+ for (int i = opaque4_count(); i > 0; i--) {
+ Node* opaq = opaque4_node(i-1);
+ assert(opaq->Opcode() == Op_Opaque4, "Opaque4 only");
+ igvn.replace_node(opaq, opaq->in(2));
+ }
+ assert(opaque4_count() == 0, "should be empty");
+}
+
// StringOpts and late inlining of string methods
void Compile::inline_string_calls(bool parse_time) {
{
@@ -2332,6 +2356,11 @@
}
}
+ if (opaque4_count() > 0) {
+ C->remove_opaque4_nodes(igvn);
+ igvn.optimize();
+ }
+
DEBUG_ONLY( _modified_nodes = NULL; )
} // (End scope of igvn; run destructor if necessary for asserts.)
@@ -3332,6 +3361,20 @@
}
break;
}
+ case Op_CmpUL: {
+ if (!Matcher::has_match_rule(Op_CmpUL)) {
+ // We don't support unsigned long comparisons. Set the first operand
+ // to max_jlong if it is negative so that the signed comparison fails.
+ ConINode* sign_pos = new ConINode(TypeInt::make(BitsPerLong - 1));
+ Node* sign_bit_mask = new RShiftLNode(n->in(1), sign_pos);
+ Node* orl = new OrLNode(n->in(1), sign_bit_mask);
+ ConLNode* remove_sign_mask = new ConLNode(TypeLong::make(max_jlong));
+ Node* andl = new AndLNode(orl, remove_sign_mask);
+ Node* cmp = new CmpLNode(andl, n->in(2));
+ n->subsume_by(cmp, this);
+ }
+ break;
+ }
default:
assert( !n->is_Call(), "" );
assert( !n->is_Mem(), "" );
--- a/src/hotspot/share/opto/compile.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/compile.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
#include "libadt/dict.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/methodData.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
@@ -415,6 +416,7 @@
GrowableArray<Node*>* _predicate_opaqs; // List of Opaque1 nodes for the loop predicates.
GrowableArray<Node*>* _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
GrowableArray<Node*>* _range_check_casts; // List of CastII nodes with a range check dependency
+ GrowableArray<Node*>* _opaque4_nodes; // List of Opaque4 nodes that have a default value
ConnectionGraph* _congraph;
#ifndef PRODUCT
IdealGraphPrinter* _printer;
@@ -809,6 +811,16 @@
// Remove all range check dependent CastIINodes.
void remove_range_check_casts(PhaseIterGVN &igvn);
+ void add_opaque4_node(Node* n);
+ void remove_opaque4_node(Node* n) {
+ if (_opaque4_nodes->contains(n)) {
+ _opaque4_nodes->remove(n);
+ }
+ }
+ Node* opaque4_node(int idx) const { return _opaque4_nodes->at(idx); }
+ int opaque4_count() const { return _opaque4_nodes->length(); }
+ void remove_opaque4_nodes(PhaseIterGVN &igvn);
+
// remove the opaque nodes that protect the predicates so that the unused checks and
// uncommon traps will be eliminated from the graph.
void cleanup_loop_predicates(PhaseIterGVN &igvn);
--- a/src/hotspot/share/opto/graphKit.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/graphKit.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,8 +30,9 @@
#include "gc/g1/heapRegion.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
-#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
+#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "opto/addnode.hpp"
#include "opto/castnode.hpp"
@@ -1565,7 +1566,7 @@
g1_write_barrier_pre(do_load, obj, adr, adr_idx, val, val_type, pre_val, bt);
break;
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
break;
default :
@@ -1580,7 +1581,7 @@
case BarrierSet::G1BarrierSet:
return true; // Can move it if no safepoint
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
return true; // There is no pre-barrier
default :
@@ -1604,7 +1605,7 @@
g1_write_barrier_post(store, obj, adr, adr_idx, val, bt, use_precise);
break;
- case BarrierSet::CardTableModRef:
+ case BarrierSet::CardTableBarrierSet:
write_barrier_post(store, obj, adr, adr_idx, val, use_precise);
break;
@@ -3811,8 +3812,8 @@
bool GraphKit::use_ReduceInitialCardMarks() {
BarrierSet *bs = Universe::heap()->barrier_set();
- return bs->is_a(BarrierSet::CardTableModRef)
- && barrier_set_cast<CardTableModRefBS>(bs)->can_elide_tlab_store_barriers()
+ return bs->is_a(BarrierSet::CardTableBarrierSet)
+ && barrier_set_cast<CardTableBarrierSet>(bs)->can_elide_tlab_store_barriers()
&& ReduceInitialCardMarks;
}
@@ -3881,7 +3882,7 @@
Node* cast = __ CastPX(__ ctrl(), adr);
// Divide by card size
- assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef),
+ assert(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableBarrierSet),
"Only one we handle so far.");
Node* card_offset = __ URShiftX( cast, __ ConI(CardTable::card_shift) );
@@ -4159,7 +4160,7 @@
* as part of the allocation in the case the allocated object is not located
* in the nursery, this would happen for humongous objects. This is similar to
* how CMS is required to handle this case, see the comments for the method
- * CardTableModRefBS::on_allocation_slowpath_exit and OptoRuntime::new_deferred_store_barrier.
+ * CardTableBarrierSet::on_allocation_slowpath_exit and OptoRuntime::new_deferred_store_barrier.
* A deferred card mark is required for these objects and handled in the above
* mentioned methods.
*
--- a/src/hotspot/share/opto/lcm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/lcm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/ad.hpp"
#include "opto/block.hpp"
--- a/src/hotspot/share/opto/library_call.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/library_call.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+#include "ci/ciUtilities.inline.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
--- a/src/hotspot/share/opto/loopPredicate.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/loopPredicate.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -632,7 +632,7 @@
int scale, Node* offset,
Node* init, Node* limit, jint stride,
Node* range, bool upper, bool &overflow) {
- jint con_limit = limit->is_Con() ? limit->get_int() : 0;
+ jint con_limit = (limit != NULL && limit->is_Con()) ? limit->get_int() : 0;
jint con_init = init->is_Con() ? init->get_int() : 0;
jint con_offset = offset->is_Con() ? offset->get_int() : 0;
@@ -751,26 +751,7 @@
// Integer expressions may overflow, do long comparison
range = new ConvI2LNode(range);
register_new_node(range, ctrl);
- if (!Matcher::has_match_rule(Op_CmpUL)) {
- // We don't support unsigned long comparisons. Set 'max_idx_expr'
- // to max_julong if < 0 to make the signed comparison fail.
- ConINode* sign_pos = _igvn.intcon(BitsPerLong - 1);
- set_ctrl(sign_pos, C->root());
- Node* sign_bit_mask = new RShiftLNode(max_idx_expr, sign_pos);
- register_new_node(sign_bit_mask, ctrl);
- // OR with sign bit to set all bits to 1 if negative (otherwise no change)
- max_idx_expr = new OrLNode(max_idx_expr, sign_bit_mask);
- register_new_node(max_idx_expr, ctrl);
- // AND with 0x7ff... to unset the sign bit
- ConLNode* remove_sign_mask = _igvn.longcon(max_jlong);
- set_ctrl(remove_sign_mask, C->root());
- max_idx_expr = new AndLNode(max_idx_expr, remove_sign_mask);
- register_new_node(max_idx_expr, ctrl);
-
- cmp = new CmpLNode(max_idx_expr, range);
- } else {
- cmp = new CmpULNode(max_idx_expr, range);
- }
+ cmp = new CmpULNode(max_idx_expr, range);
} else {
cmp = new CmpUNode(max_idx_expr, range);
}
@@ -785,6 +766,29 @@
return bol;
}
+// After pre/main/post loops are created, we'll put a copy of some
+// range checks between the pre and main loop to validate the initial
+// value of the induction variable for the main loop. Make a copy of
+// the predicates here with an opaque node as a placeholder for the
+// initial value.
+ProjNode* PhaseIdealLoop::insert_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
+ ProjNode* proj, ProjNode *predicate_proj,
+ ProjNode* upper_bound_proj,
+ int scale, Node* offset,
+ Node* init, Node* limit, jint stride,
+ Node* rng, bool &overflow) {
+ assert(proj->_con && predicate_proj->_con, "not a range check?");
+ Node* opaque_init = new Opaque1Node(C, init);
+ register_new_node(opaque_init, upper_bound_proj);
+ BoolNode* bol = rc_predicate(loop, upper_bound_proj, scale, offset, opaque_init, limit, stride, rng, (stride > 0) != (scale > 0), overflow);
+ Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1)); // This will go away once loop opts are over
+ register_new_node(opaque_bol, upper_bound_proj);
+ ProjNode* new_proj = create_new_if_for_predicate(predicate_proj, NULL, Deoptimization::Reason_predicate, overflow ? Op_If : iff->Opcode());
+ _igvn.replace_input_of(new_proj->in(0), 1, opaque_bol);
+ assert(opaque_init->outcnt() > 0, "should be used");
+ return new_proj;
+}
+
//------------------------------ loop_predication_impl--------------------------
// Insert loop predicates for null checks and range checks
bool PhaseIdealLoop::loop_predication_impl(IdealLoopTree *loop) {
@@ -980,6 +984,10 @@
// any dependent nodes onto the upper bound test.
new_predicate_proj = upper_bound_proj;
+ if (iff->is_RangeCheck()) {
+ new_predicate_proj = insert_skeleton_predicate(iff, loop, proj, predicate_proj, upper_bound_proj, scale, offset, init, limit, stride, rng, overflow);
+ }
+
#ifndef PRODUCT
if (TraceLoopOpts && !TraceLoopPredicate) {
tty->print("Predicate RC ");
--- a/src/hotspot/share/opto/loopTransform.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/loopTransform.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -982,7 +982,7 @@
return n;
}
-bool PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
+Node* PhaseIdealLoop::cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop) {
Node* castii = new CastIINode(incr, TypeInt::INT, true);
castii->set_req(0, ctrl);
register_new_node(castii, ctrl);
@@ -990,10 +990,138 @@
Node* n = incr->fast_out(i);
if (n->is_Phi() && n->in(0) == loop) {
int nrep = n->replace_edge(incr, castii);
- return true;
+ return castii;
}
}
- return false;
+ return NULL;
+}
+
+// Make a copy of the skeleton range check predicates before the main
+// loop and set the initial value of the main loop as their input. After
+// unrolling, the range of values of the induction variable in the main
+// loop can fall outside the range allowed by the array accesses (the
+// main loop is never executed). When that happens, range check
+// CastII/ConvI2L nodes cause some data paths to die. For consistency,
+// the control paths must die too, but the range checks were removed by
+// predication. The range checks that we add back here guarantee that
+// they do.
+void PhaseIdealLoop::duplicate_predicates(CountedLoopNode* pre_head, Node* min_taken, Node* castii,
+ IdealLoopTree* outer_loop, LoopNode* outer_main_head,
+ uint dd_main_head) {
+ if (UseLoopPredicate) {
+ Node* entry = pre_head->in(LoopNode::EntryControl);
+ Node* predicate = NULL;
+ predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_loop_limit_check);
+ if (predicate != NULL) {
+ entry = entry->in(0)->in(0);
+ }
+ predicate = find_predicate_insertion_point(entry, Deoptimization::Reason_predicate);
+ if (predicate != NULL) {
+ IfNode* iff = entry->in(0)->as_If();
+ ProjNode* uncommon_proj = iff->proj_out(1 - entry->as_Proj()->_con);
+ Node* rgn = uncommon_proj->unique_ctrl_out();
+ assert(rgn->is_Region() || rgn->is_Call(), "must be a region or call uct");
+ assert(iff->in(1)->in(1)->Opcode() == Op_Opaque1, "unexpected predicate shape");
+ entry = entry->in(0)->in(0);
+ Node* prev_proj = min_taken;
+ while (entry != NULL && entry->is_Proj() && entry->in(0)->is_If()) {
+ uncommon_proj = entry->in(0)->as_If()->proj_out(1 - entry->as_Proj()->_con);
+ if (uncommon_proj->unique_ctrl_out() != rgn)
+ break;
+ iff = entry->in(0)->as_If();
+ if (iff->in(1)->Opcode() == Op_Opaque4) {
+ Node_Stack to_clone(2);
+ to_clone.push(iff->in(1), 1);
+ uint current = C->unique();
+ Node* result = NULL;
+ // Look for the opaque node to replace with the init value
+ // and clone everything in between. We keep the Opaque4 node
+ // so the duplicated predicates are eliminated once loop
+ // opts are over: they are here only to keep the IR graph
+ // consistent.
+ do {
+ Node* n = to_clone.node();
+ uint i = to_clone.index();
+ Node* m = n->in(i);
+ int op = m->Opcode();
+ if (m->is_Bool() ||
+ m->is_Cmp() ||
+ op == Op_AndL ||
+ op == Op_OrL ||
+ op == Op_RShiftL ||
+ op == Op_LShiftL ||
+ op == Op_AddL ||
+ op == Op_AddI ||
+ op == Op_MulL ||
+ op == Op_MulI ||
+ op == Op_SubL ||
+ op == Op_SubI ||
+ op == Op_ConvI2L) {
+ to_clone.push(m, 1);
+ continue;
+ }
+ if (op == Op_Opaque1) {
+ if (n->_idx < current) {
+ n = n->clone();
+ }
+ n->set_req(i, castii);
+ register_new_node(n, min_taken);
+ to_clone.set_node(n);
+ }
+ for (;;) {
+ Node* cur = to_clone.node();
+ uint j = to_clone.index();
+ if (j+1 < cur->req()) {
+ to_clone.set_index(j+1);
+ break;
+ }
+ to_clone.pop();
+ if (to_clone.size() == 0) {
+ result = cur;
+ break;
+ }
+ Node* next = to_clone.node();
+ j = to_clone.index();
+ if (cur->_idx >= current) {
+ if (next->_idx < current) {
+ next = next->clone();
+ register_new_node(next, min_taken);
+ to_clone.set_node(next);
+ }
+ assert(next->in(j) != cur, "input should have been cloned");
+ next->set_req(j, cur);
+ }
+ }
+ } while (result == NULL);
+ assert(result->_idx >= current, "new node expected");
+
+ Node* proj = entry->clone();
+ Node* other_proj = uncommon_proj->clone();
+ Node* new_iff = iff->clone();
+ new_iff->set_req(1, result);
+ proj->set_req(0, new_iff);
+ other_proj->set_req(0, new_iff);
+ Node *frame = new ParmNode(C->start(), TypeFunc::FramePtr);
+ register_new_node(frame, C->start());
+ // It's impossible for the predicate to fail at runtime. Use
+ // a Halt node.
+ Node* halt = new HaltNode(other_proj, frame);
+ C->root()->add_req(halt);
+ new_iff->set_req(0, prev_proj);
+
+ register_control(new_iff, outer_loop->_parent, prev_proj);
+ register_control(proj, outer_loop->_parent, new_iff);
+ register_control(other_proj, _ltree_root, new_iff);
+ register_control(halt, _ltree_root, other_proj);
+
+ prev_proj = proj;
+ }
+ entry = entry->in(0)->in(0);
+ }
+ _igvn.replace_input_of(outer_main_head, LoopNode::EntryControl, prev_proj);
+ set_idom(outer_main_head, prev_proj, dd_main_head);
+ }
+ }
}
//------------------------------insert_pre_post_loops--------------------------
@@ -1137,8 +1265,9 @@
// dependencies.
// CastII for the main loop:
- bool inserted = cast_incr_before_loop( pre_incr, min_taken, main_head );
- assert(inserted, "no castII inserted");
+ Node* castii = cast_incr_before_loop( pre_incr, min_taken, main_head );
+ assert(castii != NULL, "no castII inserted");
+ duplicate_predicates(pre_head, min_taken, castii, outer_loop, outer_main_head, dd_main_head);
// Step B4: Shorten the pre-loop to run only 1 iteration (for now).
// RCE and alignment may change this later.
@@ -1403,8 +1532,8 @@
}
// CastII for the new post loop:
- bool inserted = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
- assert(inserted, "no castII inserted");
+ Node* castii = cast_incr_before_loop(zer_opaq->in(1), zer_taken, post_head);
+ assert(castii != NULL, "no castII inserted");
return new_main_exit;
}
@@ -1467,7 +1596,7 @@
if (!is_canonical_loop_entry(loop_head)) {
return;
}
- opaq = ctrl->in(0)->in(1)->in(1)->in(2);
+ opaq = loop_head->skip_predicates()->in(0)->in(1)->in(1)->in(2);
// Zero-trip test uses an 'opaque' node which is not shared.
assert(opaq->outcnt() == 1 && opaq->in(1) == limit, "");
}
@@ -2034,6 +2163,34 @@
return false;
}
+// Same as PhaseIdealLoop::duplicate_predicates() but for range checks
+// eliminated by iteration splitting.
+Node* PhaseIdealLoop::add_range_check_predicate(IdealLoopTree* loop, CountedLoopNode* cl,
+ Node* predicate_proj, int scale_con, Node* offset,
+ Node* limit, jint stride_con) {
+ bool overflow = false;
+ BoolNode* bol = rc_predicate(loop, predicate_proj, scale_con, offset, cl->init_trip(), NULL, stride_con, limit, (stride_con > 0) != (scale_con > 0), overflow);
+ Node* opaque_bol = new Opaque4Node(C, bol, _igvn.intcon(1));
+ register_new_node(opaque_bol, predicate_proj);
+ IfNode* new_iff = NULL;
+ if (overflow) {
+ new_iff = new IfNode(predicate_proj, bol, PROB_MAX, COUNT_UNKNOWN);
+ } else {
+ new_iff = new RangeCheckNode(predicate_proj, bol, PROB_MAX, COUNT_UNKNOWN);
+ }
+ register_control(new_iff, loop->_parent, predicate_proj);
+ Node* iffalse = new IfFalseNode(new_iff);
+ register_control(iffalse, _ltree_root, new_iff);
+ ProjNode* iftrue = new IfTrueNode(new_iff);
+ register_control(iftrue, loop->_parent, new_iff);
+ Node *frame = new ParmNode(C->start(), TypeFunc::FramePtr);
+ register_new_node(frame, C->start());
+ Node* halt = new HaltNode(iffalse, frame);
+ register_control(halt, _ltree_root, iffalse);
+ C->root()->add_req(halt);
+ return iftrue;
+}
+
//------------------------------do_range_check---------------------------------
// Eliminate range-checks and other trip-counter vs loop-invariant tests.
int PhaseIdealLoop::do_range_check( IdealLoopTree *loop, Node_List &old_new ) {
@@ -2069,7 +2226,7 @@
}
// Need to find the main-loop zero-trip guard
- Node *ctrl = cl->skip_strip_mined()->in(LoopNode::EntryControl);
+ Node *ctrl = cl->skip_predicates();
Node *iffm = ctrl->in(0);
Node *opqzm = iffm->in(1)->in(1)->in(2);
assert(opqzm->in(1) == main_limit, "do not understand situation");
@@ -2124,6 +2281,8 @@
// the loop is in canonical form to multiversion.
closed_range_checks = 0;
+ Node* predicate_proj = cl->skip_strip_mined()->in(LoopNode::EntryControl);
+ assert(predicate_proj->is_Proj() && predicate_proj->in(0)->is_If(), "if projection only");
// Check loop body for tests of trip-counter plus loop-invariant vs loop-variant.
for( uint i = 0; i < loop->_body.size(); i++ ) {
Node *iff = loop->_body[i];
@@ -2168,7 +2327,7 @@
// 'limit' maybe pinned below the zero trip test (probably from a
// previous round of rce), in which case, it can't be used in the
// zero trip test expression which must occur before the zero test's if.
- if( limit_c == ctrl ) {
+ if (is_dominator(ctrl, limit_c)) {
continue; // Don't rce this check but continue looking for other candidates.
}
@@ -2186,7 +2345,7 @@
// As above for the 'limit', the 'offset' maybe pinned below the
// zero trip test.
- if( offset_c == ctrl ) {
+ if (is_dominator(ctrl, offset_c)) {
continue; // Don't rce this check but continue looking for other candidates.
}
#ifdef ASSERT
@@ -2209,6 +2368,7 @@
add_constraint( stride_con, scale_con, offset, zero, limit, pre_ctrl, &pre_limit, &main_limit );
// (0-offset)/scale could be outside of loop iterations range.
conditional_rc = true;
+ predicate_proj = add_range_check_predicate(loop, cl, predicate_proj, scale_con, offset, limit, stride_con);
} else {
if (PrintOpto) {
tty->print_cr("missed RCE opportunity");
@@ -2278,6 +2438,10 @@
} // End of is IF
}
+ if (predicate_proj != cl->skip_strip_mined()->in(LoopNode::EntryControl)) {
+ _igvn.replace_input_of(cl->skip_strip_mined(), LoopNode::EntryControl, predicate_proj);
+ set_idom(cl->skip_strip_mined(), predicate_proj, dom_depth(cl->skip_strip_mined()));
+ }
// Update loop limits
if (conditional_rc) {
@@ -2540,7 +2704,7 @@
#ifdef ASSERT
static CountedLoopNode* locate_pre_from_main(CountedLoopNode *cl) {
- Node *ctrl = cl->skip_strip_mined()->in(LoopNode::EntryControl);
+ Node *ctrl = cl->skip_predicates();
assert(ctrl->Opcode() == Op_IfTrue || ctrl->Opcode() == Op_IfFalse, "");
Node *iffm = ctrl->in(0);
assert(iffm->Opcode() == Op_If, "");
@@ -2579,7 +2743,7 @@
}
assert(locate_pre_from_main(main_head) == cl, "bad main loop");
- Node* main_iff = main_head->skip_strip_mined()->in(LoopNode::EntryControl)->in(0);
+ Node* main_iff = main_head->skip_predicates()->in(0);
// Remove the Opaque1Node of the pre loop and make it execute all iterations
phase->_igvn.replace_input_of(pre_cmp, 2, pre_cmp->in(2)->in(2));
@@ -2640,7 +2804,7 @@
}
if (needs_guard) {
// Check for an obvious zero trip guard.
- Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->skip_strip_mined()->in(LoopNode::EntryControl));
+ Node* inctrl = PhaseIdealLoop::skip_loop_predicates(cl->skip_predicates());
if (inctrl->Opcode() == Op_IfTrue || inctrl->Opcode() == Op_IfFalse) {
bool maybe_swapped = (inctrl->Opcode() == Op_IfFalse);
// The test should look like just the backedge of a CountedLoop
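
Each copy created by add_range_check_predicate() is terminated by a Halt node on its failing projection, so it documents a condition that can never fail at runtime rather than a deoptimization point; schematically:

  predicate_proj --> RangeCheck(Opaque4(bol, 1)) --IfTrue--> next predicate / loop entry
                                                 `-IfFalse-> Halt(FramePtr)   (unreachable)
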
--- a/src/hotspot/share/opto/loopnode.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/loopnode.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1158,9 +1158,9 @@
return NULL;
}
-LoopNode* CountedLoopNode::skip_strip_mined(int expect_opaq) {
+LoopNode* CountedLoopNode::skip_strip_mined(int expect_skeleton) {
if (is_strip_mined()) {
- verify_strip_mined(expect_opaq);
+ verify_strip_mined(expect_skeleton);
return in(EntryControl)->as_Loop();
}
return this;
@@ -1252,6 +1252,20 @@
return l->outer_safepoint();
}
+Node* CountedLoopNode::skip_predicates() {
+ if (is_main_loop()) {
+ Node* ctrl = skip_strip_mined()->in(LoopNode::EntryControl);
+ while (ctrl != NULL && ctrl->is_Proj() && ctrl->in(0)->is_If() &&
+ ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->outcnt() == 1 &&
+ ctrl->in(0)->as_If()->proj_out(1-ctrl->as_Proj()->_con)->unique_out()->Opcode() == Op_Halt) {
+ ctrl = ctrl->in(0)->in(0);
+ }
+
+ return ctrl;
+ }
+ return in(LoopNode::EntryControl);
+}
+
void OuterStripMinedLoopNode::adjust_strip_mined_loop(PhaseIterGVN* igvn) {
// Look for the outer & inner strip mined loop, reduce number of
// iterations of the inner loop, set exit condition of outer loop,
@@ -3770,7 +3784,8 @@
if (!cl->is_main_loop() && !cl->is_post_loop()) {
return false;
}
- Node* ctrl = cl->skip_strip_mined()->in(LoopNode::EntryControl);
+ Node* ctrl = cl->skip_predicates();
+
if (ctrl == NULL || (!ctrl->is_IfTrue() && !ctrl->is_IfFalse())) {
return false;
}
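
Call sites that need the zero-trip guard of a main loop now hop over these Halt-guarded predicates first; the recurring before/after pattern in the loopTransform.cpp changes above is:

  // before
  Node* ctrl = cl->skip_strip_mined()->in(LoopNode::EntryControl);
  // after
  Node* ctrl = cl->skip_predicates();

skip_predicates() recognizes exactly the predicates this patch inserts: If projections whose other projection has a lone Halt node as its only use.
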
--- a/src/hotspot/share/opto/loopnode.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/loopnode.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -138,7 +138,7 @@
#endif
void verify_strip_mined(int expect_skeleton) const;
- virtual LoopNode* skip_strip_mined(int expect_opaq = 1) { return this; }
+ virtual LoopNode* skip_strip_mined(int expect_skeleton = 1) { return this; }
virtual IfTrueNode* outer_loop_tail() const { ShouldNotReachHere(); return NULL; }
virtual OuterStripMinedLoopEndNode* outer_loop_end() const { ShouldNotReachHere(); return NULL; }
virtual IfFalseNode* outer_loop_exit() const { ShouldNotReachHere(); return NULL; }
@@ -298,6 +298,11 @@
virtual IfFalseNode* outer_loop_exit() const;
virtual SafePointNode* outer_safepoint() const;
+ // If this is a main loop in a pre/main/post loop nest, walk over
+ // the predicates that were inserted by
+ // duplicate_predicates()/add_range_check_predicate()
+ Node* skip_predicates();
+
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
@@ -724,7 +729,10 @@
return ctrl;
}
- bool cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);
+ Node* cast_incr_before_loop(Node* incr, Node* ctrl, Node* loop);
+ void duplicate_predicates(CountedLoopNode* pre_head, Node *min_taken, Node* castii,
+ IdealLoopTree* outer_loop, LoopNode* outer_main_head,
+ uint dd_main_head);
public:
@@ -1067,6 +1075,15 @@
// Implementation of the loop predication to promote checks outside the loop
bool loop_predication_impl(IdealLoopTree *loop);
+ ProjNode* insert_skeleton_predicate(IfNode* iff, IdealLoopTree *loop,
+ ProjNode* proj, ProjNode *predicate_proj,
+ ProjNode* upper_bound_proj,
+ int scale, Node* offset,
+ Node* init, Node* limit, jint stride,
+ Node* rng, bool& overflow);
+ Node* add_range_check_predicate(IdealLoopTree* loop, CountedLoopNode* cl,
+ Node* predicate_proj, int scale_con, Node* offset,
+ Node* limit, jint stride_con);
// Helper function to collect predicate for eliminating the useless ones
void collect_potentially_useful_predicates(IdealLoopTree *loop, Unique_Node_List &predicate_opaque1);
--- a/src/hotspot/share/opto/loopopts.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/loopopts.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1029,11 +1029,18 @@
//------------------------------place_near_use---------------------------------
// Place some computation next to use but not inside inner loops.
// For inner loop uses move it to the preheader area.
-Node *PhaseIdealLoop::place_near_use( Node *useblock ) const {
+Node *PhaseIdealLoop::place_near_use(Node *useblock) const {
IdealLoopTree *u_loop = get_loop( useblock );
- return (u_loop->_irreducible || u_loop->_child)
- ? useblock
- : u_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
+ if (u_loop->_irreducible) {
+ return useblock;
+ }
+ if (u_loop->_child) {
+ if (useblock == u_loop->_head && u_loop->_head->is_OuterStripMinedLoop()) {
+ return u_loop->_head->in(LoopNode::EntryControl);
+ }
+ return useblock;
+ }
+ return u_loop->_head->as_Loop()->skip_strip_mined()->in(LoopNode::EntryControl);
}
--- a/src/hotspot/share/opto/macro.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/macro.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "compiler/compileLog.hpp"
+#include "gc/shared/collectedHeap.inline.hpp"
#include "libadt/vectset.hpp"
#include "opto/addnode.hpp"
#include "opto/arraycopynode.hpp"
@@ -2667,8 +2668,7 @@
assert(n->Opcode() == Op_LoopLimit ||
n->Opcode() == Op_Opaque1 ||
n->Opcode() == Op_Opaque2 ||
- n->Opcode() == Op_Opaque3 ||
- n->Opcode() == Op_Opaque4, "unknown node type in macro list");
+ n->Opcode() == Op_Opaque3, "unknown node type in macro list");
}
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
progress = progress || success;
@@ -2733,9 +2733,6 @@
_igvn.replace_node(n, repl);
success = true;
#endif
- } else if (n->Opcode() == Op_Opaque4) {
- _igvn.replace_node(n, n->in(2));
- success = true;
} else if (n->Opcode() == Op_OuterStripMinedLoop) {
n->as_OuterStripMinedLoop()->adjust_strip_mined_loop(&_igvn);
C->remove_macro_node(n);
--- a/src/hotspot/share/opto/node.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/node.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -504,6 +504,9 @@
if (cast != NULL && cast->has_range_check()) {
C->add_range_check_cast(cast);
}
+ if (n->Opcode() == Op_Opaque4) {
+ C->add_opaque4_node(n);
+ }
n->set_idx(C->next_unique()); // Get new unique index as well
debug_only( n->verify_construction() );
@@ -612,6 +615,9 @@
if (cast != NULL && cast->has_range_check()) {
compile->remove_range_check_cast(cast);
}
+ if (Opcode() == Op_Opaque4) {
+ compile->remove_opaque4_node(this);
+ }
if (is_SafePoint()) {
as_SafePoint()->delete_replaced_nodes();
@@ -1352,6 +1358,9 @@
if (cast != NULL && cast->has_range_check()) {
igvn->C->remove_range_check_cast(cast);
}
+ if (dead->Opcode() == Op_Opaque4) {
+ igvn->C->remove_opaque4_node(dead);
+ }
igvn->C->record_dead_node(dead->_idx);
// Kill all inputs to the dead guy
for (uint i=0; i < dead->req(); i++) {
--- a/src/hotspot/share/opto/opaquenode.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/opaquenode.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -60,10 +60,6 @@
return (&n == this); // Always fail except on self
}
-Node* Opaque4Node::Identity(PhaseGVN* phase) {
- return phase->C->major_progress() ? this : in(2);
-}
-
const Type* Opaque4Node::Value(PhaseGVN* phase) const {
return phase->type(in(1));
}
--- a/src/hotspot/share/opto/opaquenode.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/opaquenode.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,14 +35,14 @@
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
public:
- Opaque1Node( Compile* C, Node *n ) : Node(0,n) {
+ Opaque1Node(Compile* C, Node *n) : Node(NULL, n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
// Special version for the pre-loop to hold the original loop limit
// which is consumed by range check elimination.
- Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {
+ Opaque1Node(Compile* C, Node *n, Node* orig_limit) : Node(NULL, n, orig_limit) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
@@ -87,25 +87,23 @@
bool rtm_opt() const { return (_opt == RTM_OPT); }
};
-// Used by GraphKit::must_be_not_null(): input 1 is a check that we
-// know implicitly is always true or false but the compiler has no way
-// to prove. If during optimizations, that check becomes true or
-// false, the Opaque4 node is replaced by that constant true or
-// false. Input 2 is the constant value we know the test takes. After
-// loop optimizations, we replace input 1 by input 2 so the control
-// that depends on that test can be removed and there's no overhead at
-// runtime.
+// Input 1 is a check that we know implicitly is always true or false
+// but the compiler has no way to prove. If during optimizations, that
+// check becomes true or false, the Opaque4 node is replaced by that
+// constant true or false. Input 2 is the constant value we know the
+// test takes. After loop optimizations, we replace input 1 by input 2
+// so the control that depends on that test can be removed and there's
+// no overhead at runtime. Used for instance by
+// GraphKit::must_be_not_null().
class Opaque4Node : public Node {
public:
- Opaque4Node(Compile* C, Node *tst, Node* final_tst) : Node(0, tst, final_tst) {
- // Put it on the Macro nodes list to removed during macro nodes expansion.
- init_flags(Flag_is_macro);
- C->add_macro_node(this);
+ Opaque4Node(Compile* C, Node *tst, Node* final_tst) : Node(NULL, tst, final_tst) {
+ // Put it on the Opaque4 nodes list to be removed after all optimizations
+ C->add_opaque4_node(this);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::BOOL; }
virtual const Type* Value(PhaseGVN* phase) const;
- virtual Node* Identity(PhaseGVN* phase);
};
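
As an illustrative aside (not from this changeset), the Opaque4 lifecycle
described in the comment above can be sketched standalone (mock types, not
HotSpot internals): while optimizations run, the node stands in for the real
test; once loop optimizations finish, every node on the new opaque4 list is
replaced by its input 2, the constant the test is known to take, so the
dependent control folds away.

  #include <cassert>

  struct SketchNode {
    SketchNode* in1;   // the real test, kept opaque to the optimizer
    SketchNode* in2;   // the constant the test is known to evaluate to
  };

  // After loop opts: collapse the guard to its known value (mirrors
  // replacing the Opaque4 node by its in(2)).
  SketchNode* collapse_opaque4(SketchNode* n) { return n->in2; }

  int main() {
    SketchNode known_true = { nullptr, nullptr };
    SketchNode real_test  = { nullptr, nullptr };
    SketchNode guard      = { &real_test, &known_true };
    assert(collapse_opaque4(&guard) == &known_true);
    return 0;
  }
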
--- a/src/hotspot/share/opto/output.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/output.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
+#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
--- a/src/hotspot/share/opto/parse1.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/parse1.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,6 +39,7 @@
#include "opto/runtime.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.inline.hpp"
+#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/copy.hpp"
--- a/src/hotspot/share/opto/phaseX.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/phaseX.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1421,6 +1421,9 @@
if (cast != NULL && cast->has_range_check()) {
C->remove_range_check_cast(cast);
}
+ if (dead->Opcode() == Op_Opaque4) {
+ C->remove_opaque4_node(dead);
+ }
}
} // while (_stack.is_nonempty())
}
--- a/src/hotspot/share/opto/runtime.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/runtime.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
+#include "code/compiledMethod.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
@@ -60,8 +61,9 @@
#include "opto/runtime.hpp"
#include "opto/subnode.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
--- a/src/hotspot/share/opto/superword.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/opto/superword.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -3328,7 +3328,7 @@
return NULL;
}
- Node* p_f = cl->skip_strip_mined()->in(LoopNode::EntryControl)->in(0)->in(0);
+ Node* p_f = cl->skip_predicates()->in(0)->in(0);
if (!p_f->is_IfFalse()) return NULL;
if (!p_f->in(0)->is_CountedLoopEnd()) return NULL;
CountedLoopEndNode* pre_end = p_f->in(0)->as_CountedLoopEnd();
--- a/src/hotspot/share/precompiled/precompiled.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/precompiled/precompiled.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -54,7 +54,7 @@
# include "ci/ciSymbol.hpp"
# include "ci/ciType.hpp"
# include "ci/ciTypeArrayKlass.hpp"
-# include "ci/ciUtilities.hpp"
+# include "ci/ciUtilities.inline.hpp"
# include "ci/compilerInterface.hpp"
# include "classfile/classFileParser.hpp"
# include "classfile/classFileStream.hpp"
@@ -95,7 +95,7 @@
# include "gc/shared/ageTable.hpp"
# include "gc/shared/barrierSet.hpp"
# include "gc/shared/blockOffsetTable.hpp"
-# include "gc/shared/cardTableModRefBS.hpp"
+# include "gc/shared/cardTableBarrierSet.hpp"
# include "gc/shared/collectedHeap.hpp"
# include "gc/shared/collectorCounters.hpp"
# include "gc/shared/collectorPolicy.hpp"
@@ -176,7 +176,7 @@
# include "runtime/handles.inline.hpp"
# include "runtime/icache.hpp"
# include "runtime/init.hpp"
-# include "runtime/interfaceSupport.hpp"
+# include "runtime/interfaceSupport.inline.hpp"
# include "runtime/java.hpp"
# include "runtime/javaCalls.hpp"
# include "runtime/javaFrameAnchor.hpp"
--- a/src/hotspot/share/prims/forte.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/forte.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,9 +30,10 @@
#include "memory/universe.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/thread.inline.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
// call frame copied from old .h file and renamed
--- a/src/hotspot/share/prims/jni.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jni.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -36,7 +36,6 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
-#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/allocation.hpp"
#include "memory/allocation.inline.hpp"
@@ -65,7 +64,7 @@
#include "runtime/compilationPolicy.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
@@ -3149,11 +3148,11 @@
JNI_ENTRY(void*, jni_GetPrimitiveArrayCritical(JNIEnv *env, jarray array, jboolean *isCopy))
JNIWrapper("GetPrimitiveArrayCritical");
HOTSPOT_JNI_GETPRIMITIVEARRAYCRITICAL_ENTRY(env, array, (uintptr_t *) isCopy);
- GCLocker::lock_critical(thread);
if (isCopy != NULL) {
*isCopy = JNI_FALSE;
}
oop a = JNIHandles::resolve_non_null(array);
+ a = Universe::heap()->pin_object(thread, a);
assert(a->is_array(), "just checking");
BasicType type;
if (a->is_objArray()) {
@@ -3170,8 +3169,8 @@
JNI_ENTRY(void, jni_ReleasePrimitiveArrayCritical(JNIEnv *env, jarray array, void *carray, jint mode))
JNIWrapper("ReleasePrimitiveArrayCritical");
HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_ENTRY(env, array, carray, mode);
- // The array, carray and mode arguments are ignored
- GCLocker::unlock_critical(thread);
+ oop a = JNIHandles::resolve_non_null(array);
+ Universe::heap()->unpin_object(thread, a);
HOTSPOT_JNI_RELEASEPRIMITIVEARRAYCRITICAL_RETURN();
JNI_END
@@ -3179,8 +3178,8 @@
JNI_ENTRY(const jchar*, jni_GetStringCritical(JNIEnv *env, jstring string, jboolean *isCopy))
JNIWrapper("GetStringCritical");
HOTSPOT_JNI_GETSTRINGCRITICAL_ENTRY(env, string, (uintptr_t *) isCopy);
- GCLocker::lock_critical(thread);
oop s = JNIHandles::resolve_non_null(string);
+ s = Universe::heap()->pin_object(thread, s);
typeArrayOop s_value = java_lang_String::value(s);
bool is_latin1 = java_lang_String::is_latin1(s);
if (isCopy != NULL) {
@@ -3217,7 +3216,7 @@
// This assumes that ReleaseStringCritical bookends GetStringCritical.
FREE_C_HEAP_ARRAY(jchar, chars);
}
- GCLocker::unlock_critical(thread);
+ Universe::heap()->unpin_object(thread, s);
HOTSPOT_JNI_RELEASESTRINGCRITICAL_RETURN();
JNI_END
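
The hunks above replace the global GCLocker critical section with per-object
pinning, but the JNI contract for callers is unchanged. As an illustrative
aside (not from this changeset), a minimal caller-side sketch in standard
JNI (the native method Java_Demo_sum is hypothetical):

  #include <jni.h>

  extern "C" JNIEXPORT jlong JNICALL
  Java_Demo_sum(JNIEnv* env, jclass, jintArray arr) {
    const jsize len = env->GetArrayLength(arr);
    // Pins (or copies) the array; on the VM side this now calls pin_object().
    jint* p = static_cast<jint*>(env->GetPrimitiveArrayCritical(arr, nullptr));
    if (p == nullptr) return 0;   // out of memory
    jlong sum = 0;
    for (jsize i = 0; i < len; i++) sum += p[i];
    // Must bookend the Get; this is what unpins the object. JNI_ABORT is
    // fine here because the buffer was not modified.
    env->ReleasePrimitiveArrayCritical(arr, p, JNI_ABORT);
    return sum;
  }
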
--- a/src/hotspot/share/prims/jniCheck.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jniCheck.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -36,8 +36,8 @@
#include "prims/jniCheck.hpp"
#include "prims/jvm_misc.hpp"
#include "runtime/fieldDescriptor.hpp"
-#include "runtime/handles.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/share/prims/jvm.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvm.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -57,7 +57,7 @@
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
@@ -68,7 +68,7 @@
#include "runtime/reflection.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "runtime/vm_version.hpp"
#include "services/attachListener.hpp"
--- a/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiClassFileReconstituter.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "interpreter/bytecodeStream.hpp"
#include "oops/fieldStreams.hpp"
#include "prims/jvmtiClassFileReconstituter.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/signature.hpp"
#include "utilities/bytes.hpp"
@@ -34,6 +35,19 @@
// FIXME: fix Synthetic attribute
// FIXME: per Serguei, add error return handling for ConstantPool::copy_cpool_bytes()
+JvmtiConstantPoolReconstituter::JvmtiConstantPoolReconstituter(InstanceKlass* ik) {
+ set_error(JVMTI_ERROR_NONE);
+ _ik = ik;
+ _cpool = constantPoolHandle(Thread::current(), ik->constants());
+ _symmap = new SymbolHashMap();
+ _classmap = new SymbolHashMap();
+ _cpool_size = _cpool->hash_entries_to(_symmap, _classmap);
+ if (_cpool_size == 0) {
+ set_error(JVMTI_ERROR_OUT_OF_MEMORY);
+ } else if (_cpool_size < 0) {
+ set_error(JVMTI_ERROR_INTERNAL);
+ }
+}
// Write the field information portion of ClassFile structure
// JVMSpec| u2 fields_count;
--- a/src/hotspot/share/prims/jvmtiClassFileReconstituter.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiClassFileReconstituter.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -52,19 +52,7 @@
public:
// Calls to this constructor must be preceded by a ResourceMark
// and a HandleMark
- JvmtiConstantPoolReconstituter(InstanceKlass* ik){
- set_error(JVMTI_ERROR_NONE);
- _ik = ik;
- _cpool = constantPoolHandle(Thread::current(), ik->constants());
- _symmap = new SymbolHashMap();
- _classmap = new SymbolHashMap();
- _cpool_size = _cpool->hash_entries_to(_symmap, _classmap);
- if (_cpool_size == 0) {
- set_error(JVMTI_ERROR_OUT_OF_MEMORY);
- } else if (_cpool_size < 0) {
- set_error(JVMTI_ERROR_INTERNAL);
- }
- }
+ JvmtiConstantPoolReconstituter(InstanceKlass* ik);
~JvmtiConstantPoolReconstituter() {
if (_symmap != NULL) {
--- a/src/hotspot/share/prims/jvmtiEnter.hpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_PRIMS_JVMTIENTER_HPP
-#define SHARE_VM_PRIMS_JVMTIENTER_HPP
-
-#include "classfile/systemDictionary.hpp"
-#include "jvmtifiles/jvmtiEnv.hpp"
-#include "memory/resourceArea.hpp"
-#include "prims/jvmtiImpl.hpp"
-#include "runtime/interfaceSupport.hpp"
-
-#endif // SHARE_VM_PRIMS_JVMTIENTER_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/prims/jvmtiEnter.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_PRIMS_JVMTIENTER_INLINE_HPP
+#define SHARE_VM_PRIMS_JVMTIENTER_INLINE_HPP
+
+#include "classfile/systemDictionary.hpp"
+#include "jvmtifiles/jvmtiEnv.hpp"
+#include "memory/resourceArea.hpp"
+#include "prims/jvmtiImpl.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+
+#endif // SHARE_VM_PRIMS_JVMTIENTER_INLINE_HPP
--- a/src/hotspot/share/prims/jvmtiEnter.xsl Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnter.xsl Sat Mar 24 01:08:35 2018 +0100
@@ -42,7 +42,7 @@
#if INCLUDE_JVMTI
# include "logging/log.hpp"
# include "oops/oop.inline.hpp"
-# include "prims/jvmtiEnter.hpp"
+# include "prims/jvmtiEnter.inline.hpp"
# include "prims/jvmtiRawMonitor.hpp"
# include "prims/jvmtiUtil.hpp"
# include "runtime/threadSMR.hpp"
--- a/src/hotspot/share/prims/jvmtiEnv.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnv.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -55,17 +55,18 @@
#include "prims/jvmtiUtil.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/jniHandles.inline.hpp"
+#include "runtime/objectMonitor.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/reflectionUtils.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/timerTrace.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "runtime/vmThread.hpp"
#include "services/threadService.hpp"
#include "utilities/exceptions.hpp"
--- a/src/hotspot/share/prims/jvmtiEnvBase.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnvBase.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -38,7 +38,8 @@
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/deoptimization.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jfieldIDWorkaround.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
--- a/src/hotspot/share/prims/jvmtiEnvBase.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnvBase.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,11 +29,9 @@
#include "prims/jvmtiEnvThreadState.hpp"
#include "prims/jvmtiEventController.hpp"
#include "prims/jvmtiThreadState.hpp"
-#include "prims/jvmtiThreadState.inline.hpp"
#include "oops/oopHandle.hpp"
#include "runtime/fieldDescriptor.hpp"
#include "runtime/frame.hpp"
-#include "runtime/handles.inline.hpp"
#include "runtime/thread.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/growableArray.hpp"
--- a/src/hotspot/share/prims/jvmtiEnvThreadState.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiEnvThreadState.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,7 @@
#include "prims/jvmtiImpl.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/signature.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiExport.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiExport.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -48,7 +48,7 @@
#include "prims/jvmtiThreadState.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/handles.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/objectMonitor.hpp"
@@ -56,8 +56,7 @@
#include "runtime/os.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.hpp"
-#include "runtime/vframe.hpp"
-#include "services/serviceUtil.hpp"
+#include "runtime/vframe.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc/parallel/psMarkSweep.hpp"
@@ -2363,10 +2362,6 @@
void JvmtiExport::post_monitor_contended_enter(JavaThread *thread, ObjectMonitor *obj_mntr) {
oop object = (oop)obj_mntr->object();
- if (!ServiceUtil::visible_oop(object)) {
- // Ignore monitor contended enter for vm internal object.
- return;
- }
JvmtiThreadState *state = thread->jvmti_thread_state();
if (state == NULL) {
return;
@@ -2398,10 +2393,6 @@
void JvmtiExport::post_monitor_contended_entered(JavaThread *thread, ObjectMonitor *obj_mntr) {
oop object = (oop)obj_mntr->object();
- if (!ServiceUtil::visible_oop(object)) {
- // Ignore monitor contended entered for vm internal object.
- return;
- }
JvmtiThreadState *state = thread->jvmti_thread_state();
if (state == NULL) {
return;
@@ -2465,10 +2456,6 @@
void JvmtiExport::post_monitor_waited(JavaThread *thread, ObjectMonitor *obj_mntr, jboolean timed_out) {
oop object = (oop)obj_mntr->object();
- if (!ServiceUtil::visible_oop(object)) {
- // Ignore monitor waited for vm internal object.
- return;
- }
JvmtiThreadState *state = thread->jvmti_thread_state();
if (state == NULL) {
return;
@@ -2761,9 +2748,7 @@
set_enabled(false);
for (int i = 0; i < _allocated->length(); i++) {
oop obj = _allocated->at(i);
- if (ServiceUtil::visible_oop(obj)) {
- JvmtiExport::post_vm_object_alloc(JavaThread::current(), obj);
- }
+ JvmtiExport::post_vm_object_alloc(JavaThread::current(), obj);
}
delete _allocated;
}
--- a/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiGetLoadedClasses.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,13 +27,10 @@
#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "prims/jvmtiGetLoadedClasses.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/stack.inline.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/g1/g1BarrierSet.hpp"
-#endif
-
// The closure for GetLoadedClasses
class LoadedClassesClosure : public KlassClosure {
@@ -42,20 +39,6 @@
JvmtiEnv* _env;
Thread* _cur_thread;
-// Tell the GC to keep this klass alive
-static void ensure_klass_alive(oop o) {
- // A klass that was previously considered dead can be looked up in the
- // CLD/SD, and its _java_mirror or _class_loader can be stored in a root
- // or a reachable object making it alive again. The SATB part of G1 needs
- // to get notified about this potential resurrection, otherwise the marking
- // might not find the object.
-#if INCLUDE_ALL_GCS
- if (UseG1GC && o != NULL) {
- G1BarrierSet::enqueue(o);
- }
-#endif
-}
-
public:
LoadedClassesClosure(Thread* thread, JvmtiEnv* env) : _cur_thread(thread), _env(env) {
assert(_cur_thread == Thread::current(), "must be current thread");
@@ -64,7 +47,6 @@
void do_klass(Klass* k) {
// Collect all jclasses
_classStack.push((jclass) _env->jni_reference(Handle(_cur_thread, k->java_mirror())));
- ensure_klass_alive(k->java_mirror());
}
int extract(jclass* result_list) {
--- a/src/hotspot/share/prims/jvmtiImpl.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiImpl.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,9 +39,9 @@
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
-#include "runtime/handles.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
--- a/src/hotspot/share/prims/jvmtiRawMonitor.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiRawMonitor.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "memory/allocation.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/thread.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -47,6 +47,7 @@
#include "prims/resolvedMethodTable.hpp"
#include "prims/methodComparator.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/relocator.hpp"
#include "utilities/bitMap.inline.hpp"
--- a/src/hotspot/share/prims/jvmtiTagMap.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiTagMap.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -45,6 +45,8 @@
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutex.hpp"
@@ -55,7 +57,6 @@
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
-#include "services/serviceUtil.hpp"
#include "utilities/macros.hpp"
// JvmtiTagHashmapEntry
@@ -1324,9 +1325,6 @@
// check if iteration has been halted
if (is_iteration_aborted()) return;
- // ignore any objects that aren't visible to profiler
- if (!ServiceUtil::visible_oop(o)) return;
-
// instanceof check when filtering by klass
if (klass() != NULL && !o->is_a(klass())) {
return;
@@ -1407,9 +1405,6 @@
// check if iteration has been halted
if (is_iteration_aborted()) return;
- // ignore any objects that aren't visible to profiler
- if (!ServiceUtil::visible_oop(obj)) return;
-
// apply class filter
if (is_filtered_by_klass_filter(obj, klass())) return;
@@ -1987,8 +1982,6 @@
// invoke basic style heap root callback
inline bool CallbackInvoker::invoke_basic_heap_root_callback(jvmtiHeapRootKind root_kind, oop obj) {
- assert(ServiceUtil::visible_oop(obj), "checking");
-
// if heap roots should be reported
jvmtiHeapRootCallback cb = basic_context()->heap_root_callback();
if (cb == NULL) {
@@ -2016,8 +2009,6 @@
jmethodID method,
int slot,
oop obj) {
- assert(ServiceUtil::visible_oop(obj), "checking");
-
// if stack refs should be reported
jvmtiStackReferenceCallback cb = basic_context()->stack_ref_callback();
if (cb == NULL) {
@@ -2048,9 +2039,6 @@
oop referree,
jint index) {
- assert(ServiceUtil::visible_oop(referrer), "checking");
- assert(ServiceUtil::visible_oop(referree), "checking");
-
BasicHeapWalkContext* context = basic_context();
// callback requires the referrer's tag. If it's the same referrer
@@ -2092,8 +2080,6 @@
// invoke advanced style heap root callback
inline bool CallbackInvoker::invoke_advanced_heap_root_callback(jvmtiHeapReferenceKind ref_kind,
oop obj) {
- assert(ServiceUtil::visible_oop(obj), "checking");
-
AdvancedHeapWalkContext* context = advanced_context();
// check that callback is provided
@@ -2148,8 +2134,6 @@
jlocation bci,
jint slot,
oop obj) {
- assert(ServiceUtil::visible_oop(obj), "checking");
-
AdvancedHeapWalkContext* context = advanced_context();
// check that callback is provided
@@ -2223,9 +2207,6 @@
// field index is only valid field in reference_info
static jvmtiHeapReferenceInfo reference_info = { 0 };
- assert(ServiceUtil::visible_oop(referrer), "checking");
- assert(ServiceUtil::visible_oop(obj), "checking");
-
AdvancedHeapWalkContext* context = advanced_context();
// check that callback is provided
@@ -2279,7 +2260,6 @@
inline bool CallbackInvoker::report_simple_root(jvmtiHeapReferenceKind kind, oop obj) {
assert(kind != JVMTI_HEAP_REFERENCE_STACK_LOCAL &&
kind != JVMTI_HEAP_REFERENCE_JNI_LOCAL, "not a simple root");
- assert(ServiceUtil::visible_oop(obj), "checking");
if (is_basic_heap_walk()) {
// map to old style root kind
@@ -2604,13 +2584,6 @@
}
}
- // some objects are ignored - in the case of simple
- // roots it's mostly Symbol*s that we are skipping
- // here.
- if (!ServiceUtil::visible_oop(o)) {
- return;
- }
-
// invoke the callback
_continue = CallbackInvoker::report_simple_root(kind, o);
@@ -2651,10 +2624,6 @@
return;
}
- if (!ServiceUtil::visible_oop(o)) {
- return;
- }
-
// invoke the callback
_continue = CallbackInvoker::report_jni_local_root(_thread_tag, _tid, _depth, _method, o);
}
@@ -2982,7 +2951,7 @@
if (!is_primitive_field_type(type)) {
oop fld_o = o->obj_field(field->field_offset());
// ignore any objects that aren't visible to profiler
- if (fld_o != NULL && ServiceUtil::visible_oop(fld_o)) {
+ if (fld_o != NULL) {
assert(Universe::heap()->is_in_reserved(fld_o), "unsafe code should not "
"have references to Klass* anymore");
int slot = field->field_index();
--- a/src/hotspot/share/prims/jvmtiUtil.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/jvmtiUtil.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "prims/jvmtiUtil.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/vm_operations.hpp"
#include "utilities/exceptions.hpp"
--- a/src/hotspot/share/prims/methodHandles.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/methodHandles.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -39,6 +39,7 @@
#include "oops/typeArrayOop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/compilationPolicy.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/timerTrace.hpp"
--- a/src/hotspot/share/prims/methodHandles.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/methodHandles.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,13 +27,13 @@
#include "classfile/javaClasses.hpp"
#include "classfile/vmSymbols.hpp"
-#include "runtime/frame.inline.hpp"
+#include "runtime/frame.hpp"
#include "runtime/globals.hpp"
-#include "runtime/interfaceSupport.hpp"
#include "utilities/macros.hpp"
#ifdef ZERO
# include "entry_zero.hpp"
+# include "interpreter/interpreter.hpp"
#endif
--- a/src/hotspot/share/prims/perf.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/perf.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,7 +29,7 @@
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/perfData.inline.hpp"
#include "runtime/perfMemory.hpp"
--- a/src/hotspot/share/prims/privilegedStack.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/privilegedStack.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/privilegedStack.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
void PrivilegedElement::initialize(vframeStream* vfst, oop context, PrivilegedElement* next, TRAPS) {
Method* method = vfst->method();
--- a/src/hotspot/share/prims/stackwalk.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/stackwalk.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "utilities/globalDefinitions.hpp"
// setup and cleanup actions
@@ -64,6 +64,8 @@
_need_method_info = StackWalk::need_method_info(mode);
}
+void JavaFrameStream::next() { _vfst.next();}
+
// Returns the BaseFrameStream for the current stack being traversed.
//
// Parameters:
--- a/src/hotspot/share/prims/stackwalk.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/stackwalk.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -80,7 +80,7 @@
public:
JavaFrameStream(JavaThread* thread, int mode);
- void next() { _vfst.next();}
+ void next();
bool at_end() { return _vfst.at_end(); }
Method* method() { return _vfst.method(); }
--- a/src/hotspot/share/prims/unsafe.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/unsafe.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -37,7 +37,7 @@
#include "prims/unsafe.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/reflection.hpp"
@@ -367,7 +367,7 @@
size_t sz = (size_t)size;
sz = align_up(sz, HeapWordSize);
- void* x = os::malloc(sz, mtInternal);
+ void* x = os::malloc(sz, mtOther);
return addr_to_java(x);
} UNSAFE_END
@@ -377,7 +377,7 @@
size_t sz = (size_t)size;
sz = align_up(sz, HeapWordSize);
- void* x = os::realloc(p, sz, mtInternal);
+ void* x = os::realloc(p, sz, mtOther);
return addr_to_java(x);
} UNSAFE_END
--- a/src/hotspot/share/prims/wbtestmethods/parserTests.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/wbtestmethods/parserTests.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,9 +29,9 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayOop.inline.hpp"
-#include "prims/whitebox.hpp"
+#include "prims/whitebox.inline.hpp"
#include "prims/wbtestmethods/parserTests.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "services/diagnosticArgument.hpp"
#include "services/diagnosticFramework.hpp"
--- a/src/hotspot/share/prims/whitebox.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/whitebox.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -47,12 +47,13 @@
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "prims/wbtestmethods/parserTests.hpp"
-#include "prims/whitebox.hpp"
+#include "prims/whitebox.inline.hpp"
#include "runtime/arguments.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handshake.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
@@ -88,6 +89,22 @@
#define SIZE_T_MAX_VALUE ((size_t) -1)
+#define CHECK_JNI_EXCEPTION_(env, value) \
+ do { \
+ JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
+ if (HAS_PENDING_EXCEPTION) { \
+ return(value); \
+ } \
+ } while (0)
+
+#define CHECK_JNI_EXCEPTION(env) \
+ do { \
+ JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
+ if (HAS_PENDING_EXCEPTION) { \
+ return; \
+ } \
+ } while (0)
+
bool WhiteBox::_used = false;
volatile bool WhiteBox::compilation_locked = false;
@@ -849,14 +866,21 @@
bool WhiteBox::compile_method(Method* method, int comp_level, int bci, Thread* THREAD) {
// Screen for unavailable/bad comp level or null method
- if (method == NULL || comp_level > MIN2((CompLevel) TieredStopAtLevel, CompLevel_highest_tier) ||
- CompileBroker::compiler(comp_level) == NULL) {
+ AbstractCompiler* comp = CompileBroker::compiler(comp_level);
+ if (method == NULL || comp_level > MIN2((CompLevel) TieredStopAtLevel, CompLevel_highest_tier) || comp == NULL) {
return false;
}
+
+ // Check if compilation is blocking
methodHandle mh(THREAD, method);
+ DirectiveSet* directive = DirectivesStack::getMatchingDirective(mh, comp);
+ bool is_blocking = !directive->BackgroundCompilationOption;
+ DirectivesStack::release(directive);
+
+ // Compile method and check result
nmethod* nm = CompileBroker::compile_method(mh, bci, comp_level, mh, mh->invocation_count(), CompileTask::Reason_Whitebox, THREAD);
MutexLockerEx mu(Compile_lock);
- return (mh->queued_for_compilation() || nm != NULL);
+ return ((!is_blocking && mh->queued_for_compilation()) || nm != NULL);
}
WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobject method, jint comp_level, jint bci))
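
The compile_method() change above makes the reported success condition
sensitive to whether the compile is blocking: a synchronous (blocking)
compile either produces an nmethod or fails, so a queued request only counts
as success when compiling in the background. As an illustrative aside (not
from this changeset), the new condition restated as a standalone predicate
with hypothetical argument names:

  bool compile_succeeded(bool is_blocking, bool queued, bool have_nmethod) {
    return (!is_blocking && queued) || have_nmethod;
  }
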
--- a/src/hotspot/share/prims/whitebox.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/prims/whitebox.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,8 @@
#include "memory/allocation.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/symbol.hpp"
-#include "runtime/interfaceSupport.hpp"
+
+#define WB_METHOD_DECLARE(result_type) extern "C" result_type JNICALL
// Unconditionally clear pedantic pending JNI checks
class ClearPendingJniExcCheck : public StackObj {
@@ -44,30 +45,6 @@
}
};
-// Entry macro to transition from JNI to VM state.
-
-#define WB_ENTRY(result_type, header) JNI_ENTRY(result_type, header) \
- ClearPendingJniExcCheck _clearCheck(env);
-
-#define WB_END JNI_END
-#define WB_METHOD_DECLARE(result_type) extern "C" result_type JNICALL
-
-#define CHECK_JNI_EXCEPTION_(env, value) \
- do { \
- JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
- if (HAS_PENDING_EXCEPTION) { \
- return(value); \
- } \
- } while (0)
-
-#define CHECK_JNI_EXCEPTION(env) \
- do { \
- JavaThread* THREAD = JavaThread::thread_from_jni_environment(env); \
- if (HAS_PENDING_EXCEPTION) { \
- return; \
- } \
- } while (0)
-
class CodeBlob;
class CodeHeap;
class JavaThread;
@@ -93,6 +70,4 @@
static bool compile_method(Method* method, int comp_level, int bci, Thread* THREAD);
};
-
-
#endif // SHARE_VM_PRIMS_WHITEBOX_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/prims/whitebox.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_PRIMS_WHITEBOX_INLINE_HPP
+#define SHARE_VM_PRIMS_WHITEBOX_INLINE_HPP
+
+#include "prims/whitebox.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+
+// Entry macro to transition from JNI to VM state.
+
+#define WB_ENTRY(result_type, header) JNI_ENTRY(result_type, header) \
+ ClearPendingJniExcCheck _clearCheck(env);
+
+#define WB_END JNI_END
+
+#endif // SHARE_VM_PRIMS_WHITEBOX_INLINE_HPP
--- a/src/hotspot/share/runtime/advancedThresholdPolicy.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/advancedThresholdPolicy.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"
#if INCLUDE_JVMCI
#include "jvmci/jvmciRuntime.hpp"
--- a/src/hotspot/share/runtime/arguments.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/arguments.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -517,6 +517,7 @@
{ "PrintSafepointStatistics", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
{ "PrintSafepointStatisticsTimeout", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
{ "PrintSafepointStatisticsCount",JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
+ { "AggressiveOpts", JDK_Version::jdk(11), JDK_Version::jdk(12), JDK_Version::jdk(13) },
// --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
{ "DefaultMaxRAMFraction", JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
@@ -2342,10 +2343,6 @@
}
LoopStripMiningIter = 0;
}
- if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
- // blind guess
- LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;
- }
#endif
if (!FLAG_IS_DEFAULT(AllocateHeapAt)) {
if ((UseNUMAInterleaving && !FLAG_IS_DEFAULT(UseNUMAInterleaving)) || (UseNUMA && !FLAG_IS_DEFAULT(UseNUMA))) {
@@ -4339,6 +4336,10 @@
// nothing to use the profiling, turn if off
FLAG_SET_DEFAULT(TypeProfileLevel, 0);
}
+ if (FLAG_IS_DEFAULT(LoopStripMiningIterShortLoop)) {
+ // blind guess
+ LoopStripMiningIterShortLoop = LoopStripMiningIter / 10;
+ }
#endif
if (PrintAssembly && FLAG_IS_DEFAULT(DebugNonSafepoints)) {
--- a/src/hotspot/share/runtime/biasedLocking.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/biasedLocking.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,6 +31,7 @@
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
--- a/src/hotspot/share/runtime/deoptimization.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/deoptimization.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,7 +46,8 @@
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
--- a/src/hotspot/share/runtime/deoptimization.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/deoptimization.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define SHARE_VM_RUNTIME_DEOPTIMIZATION_HPP
#include "memory/allocation.hpp"
-#include "runtime/frame.inline.hpp"
+#include "runtime/frame.hpp"
class ProfileData;
class vframeArray;
--- a/src/hotspot/share/runtime/fieldDescriptor.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/fieldDescriptor.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,8 @@
#define SHARE_VM_RUNTIME_FIELDDESCRIPTOR_HPP
#include "oops/constantPool.hpp"
+#include "oops/fieldInfo.hpp"
+#include "oops/instanceKlass.hpp"
#include "oops/symbol.hpp"
#include "runtime/fieldType.hpp"
#include "utilities/accessFlags.hpp"
--- a/src/hotspot/share/runtime/frame.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/frame.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -269,7 +269,6 @@
// expression stack (may go up or down, direction == 1 or -1)
public:
intptr_t* interpreter_frame_expression_stack() const;
- static jint interpreter_frame_expression_stack_direction();
// The _at version returns a pointer because the address is used for GC.
intptr_t* interpreter_frame_expression_stack_at(jint offset) const;
@@ -462,7 +461,7 @@
StackFrameStream(JavaThread *thread, bool update = true);
// Iteration
- bool is_done() { return (_is_done) ? true : (_is_done = _fr.is_first_frame(), false); }
+ inline bool is_done();
void next() { if (!_is_done) _fr = _fr.sender(&_reg_map); }
// Query
--- a/src/hotspot/share/runtime/frame.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/frame.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -63,4 +63,8 @@
}
}
+inline bool StackFrameStream::is_done() {
+ return (_is_done) ? true : (_is_done = _fr.is_first_frame(), false);
+}
+
#endif // SHARE_VM_RUNTIME_FRAME_INLINE_HPP
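
Moving is_done() out of line lets frame.hpp drop its dependency on
frame.inline.hpp; the iteration idiom StackFrameStream supports is
unchanged. As an illustrative aside (not from this changeset), a typical
walk, assuming HotSpot-internal types (JavaThread, frame, and a current()
accessor on the stream):

  void walk_stack_sketch(JavaThread* thread) {
    for (StackFrameStream fst(thread); !fst.is_done(); fst.next()) {
      frame* fr = fst.current();   // current physical frame
      // inspect *fr ...
    }
  }
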
--- a/src/hotspot/share/runtime/globals.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/globals.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -447,8 +447,7 @@
_name);
return Flag::NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD;
}
- get_locked_message_ext(buf, buflen);
- return Flag::NONE;
+ return get_locked_message_ext(buf, buflen);
}
bool Flag::is_writeable() const {
@@ -462,6 +461,18 @@
return is_manageable() || is_external_ext();
}
+// Helper function for Flag::print_on().
+// Fills current line up to requested position.
+// Should the current position already be past the requested position,
+// one separator blank is enforced.
+void fill_to_pos(outputStream* st, unsigned int req_pos) {
+ if ((unsigned int)st->position() < req_pos) {
+ st->fill_to(req_pos); // need to fill with blanks to reach req_pos
+ } else {
+ st->print(" "); // enforce blank separation. Previous field too long.
+ }
+}
+
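
As an illustrative aside (not from this changeset), a standalone analogue of
the fill_to_pos() helper above, using std::string in place of outputStream to
show the padding rule:

  #include <string>

  void fill_to_pos_sketch(std::string& line, size_t req_pos) {
    if (line.size() < req_pos) {
      line.append(req_pos - line.size(), ' ');  // pad with blanks to req_pos
    } else {
      line.push_back(' ');                      // previous field too long
    }
  }
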
void Flag::print_on(outputStream* st, bool withComments, bool printRanges) {
// Don't print notproduct and develop flags in a product build.
if (is_constant_in_binary()) {
@@ -469,36 +480,82 @@
}
if (!printRanges) {
- // Use some named constants to make code more readable.
- const unsigned int nSpaces = 10;
- const unsigned int maxFlagLen = 40 + nSpaces;
+ // The command line options -XX:+PrintFlags* cause this function to be called
+ // for each existing flag to print information pertinent to this flag. The data
+ // is displayed in columnar form, with the following layout:
+ // col1 - data type, right-justified
+ // col2 - name, left-justified
+ // col3 - ' =' double-char, leading space to align with possible '+='
+ // col4 - value left-justified
+ // col5 - kind right-justified
+ // col6 - origin left-justified
+ // col7 - comments left-justified
+ //
+ // The column widths are fixed. They are defined such that, for most cases,
+ // an eye-pleasing tabular output is created.
+ //
+ // Sample output:
+ // bool CMSScavengeBeforeRemark = false {product} {default}
+ // uintx CMSScheduleRemarkEdenPenetration = 50 {product} {default}
+ // size_t CMSScheduleRemarkEdenSizeThreshold = 2097152 {product} {default}
+ // uintx CMSScheduleRemarkSamplingRatio = 5 {product} {default}
+ // double CMSSmallCoalSurplusPercent = 1.050000 {product} {default}
+ // ccstr CompileCommandFile = MyFile.cmd {product} {command line}
+ // ccstrlist CompileOnly = Method1
+ // CompileOnly += Method2 {product} {command line}
+ // | | | | | | |
+ // | | | | | | +-- col7
+ // | | | | | +-- col6
+ // | | | | +-- col5
+ // | | | +-- col4
+ // | | +-- col3
+ // | +-- col2
+ // +-- col1
- // The print below assumes that the flag name is 40 characters or less.
- // This works for most flags, but there are exceptions. Our longest flag
- // name right now is UseAdaptiveGenerationSizePolicyAtMajorCollection and
- // its minor collection buddy. These are 48 characters. We use a buffer of
- // nSpaces spaces below to adjust the space between the flag value and the
- // column of flag type and origin that is printed in the end of the line.
- char spaces[nSpaces + 1] = " ";
- st->print("%9s %-*s = ", _type, maxFlagLen-nSpaces, _name);
+ const unsigned int col_spacing = 1;
+ const unsigned int col1_pos = 0;
+ const unsigned int col1_width = 9;
+ const unsigned int col2_pos = col1_pos + col1_width + col_spacing;
+ const unsigned int col2_width = 39;
+ const unsigned int col3_pos = col2_pos + col2_width + col_spacing;
+ const unsigned int col3_width = 2;
+ const unsigned int col4_pos = col3_pos + col3_width + col_spacing;
+ const unsigned int col4_width = 30;
+ const unsigned int col5_pos = col4_pos + col4_width + col_spacing;
+ const unsigned int col5_width = 20;
+ const unsigned int col6_pos = col5_pos + col5_width + col_spacing;
+ const unsigned int col6_width = 15;
+ const unsigned int col7_pos = col6_pos + col6_width + col_spacing;
+ const unsigned int col7_width = 1;
+ st->fill_to(col1_pos);
+ st->print("%*s", col1_width, _type); // right-justified, therefore width is required.
+
+ fill_to_pos(st, col2_pos);
+ st->print("%s", _name);
+
+ fill_to_pos(st, col3_pos);
+ st->print(" ="); // use " =" for proper alignment with multiline ccstr output.
+
+ fill_to_pos(st, col4_pos);
if (is_bool()) {
- st->print("%-20s", get_bool() ? "true" : "false");
+ st->print("%s", get_bool() ? "true" : "false");
} else if (is_int()) {
- st->print("%-20d", get_int());
+ st->print("%d", get_int());
} else if (is_uint()) {
- st->print("%-20u", get_uint());
+ st->print("%u", get_uint());
} else if (is_intx()) {
- st->print(INTX_FORMAT_W(-20), get_intx());
+ st->print(INTX_FORMAT, get_intx());
} else if (is_uintx()) {
- st->print(UINTX_FORMAT_W(-20), get_uintx());
+ st->print(UINTX_FORMAT, get_uintx());
} else if (is_uint64_t()) {
- st->print(UINT64_FORMAT_W(-20), get_uint64_t());
+ st->print(UINT64_FORMAT, get_uint64_t());
} else if (is_size_t()) {
- st->print(SIZE_FORMAT_W(-20), get_size_t());
+ st->print(SIZE_FORMAT, get_size_t());
} else if (is_double()) {
- st->print("%-20f", get_double());
+ st->print("%f", get_double());
} else if (is_ccstr()) {
+ // Honor <newline> characters in ccstr: print multiple lines.
const char* cp = get_ccstr();
if (cp != NULL) {
const char* eol;
@@ -507,31 +564,85 @@
st->print("%.*s", (int)llen, cp);
st->cr();
cp = eol+1;
- st->print("%5s %-35s += ", "", _name);
+ fill_to_pos(st, col2_pos);
+ st->print("%s", _name);
+ fill_to_pos(st, col3_pos);
+ st->print("+=");
+ fill_to_pos(st, col4_pos);
}
- st->print("%-20s", cp);
+ st->print("%s", cp);
}
- else st->print("%-20s", "");
+ } else {
+ st->print("unhandled type %s", _type);
+ st->cr();
+ return;
}
- // Make sure we do not punch a '\0' at a negative char array index.
- unsigned int nameLen = (unsigned int)strlen(_name);
- if (nameLen <= maxFlagLen) {
- spaces[maxFlagLen - MAX2(maxFlagLen-nSpaces, nameLen)] = '\0';
- st->print("%s", spaces);
- }
- print_kind_and_origin(st);
+
+ fill_to_pos(st, col5_pos);
+ print_kind(st, col5_width);
+
+ fill_to_pos(st, col6_pos);
+ print_origin(st, col6_width);
#ifndef PRODUCT
if (withComments) {
+ fill_to_pos(st, col7_pos);
st->print("%s", _doc);
}
#endif
-
st->cr();
+ } else if (!is_bool() && !is_ccstr()) {
+ // The command line options -XX:+PrintFlags* cause this function to be called
+ // for each existing flag to print information pertinent to this flag. The data
+ // is displayed in columnar form, with the following layout:
+ // col1 - data type, right-justified
+ // col2 - name, left-justified
+ // col4 - range [ min ... max]
+ // col5 - kind right-justified
+ // col6 - origin left-justified
+ // col7 - comments left-justified
+ //
+ // The column widths are fixed. They are defined such that, for most cases,
+ // an eye-pleasing tabular output is created.
+ //
+ // Sample output:
+ // intx MinPassesBeforeFlush [ 0 ... 9223372036854775807 ] {diagnostic} {default}
+ // uintx MinRAMFraction [ 1 ... 18446744073709551615 ] {product} {default}
+ // double MinRAMPercentage [ 0.000 ... 100.000 ] {product} {default}
+ // uintx MinSurvivorRatio [ 3 ... 18446744073709551615 ] {product} {default}
+ // size_t MinTLABSize [ 1 ... 9223372036854775807 ] {product} {default}
+ // intx MonitorBound [ 0 ... 2147483647 ] {product} {default}
+ // | | | | | |
+ // | | | | | +-- col7
+ // | | | | +-- col6
+ // | | | +-- col5
+ // | | +-- col4
+ // | +-- col2
+ // +-- col1
- } else if (!is_bool() && !is_ccstr()) {
- st->print("%9s %-50s ", _type, _name);
+ const unsigned int col_spacing = 1;
+ const unsigned int col1_pos = 0;
+ const unsigned int col1_width = 9;
+ const unsigned int col2_pos = col1_pos + col1_width + col_spacing;
+ const unsigned int col2_width = 49;
+ const unsigned int col3_pos = col2_pos + col2_width + col_spacing;
+ const unsigned int col3_width = 0;
+ const unsigned int col4_pos = col3_pos + col3_width + col_spacing;
+ const unsigned int col4_width = 60;
+ const unsigned int col5_pos = col4_pos + col4_width + col_spacing;
+ const unsigned int col5_width = 35;
+ const unsigned int col6_pos = col5_pos + col5_width + col_spacing;
+ const unsigned int col6_width = 15;
+ const unsigned int col7_pos = col6_pos + col6_width + col_spacing;
+ const unsigned int col7_width = 1;
+ st->fill_to(col1_pos);
+ st->print("%*s", col1_width, _type); // right-justified, therefore width is required.
+
+ fill_to_pos(st, col2_pos);
+ st->print("%s", _name);
+
+ fill_to_pos(st, col4_pos);
RangeStrFunc func = NULL;
if (is_int()) {
func = Flag::get_int_default_range_str;
@@ -548,24 +659,29 @@
} else if (is_double()) {
func = Flag::get_double_default_range_str;
} else {
- ShouldNotReachHere();
+ st->print("unhandled type %s", _type);
+ st->cr();
+ return;
}
CommandLineFlagRangeList::print(st, _name, func);
- st->print(" %-16s", " ");
- print_kind_and_origin(st);
+ fill_to_pos(st, col5_pos);
+ print_kind(st, col5_width);
+
+ fill_to_pos(st, col6_pos);
+ print_origin(st, col6_width);
#ifndef PRODUCT
if (withComments) {
+ fill_to_pos(st, col7_pos);
st->print("%s", _doc);
}
#endif
-
st->cr();
}
}
-void Flag::print_kind_and_origin(outputStream* st) {
+void Flag::print_kind(outputStream* st, unsigned int width) {
struct Data {
int flag;
const char* name;
@@ -615,11 +731,13 @@
}
assert(buffer_used + 2 <= buffer_size, "Too small buffer");
jio_snprintf(kind + buffer_used, buffer_size - buffer_used, "}");
- st->print("%20s", kind);
+ st->print("%*s", width, kind);
}
+}
+void Flag::print_origin(outputStream* st, unsigned int width) {
int origin = _flags & VALUE_ORIGIN_MASK;
- st->print(" {");
+ st->print("{");
switch(origin) {
case DEFAULT:
st->print("default"); break;
--- a/src/hotspot/share/runtime/globals.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/globals.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -184,7 +184,9 @@
DIAGNOSTIC_FLAG_BUT_LOCKED,
EXPERIMENTAL_FLAG_BUT_LOCKED,
DEVELOPER_FLAG_BUT_PRODUCT_BUILD,
- NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD
+ NOTPRODUCT_FLAG_BUT_PRODUCT_BUILD,
+ COMMERCIAL_FLAG_BUT_DISABLED,
+ COMMERCIAL_FLAG_BUT_LOCKED
};
const char* _type;
@@ -285,11 +287,12 @@
void clear_diagnostic();
Flag::MsgType get_locked_message(char*, int) const;
- void get_locked_message_ext(char*, int) const;
+ Flag::MsgType get_locked_message_ext(char*, int) const;
// printRanges will print out flags type, name and range values as expected by -XX:+PrintFlagsRanges
void print_on(outputStream* st, bool withComments = false, bool printRanges = false);
- void print_kind_and_origin(outputStream* st);
+ void print_kind(outputStream* st, unsigned int width);
+ void print_origin(outputStream* st, unsigned int width);
void print_as_flag(outputStream* st);
static const char* flag_error_str(Flag::Error error);
@@ -2650,7 +2653,7 @@
"Inline allocations larger than this in doublewords must go slow")\
\
product(bool, AggressiveOpts, false, \
- "Enable aggressive optimizations - see arguments.cpp") \
+ "(Deprecated) Enable aggressive optimizations - see arguments.cpp") \
\
product_pd(bool, CompactStrings, \
"Enable Strings to use single byte chars in backing store") \
--- a/src/hotspot/share/runtime/globals_ext.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/globals_ext.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -61,9 +61,10 @@
return false;
}
-inline void Flag::get_locked_message_ext(char* buf, int buflen) const {
+inline Flag::MsgType Flag::get_locked_message_ext(char* buf, int buflen) const {
assert(buf != NULL, "Buffer cannot be NULL");
buf[0] = '\0';
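+ // The default (unextended) implementation imposes no extra lock constraints.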
+ return Flag::NONE;
}
#endif // SHARE_VM_RUNTIME_GLOBALS_EXT_HPP
--- a/src/hotspot/share/runtime/handles.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/handles.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -214,4 +214,4 @@
area->_no_handle_mark_nesting = _no_handle_mark_nesting;
}
-#endif
+#endif // ASSERT
--- a/src/hotspot/share/runtime/handles.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/handles.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -296,4 +296,17 @@
#endif
};
+// The HandleMarkCleaner is a faster version of HandleMark.
+// It relies on the fact that there is a HandleMark further
+// down the stack (in JavaCalls::call_helper), and just resets
+// to the saved values in that HandleMark.
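+//
+// Sketch of typical use (this is what VM_ENTRY_BASE in
+// interfaceSupport.inline.hpp declares at the top of every ENTRY routine):
+//   HandleMarkCleaner __hm(thread);  // push() on thread->last_handle_mark()
+//   ... body may allocate handles ...
+//   // destructor: pop_and_restore() releases them when the scope exits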
+
+class HandleMarkCleaner: public StackObj {
+ private:
+ Thread* _thread;
+ public:
+ inline HandleMarkCleaner(Thread* thread);
+ inline ~HandleMarkCleaner();
+};
+
#endif // SHARE_VM_RUNTIME_HANDLES_HPP
--- a/src/hotspot/share/runtime/handles.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/handles.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -104,4 +104,13 @@
debug_only(area->_handle_mark_nesting--);
}
+inline HandleMarkCleaner::HandleMarkCleaner(Thread* thread) {
+ _thread = thread;
+ _thread->last_handle_mark()->push();
+}
+
+inline HandleMarkCleaner::~HandleMarkCleaner() {
+ _thread->last_handle_mark()->pop_and_restore();
+}
+
#endif // SHARE_VM_RUNTIME_HANDLES_INLINE_HPP
--- a/src/hotspot/share/runtime/handshake.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/handshake.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/handshake.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/osThread.hpp"
#include "runtime/semaphore.hpp"
#include "runtime/task.hpp"
--- a/src/hotspot/share/runtime/interfaceSupport.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/interfaceSupport.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,16 +28,51 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/atomic.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.inline.hpp"
+#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
+#include "runtime/vmThread.hpp"
#include "utilities/preserveException.hpp"
// Implementation of InterfaceSupport
#ifdef ASSERT
+VMEntryWrapper::VMEntryWrapper() {
+ if (VerifyLastFrame) {
+ InterfaceSupport::verify_last_frame();
+ }
+}
+
+VMEntryWrapper::~VMEntryWrapper() {
+ InterfaceSupport::check_gc_alot();
+ if (WalkStackALot) {
+ InterfaceSupport::walk_stack();
+ }
+#ifdef COMPILER2
+ // This option is not used by Compiler 1
+ if (StressDerivedPointers) {
+ InterfaceSupport::stress_derived_pointers();
+ }
+#endif
+ if (DeoptimizeALot || DeoptimizeRandom) {
+ InterfaceSupport::deoptimizeAll();
+ }
+ if (ZombieALot) {
+ InterfaceSupport::zombieAll();
+ }
+ if (UnlinkSymbolsALot) {
+ InterfaceSupport::unlinkSymbols();
+ }
+ // do verification AFTER potential deoptimization
+ if (VerifyStack) {
+ InterfaceSupport::verify_stack();
+ }
+}
long InterfaceSupport::_number_of_calls = 0;
long InterfaceSupport::_scavenge_alot_counter = 1;
--- a/src/hotspot/share/runtime/interfaceSupport.hpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,623 +0,0 @@
-/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
-#define SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
-
-#include "gc/shared/gcLocker.hpp"
-#include "runtime/handles.inline.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-#include "runtime/safepointMechanism.inline.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/vmThread.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/macros.hpp"
-#include "utilities/preserveException.hpp"
-
-// Wrapper for all entry points to the virtual machine.
-// The HandleMarkCleaner is a faster version of HandleMark.
-// It relies on the fact that there is a HandleMark further
-// down the stack (in JavaCalls::call_helper), and just resets
-// to the saved values in that HandleMark.
-
-class HandleMarkCleaner: public StackObj {
- private:
- Thread* _thread;
- public:
- HandleMarkCleaner(Thread* thread) {
- _thread = thread;
- _thread->last_handle_mark()->push();
- }
- ~HandleMarkCleaner() {
- _thread->last_handle_mark()->pop_and_restore();
- }
-
- private:
- inline void* operator new(size_t size, void* ptr) throw() {
- return ptr;
- }
-};
-
-// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
-// VM_ENTRY_BASE macros. These macros are used to guard entry points into
-// the VM and perform checks upon leave of the VM.
-
-
-class InterfaceSupport: AllStatic {
-# ifdef ASSERT
- public:
- static long _scavenge_alot_counter;
- static long _fullgc_alot_counter;
- static long _number_of_calls;
- static long _fullgc_alot_invocation;
-
- // Helper methods used to implement +ScavengeALot and +FullGCALot
- static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
- static void gc_alot();
-
- static void walk_stack_from(vframe* start_vf);
- static void walk_stack();
-
- static void zombieAll();
- static void unlinkSymbols();
- static void deoptimizeAll();
- static void stress_derived_pointers();
- static void verify_stack();
- static void verify_last_frame();
-# endif
-
- public:
- static void serialize_thread_state_with_handler(JavaThread* thread) {
- serialize_thread_state_internal(thread, true);
- }
-
- // Should only call this if we know that we have a proper SEH set up.
- static void serialize_thread_state(JavaThread* thread) {
- serialize_thread_state_internal(thread, false);
- }
-
- private:
- static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) {
- // Make sure new state is seen by VM thread
- if (os::is_MP()) {
- if (UseMembar) {
- // Force a fence between the write above and read below
- OrderAccess::fence();
- } else {
- // store to serialize page so VM thread can do pseudo remote membar
- if (needs_exception_handler) {
- os::write_memory_serialize_page_with_handler(thread);
- } else {
- os::write_memory_serialize_page(thread);
- }
- }
- }
- }
-};
-
-
-// Basic class for all thread transition classes.
-
-class ThreadStateTransition : public StackObj {
- protected:
- JavaThread* _thread;
- public:
- ThreadStateTransition(JavaThread *thread) {
- _thread = thread;
- assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
- }
-
- // Change threadstate in a manner, so safepoint can detect changes.
- // Time-critical: called on exit from every runtime routine
- static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
- assert(from != _thread_in_Java, "use transition_from_java");
- assert(from != _thread_in_native, "use transition_from_native");
- assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
- assert(thread->thread_state() == from, "coming from wrong thread state");
- // Change to transition state
- thread->set_thread_state((JavaThreadState)(from + 1));
-
- InterfaceSupport::serialize_thread_state(thread);
-
- SafepointMechanism::block_if_requested(thread);
- thread->set_thread_state(to);
-
- CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
- }
-
- // transition_and_fence must be used on any thread state transition
- // where there might not be a Java call stub on the stack, in
- // particular on Windows where the Structured Exception Handler is
- // set up in the call stub. os::write_memory_serialize_page() can
- // fault and we can't recover from it on Windows without a SEH in
- // place.
- static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
- assert(thread->thread_state() == from, "coming from wrong thread state");
- assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
- // Change to transition state
- thread->set_thread_state((JavaThreadState)(from + 1));
-
- InterfaceSupport::serialize_thread_state_with_handler(thread);
-
- SafepointMechanism::block_if_requested(thread);
- thread->set_thread_state(to);
-
- CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
- }
-
- // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
- // never block on entry to the VM. This will break the code, since e.g. preserve arguments
- // have not been setup.
- static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
- assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
- thread->set_thread_state(to);
- }
-
- static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
- assert((to & 1) == 0, "odd numbers are transitions states");
- assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
- // Change to transition state
- thread->set_thread_state(_thread_in_native_trans);
-
- InterfaceSupport::serialize_thread_state_with_handler(thread);
-
- // We never install asynchronous exceptions when coming (back) in
- // to the runtime from native code because the runtime is not set
- // up to handle exceptions floating around at arbitrary points.
- if (SafepointMechanism::poll(thread) || thread->is_suspend_after_native()) {
- JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
-
- // Clear unhandled oops anywhere where we could block, even if we don't.
- CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
- }
-
- thread->set_thread_state(to);
- }
- protected:
- void trans(JavaThreadState from, JavaThreadState to) { transition(_thread, from, to); }
- void trans_from_java(JavaThreadState to) { transition_from_java(_thread, to); }
- void trans_from_native(JavaThreadState to) { transition_from_native(_thread, to); }
- void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
-};
-
-class ThreadInVMForHandshake : public ThreadStateTransition {
- const JavaThreadState _original_state;
-
- void transition_back() {
- // This can be invoked from transition states and must return to the original state properly
- assert(_thread->thread_state() == _thread_in_vm, "should only call when leaving VM after handshake");
- _thread->set_thread_state(_thread_in_vm_trans);
-
- InterfaceSupport::serialize_thread_state(_thread);
-
- SafepointMechanism::block_if_requested(_thread);
-
- _thread->set_thread_state(_original_state);
- }
-
- public:
-
- ThreadInVMForHandshake(JavaThread* thread) : ThreadStateTransition(thread),
- _original_state(thread->thread_state()) {
-
- if (thread->has_last_Java_frame()) {
- thread->frame_anchor()->make_walkable(thread);
- }
-
- thread->set_thread_state(_thread_in_vm);
- }
-
- ~ThreadInVMForHandshake() {
- transition_back();
- }
-
-};
-
-class ThreadInVMfromJava : public ThreadStateTransition {
- public:
- ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
- trans_from_java(_thread_in_vm);
- }
- ~ThreadInVMfromJava() {
- if (_thread->stack_yellow_reserved_zone_disabled()) {
- _thread->enable_stack_yellow_reserved_zone();
- }
- trans(_thread_in_vm, _thread_in_Java);
- // Check for pending. async. exceptions or suspends.
- if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
- }
-};
-
-
-class ThreadInVMfromUnknown {
- private:
- JavaThread* _thread;
- public:
- ThreadInVMfromUnknown() : _thread(NULL) {
- Thread* t = Thread::current();
- if (t->is_Java_thread()) {
- JavaThread* t2 = (JavaThread*) t;
- if (t2->thread_state() == _thread_in_native) {
- _thread = t2;
- ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
- // Used to have a HandleMarkCleaner but that is dangerous as
- // it could free a handle in our (indirect, nested) caller.
- // We expect any handles will be short lived and figure we
- // don't need an actual HandleMark.
- }
- }
- }
- ~ThreadInVMfromUnknown() {
- if (_thread) {
- ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
- }
- }
-};
-
-
-class ThreadInVMfromNative : public ThreadStateTransition {
- public:
- ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
- trans_from_native(_thread_in_vm);
- }
- ~ThreadInVMfromNative() {
- trans_and_fence(_thread_in_vm, _thread_in_native);
- }
-};
-
-
-class ThreadToNativeFromVM : public ThreadStateTransition {
- public:
- ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
- // We are leaving the VM at this point and going directly to native code.
- // Block, if we are in the middle of a safepoint synchronization.
- assert(!thread->owns_locks(), "must release all locks when leaving VM");
- thread->frame_anchor()->make_walkable(thread);
- trans_and_fence(_thread_in_vm, _thread_in_native);
- // Check for pending. async. exceptions or suspends.
- if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
- }
-
- ~ThreadToNativeFromVM() {
- trans_from_native(_thread_in_vm);
- assert(!_thread->is_pending_jni_exception_check(), "Pending JNI Exception Check");
- // We don't need to clear_walkable because it will happen automagically when we return to java
- }
-};
-
-
-class ThreadBlockInVM : public ThreadStateTransition {
- public:
- ThreadBlockInVM(JavaThread *thread)
- : ThreadStateTransition(thread) {
- // Once we are blocked vm expects stack to be walkable
- thread->frame_anchor()->make_walkable(thread);
- trans_and_fence(_thread_in_vm, _thread_blocked);
- }
- ~ThreadBlockInVM() {
- trans_and_fence(_thread_blocked, _thread_in_vm);
- // We don't need to clear_walkable because it will happen automagically when we return to java
- }
-};
-
-
-// This special transition class is only used to prevent asynchronous exceptions
-// from being installed on vm exit in situations where we can't tolerate them.
-// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
-class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
- public:
- ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
- trans_from_java(_thread_in_vm);
- }
- ~ThreadInVMfromJavaNoAsyncException() {
- if (_thread->stack_yellow_reserved_zone_disabled()) {
- _thread->enable_stack_yellow_reserved_zone();
- }
- trans(_thread_in_vm, _thread_in_Java);
- // NOTE: We do not check for pending. async. exceptions.
- // If we did and moved the pending async exception over into the
- // pending exception field, we would need to deopt (currently C2
- // only). However, to do so would require that we transition back
- // to the _thread_in_vm state. Instead we postpone the handling of
- // the async exception.
-
-
- // Check for pending. suspends only.
- if (_thread->has_special_runtime_exit_condition())
- _thread->handle_special_runtime_exit_condition(false);
- }
-};
-
-// Debug class instantiated in JRT_ENTRY and ITR_ENTRY macro.
-// Can be used to verify properties on enter/exit of the VM.
-
-#ifdef ASSERT
-class VMEntryWrapper {
- public:
- VMEntryWrapper() {
- if (VerifyLastFrame) {
- InterfaceSupport::verify_last_frame();
- }
- }
-
- ~VMEntryWrapper() {
- InterfaceSupport::check_gc_alot();
- if (WalkStackALot) {
- InterfaceSupport::walk_stack();
- }
-#ifdef COMPILER2
- // This option is not used by Compiler 1
- if (StressDerivedPointers) {
- InterfaceSupport::stress_derived_pointers();
- }
-#endif
- if (DeoptimizeALot || DeoptimizeRandom) {
- InterfaceSupport::deoptimizeAll();
- }
- if (ZombieALot) {
- InterfaceSupport::zombieAll();
- }
- if (UnlinkSymbolsALot) {
- InterfaceSupport::unlinkSymbols();
- }
- // do verification AFTER potential deoptimization
- if (VerifyStack) {
- InterfaceSupport::verify_stack();
- }
-
- }
-};
-
-
-class VMNativeEntryWrapper {
- public:
- VMNativeEntryWrapper() {
- if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
- }
-
- ~VMNativeEntryWrapper() {
- if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
- }
-};
-
-#endif
-
-
-// VM-internal runtime interface support
-
-#ifdef ASSERT
-
-class RuntimeHistogramElement : public HistogramElement {
- public:
- RuntimeHistogramElement(const char* name);
-};
-
-#define TRACE_CALL(result_type, header) \
- InterfaceSupport::_number_of_calls++; \
- if (CountRuntimeCalls) { \
- static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
- if (e != NULL) e->increment_count(); \
- }
-#else
-#define TRACE_CALL(result_type, header) \
- /* do nothing */
-#endif
-
-
-// LEAF routines do not lock, GC or throw exceptions
-
-#define VM_LEAF_BASE(result_type, header) \
- TRACE_CALL(result_type, header) \
- debug_only(NoHandleMark __hm;) \
- os::verify_stack_alignment(); \
- /* begin of body */
-
-#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread) \
- TRACE_CALL(result_type, header) \
- debug_only(ResetNoHandleMark __rnhm;) \
- HandleMarkCleaner __hm(thread); \
- Thread* THREAD = thread; \
- os::verify_stack_alignment(); \
- /* begin of body */
-
-
-// ENTRY routines may lock, GC and throw exceptions
-
-#define VM_ENTRY_BASE(result_type, header, thread) \
- TRACE_CALL(result_type, header) \
- HandleMarkCleaner __hm(thread); \
- Thread* THREAD = thread; \
- os::verify_stack_alignment(); \
- /* begin of body */
-
-
-// QUICK_ENTRY routines behave like ENTRY but without a handle mark
-
-#define VM_QUICK_ENTRY_BASE(result_type, header, thread) \
- TRACE_CALL(result_type, header) \
- debug_only(NoHandleMark __hm;) \
- Thread* THREAD = thread; \
- os::verify_stack_alignment(); \
- /* begin of body */
-
-
-// Definitions for IRT (Interpreter Runtime)
-// (thread is an argument passed in to all these routines)
-
-#define IRT_ENTRY(result_type, header) \
- result_type header { \
- ThreadInVMfromJava __tiv(thread); \
- VM_ENTRY_BASE(result_type, header, thread) \
- debug_only(VMEntryWrapper __vew;)
-
-
-#define IRT_LEAF(result_type, header) \
- result_type header { \
- VM_LEAF_BASE(result_type, header) \
- debug_only(NoSafepointVerifier __nspv(true);)
-
-
-#define IRT_ENTRY_NO_ASYNC(result_type, header) \
- result_type header { \
- ThreadInVMfromJavaNoAsyncException __tiv(thread); \
- VM_ENTRY_BASE(result_type, header, thread) \
- debug_only(VMEntryWrapper __vew;)
-
-#define IRT_END }
-
-
-// Definitions for JRT (Java (Compiler/Shared) Runtime)
-
-#define JRT_ENTRY(result_type, header) \
- result_type header { \
- ThreadInVMfromJava __tiv(thread); \
- VM_ENTRY_BASE(result_type, header, thread) \
- debug_only(VMEntryWrapper __vew;)
-
-
-#define JRT_LEAF(result_type, header) \
- result_type header { \
- VM_LEAF_BASE(result_type, header) \
- debug_only(JRTLeafVerifier __jlv;)
-
-
-#define JRT_ENTRY_NO_ASYNC(result_type, header) \
- result_type header { \
- ThreadInVMfromJavaNoAsyncException __tiv(thread); \
- VM_ENTRY_BASE(result_type, header, thread) \
- debug_only(VMEntryWrapper __vew;)
-
-// Same as JRT Entry but allows for return value after the safepoint
-// to get back into Java from the VM
-#define JRT_BLOCK_ENTRY(result_type, header) \
- result_type header { \
- TRACE_CALL(result_type, header) \
- HandleMarkCleaner __hm(thread);
-
-#define JRT_BLOCK \
- { \
- ThreadInVMfromJava __tiv(thread); \
- Thread* THREAD = thread; \
- debug_only(VMEntryWrapper __vew;)
-
-#define JRT_BLOCK_NO_ASYNC \
- { \
- ThreadInVMfromJavaNoAsyncException __tiv(thread); \
- Thread* THREAD = thread; \
- debug_only(VMEntryWrapper __vew;)
-
-#define JRT_BLOCK_END }
-
-#define JRT_END }
-
-// Definitions for JNI
-
-#define JNI_ENTRY(result_type, header) \
- JNI_ENTRY_NO_PRESERVE(result_type, header) \
- WeakPreserveExceptionMark __wem(thread);
-
-#define JNI_ENTRY_NO_PRESERVE(result_type, header) \
-extern "C" { \
- result_type JNICALL header { \
- JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
- assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
- ThreadInVMfromNative __tiv(thread); \
- debug_only(VMNativeEntryWrapper __vew;) \
- VM_ENTRY_BASE(result_type, header, thread)
-
-
-// Ensure that the VMNativeEntryWrapper constructor, which can cause
-// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
-#define JNI_QUICK_ENTRY(result_type, header) \
-extern "C" { \
- result_type JNICALL header { \
- JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
- assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
- ThreadInVMfromNative __tiv(thread); \
- debug_only(VMNativeEntryWrapper __vew;) \
- VM_QUICK_ENTRY_BASE(result_type, header, thread)
-
-
-#define JNI_LEAF(result_type, header) \
-extern "C" { \
- result_type JNICALL header { \
- JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
- assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
- VM_LEAF_BASE(result_type, header)
-
-
-// Close the routine and the extern "C"
-#define JNI_END } }
-
-
-
-// Definitions for JVM
-
-#define JVM_ENTRY(result_type, header) \
-extern "C" { \
- result_type JNICALL header { \
- JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
- ThreadInVMfromNative __tiv(thread); \
- debug_only(VMNativeEntryWrapper __vew;) \
- VM_ENTRY_BASE(result_type, header, thread)
-
-
-#define JVM_ENTRY_NO_ENV(result_type, header) \
-extern "C" { \
- result_type JNICALL header { \
- JavaThread* thread = JavaThread::current(); \
- ThreadInVMfromNative __tiv(thread); \
- debug_only(VMNativeEntryWrapper __vew;) \
- VM_ENTRY_BASE(result_type, header, thread)
-
-
-#define JVM_QUICK_ENTRY(result_type, header) \
-extern "C" { \
- result_type JNICALL header { \
- JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
- ThreadInVMfromNative __tiv(thread); \
- debug_only(VMNativeEntryWrapper __vew;) \
- VM_QUICK_ENTRY_BASE(result_type, header, thread)
-
-
-#define JVM_LEAF(result_type, header) \
-extern "C" { \
- result_type JNICALL header { \
- VM_Exit::block_if_vm_exited(); \
- VM_LEAF_BASE(result_type, header)
-
-
-#define JVM_ENTRY_FROM_LEAF(env, result_type, header) \
- { { \
- JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
- ThreadInVMfromNative __tiv(thread); \
- debug_only(VMNativeEntryWrapper __vew;) \
- VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)
-
-
-#define JVM_END } }
-
-#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/interfaceSupport.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,571 @@
+/*
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
+#define SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
+
+#include "gc/shared/gcLocker.hpp"
+#include "runtime/handles.inline.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/os.hpp"
+#include "runtime/safepointMechanism.inline.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/vm_operations.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+#include "utilities/preserveException.hpp"
+
+// Wrapper for all entry points to the virtual machine.
+
+// InterfaceSupport provides functionality used by the VM_LEAF_BASE and
+// VM_ENTRY_BASE macros. These macros are used to guard entry points into
+// the VM and perform checks upon leave of the VM.
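+//
+// For orientation, a guarded entry point is written as (illustrative name):
+//
+//   JRT_ENTRY(void, SomeRuntime::do_work(JavaThread* thread))
+//     ... body runs in _thread_in_vm ...
+//   JRT_END
+//
+// and expands into a function that performs the thread state transition and
+// sets up THREAD and a HandleMarkCleaner via VM_ENTRY_BASE.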
+
+
+class InterfaceSupport: AllStatic {
+# ifdef ASSERT
+ public:
+ static long _scavenge_alot_counter;
+ static long _fullgc_alot_counter;
+ static long _number_of_calls;
+ static long _fullgc_alot_invocation;
+
+ // Helper methods used to implement +ScavengeALot and +FullGCALot
+ static void check_gc_alot() { if (ScavengeALot || FullGCALot) gc_alot(); }
+ static void gc_alot();
+
+ static void walk_stack_from(vframe* start_vf);
+ static void walk_stack();
+
+ static void zombieAll();
+ static void unlinkSymbols();
+ static void deoptimizeAll();
+ static void stress_derived_pointers();
+ static void verify_stack();
+ static void verify_last_frame();
+# endif
+
+ public:
+ static void serialize_thread_state_with_handler(JavaThread* thread) {
+ serialize_thread_state_internal(thread, true);
+ }
+
+ // Should only call this if we know that we have a proper SEH set up.
+ static void serialize_thread_state(JavaThread* thread) {
+ serialize_thread_state_internal(thread, false);
+ }
+
+ private:
+ static void serialize_thread_state_internal(JavaThread* thread, bool needs_exception_handler) {
+ // Make sure new state is seen by VM thread
+ if (os::is_MP()) {
+ if (UseMembar) {
+ // Force a fence between the write above and read below
+ OrderAccess::fence();
+ } else {
+ // store to serialize page so VM thread can do pseudo remote membar
+ if (needs_exception_handler) {
+ os::write_memory_serialize_page_with_handler(thread);
+ } else {
+ os::write_memory_serialize_page(thread);
+ }
+ }
+ }
+ }
+};
+
+
+// Basic class for all thread transition classes.
+
+class ThreadStateTransition : public StackObj {
+ protected:
+ JavaThread* _thread;
+ public:
+ ThreadStateTransition(JavaThread *thread) {
+ _thread = thread;
+ assert(thread != NULL && thread->is_Java_thread(), "must be Java thread");
+ }
+
+ // Change the thread state in a manner that lets safepoint code detect the change.
+ // Time-critical: called on exit from every runtime routine.
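+ // JavaThreadState values are even; 'from + 1' below yields the corresponding
+ // odd in-transition value (hence the assertions about odd numbers).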
+ static inline void transition(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
+ assert(from != _thread_in_Java, "use transition_from_java");
+ assert(from != _thread_in_native, "use transition_from_native");
+ assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
+ assert(thread->thread_state() == from, "coming from wrong thread state");
+ // Change to transition state
+ thread->set_thread_state((JavaThreadState)(from + 1));
+
+ InterfaceSupport::serialize_thread_state(thread);
+
+ SafepointMechanism::block_if_requested(thread);
+ thread->set_thread_state(to);
+
+ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+ }
+
+ // transition_and_fence must be used on any thread state transition
+ // where there might not be a Java call stub on the stack, in
+ // particular on Windows where the Structured Exception Handler is
+ // set up in the call stub. os::write_memory_serialize_page() can
+ // fault and we can't recover from it on Windows without a SEH in
+ // place.
+ static inline void transition_and_fence(JavaThread *thread, JavaThreadState from, JavaThreadState to) {
+ assert(thread->thread_state() == from, "coming from wrong thread state");
+ assert((from & 1) == 0 && (to & 1) == 0, "odd numbers are transitions states");
+ // Change to transition state
+ thread->set_thread_state((JavaThreadState)(from + 1));
+
+ InterfaceSupport::serialize_thread_state_with_handler(thread);
+
+ SafepointMechanism::block_if_requested(thread);
+ thread->set_thread_state(to);
+
+ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+ }
+
+ // Same as above, but assumes from = _thread_in_Java. This is simpler, since we
+ // never block on entry to the VM. Blocking here would break the code, since e.g.
+ // preserved arguments have not been set up.
+ static inline void transition_from_java(JavaThread *thread, JavaThreadState to) {
+ assert(thread->thread_state() == _thread_in_Java, "coming from wrong thread state");
+ thread->set_thread_state(to);
+ }
+
+ static inline void transition_from_native(JavaThread *thread, JavaThreadState to) {
+ assert((to & 1) == 0, "odd numbers are transitions states");
+ assert(thread->thread_state() == _thread_in_native, "coming from wrong thread state");
+ // Change to transition state
+ thread->set_thread_state(_thread_in_native_trans);
+
+ InterfaceSupport::serialize_thread_state_with_handler(thread);
+
+ // We never install asynchronous exceptions when coming (back) in
+ // to the runtime from native code because the runtime is not set
+ // up to handle exceptions floating around at arbitrary points.
+ if (SafepointMechanism::poll(thread) || thread->is_suspend_after_native()) {
+ JavaThread::check_safepoint_and_suspend_for_native_trans(thread);
+
+ // Clear unhandled oops anywhere where we could block, even if we don't.
+ CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
+ }
+
+ thread->set_thread_state(to);
+ }
+ protected:
+ void trans(JavaThreadState from, JavaThreadState to) { transition(_thread, from, to); }
+ void trans_from_java(JavaThreadState to) { transition_from_java(_thread, to); }
+ void trans_from_native(JavaThreadState to) { transition_from_native(_thread, to); }
+ void trans_and_fence(JavaThreadState from, JavaThreadState to) { transition_and_fence(_thread, from, to); }
+};
+
+class ThreadInVMForHandshake : public ThreadStateTransition {
+ const JavaThreadState _original_state;
+
+ void transition_back() {
+ // This can be invoked from transition states and must return to the original state properly
+ assert(_thread->thread_state() == _thread_in_vm, "should only call when leaving VM after handshake");
+ _thread->set_thread_state(_thread_in_vm_trans);
+
+ InterfaceSupport::serialize_thread_state(_thread);
+
+ SafepointMechanism::block_if_requested(_thread);
+
+ _thread->set_thread_state(_original_state);
+ }
+
+ public:
+
+ ThreadInVMForHandshake(JavaThread* thread) : ThreadStateTransition(thread),
+ _original_state(thread->thread_state()) {
+
+ if (thread->has_last_Java_frame()) {
+ thread->frame_anchor()->make_walkable(thread);
+ }
+
+ thread->set_thread_state(_thread_in_vm);
+ }
+
+ ~ThreadInVMForHandshake() {
+ transition_back();
+ }
+
+};
+
+class ThreadInVMfromJava : public ThreadStateTransition {
+ public:
+ ThreadInVMfromJava(JavaThread* thread) : ThreadStateTransition(thread) {
+ trans_from_java(_thread_in_vm);
+ }
+ ~ThreadInVMfromJava() {
+ if (_thread->stack_yellow_reserved_zone_disabled()) {
+ _thread->enable_stack_yellow_reserved_zone();
+ }
+ trans(_thread_in_vm, _thread_in_Java);
+ // Check for pending async exceptions or suspends.
+ if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition();
+ }
+};
+
+
+class ThreadInVMfromUnknown {
+ private:
+ JavaThread* _thread;
+ public:
+ ThreadInVMfromUnknown() : _thread(NULL) {
+ Thread* t = Thread::current();
+ if (t->is_Java_thread()) {
+ JavaThread* t2 = (JavaThread*) t;
+ if (t2->thread_state() == _thread_in_native) {
+ _thread = t2;
+ ThreadStateTransition::transition_from_native(t2, _thread_in_vm);
+ // Used to have a HandleMarkCleaner but that is dangerous as
+ // it could free a handle in our (indirect, nested) caller.
+ // We expect any handles will be short lived and figure we
+ // don't need an actual HandleMark.
+ }
+ }
+ }
+ ~ThreadInVMfromUnknown() {
+ if (_thread) {
+ ThreadStateTransition::transition_and_fence(_thread, _thread_in_vm, _thread_in_native);
+ }
+ }
+};
+
+
+class ThreadInVMfromNative : public ThreadStateTransition {
+ public:
+ ThreadInVMfromNative(JavaThread* thread) : ThreadStateTransition(thread) {
+ trans_from_native(_thread_in_vm);
+ }
+ ~ThreadInVMfromNative() {
+ trans_and_fence(_thread_in_vm, _thread_in_native);
+ }
+};
+
+
+class ThreadToNativeFromVM : public ThreadStateTransition {
+ public:
+ ThreadToNativeFromVM(JavaThread *thread) : ThreadStateTransition(thread) {
+ // We are leaving the VM at this point and going directly to native code.
+ // Block, if we are in the middle of a safepoint synchronization.
+ assert(!thread->owns_locks(), "must release all locks when leaving VM");
+ thread->frame_anchor()->make_walkable(thread);
+ trans_and_fence(_thread_in_vm, _thread_in_native);
+ // Check for pending async exceptions or suspends.
+ if (_thread->has_special_runtime_exit_condition()) _thread->handle_special_runtime_exit_condition(false);
+ }
+
+ ~ThreadToNativeFromVM() {
+ trans_from_native(_thread_in_vm);
+ assert(!_thread->is_pending_jni_exception_check(), "Pending JNI Exception Check");
+ // We don't need to clear_walkable because it will happen automagically when we return to java
+ }
+};
+
+
+class ThreadBlockInVM : public ThreadStateTransition {
+ public:
+ ThreadBlockInVM(JavaThread *thread)
+ : ThreadStateTransition(thread) {
+ // Once we are blocked, the VM expects the stack to be walkable.
+ thread->frame_anchor()->make_walkable(thread);
+ trans_and_fence(_thread_in_vm, _thread_blocked);
+ }
+ ~ThreadBlockInVM() {
+ trans_and_fence(_thread_blocked, _thread_in_vm);
+ // We don't need to clear_walkable because it will happen automagically when we return to java
+ }
+};
+
+
+// This special transition class is only used to prevent asynchronous exceptions
+// from being installed on vm exit in situations where we can't tolerate them.
+// See bugs: 4324348, 4854693, 4998314, 5040492, 5050705.
+class ThreadInVMfromJavaNoAsyncException : public ThreadStateTransition {
+ public:
+ ThreadInVMfromJavaNoAsyncException(JavaThread* thread) : ThreadStateTransition(thread) {
+ trans_from_java(_thread_in_vm);
+ }
+ ~ThreadInVMfromJavaNoAsyncException() {
+ if (_thread->stack_yellow_reserved_zone_disabled()) {
+ _thread->enable_stack_yellow_reserved_zone();
+ }
+ trans(_thread_in_vm, _thread_in_Java);
+ // NOTE: We do not check for pending async exceptions.
+ // If we did and moved the pending async exception over into the
+ // pending exception field, we would need to deopt (currently C2
+ // only). However, to do so would require that we transition back
+ // to the _thread_in_vm state. Instead we postpone the handling of
+ // the async exception.
+
+
+ // Check for pending suspends only.
+ if (_thread->has_special_runtime_exit_condition())
+ _thread->handle_special_runtime_exit_condition(false);
+ }
+};
+
+// Debug class instantiated in the JRT_ENTRY and IRT_ENTRY macros.
+// Can be used to verify properties on enter/exit of the VM.
+
+#ifdef ASSERT
+class VMEntryWrapper {
+ public:
+ VMEntryWrapper();
+ ~VMEntryWrapper();
+};
+
+
+class VMNativeEntryWrapper {
+ public:
+ VMNativeEntryWrapper() {
+ if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
+ }
+
+ ~VMNativeEntryWrapper() {
+ if (GCALotAtAllSafepoints) InterfaceSupport::check_gc_alot();
+ }
+};
+
+#endif
+
+
+// VM-internal runtime interface support
+
+#ifdef ASSERT
+
+class RuntimeHistogramElement : public HistogramElement {
+ public:
+ RuntimeHistogramElement(const char* name);
+};
+
+#define TRACE_CALL(result_type, header) \
+ InterfaceSupport::_number_of_calls++; \
+ if (CountRuntimeCalls) { \
+ static RuntimeHistogramElement* e = new RuntimeHistogramElement(#header); \
+ if (e != NULL) e->increment_count(); \
+ }
+#else
+#define TRACE_CALL(result_type, header) \
+ /* do nothing */
+#endif
+
+
+// LEAF routines do not lock, GC or throw exceptions
+
+#define VM_LEAF_BASE(result_type, header) \
+ TRACE_CALL(result_type, header) \
+ debug_only(NoHandleMark __hm;) \
+ os::verify_stack_alignment(); \
+ /* begin of body */
+
+#define VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread) \
+ TRACE_CALL(result_type, header) \
+ debug_only(ResetNoHandleMark __rnhm;) \
+ HandleMarkCleaner __hm(thread); \
+ Thread* THREAD = thread; \
+ os::verify_stack_alignment(); \
+ /* begin of body */
+
+
+// ENTRY routines may lock, GC and throw exceptions
+
+#define VM_ENTRY_BASE(result_type, header, thread) \
+ TRACE_CALL(result_type, header) \
+ HandleMarkCleaner __hm(thread); \
+ Thread* THREAD = thread; \
+ os::verify_stack_alignment(); \
+ /* begin of body */
+
+
+// QUICK_ENTRY routines behave like ENTRY but without a handle mark
+
+#define VM_QUICK_ENTRY_BASE(result_type, header, thread) \
+ TRACE_CALL(result_type, header) \
+ debug_only(NoHandleMark __hm;) \
+ Thread* THREAD = thread; \
+ os::verify_stack_alignment(); \
+ /* begin of body */
+
+
+// Definitions for IRT (Interpreter Runtime)
+// (thread is an argument passed in to all these routines)
+
+#define IRT_ENTRY(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJava __tiv(thread); \
+ VM_ENTRY_BASE(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+
+#define IRT_LEAF(result_type, header) \
+ result_type header { \
+ VM_LEAF_BASE(result_type, header) \
+ debug_only(NoSafepointVerifier __nspv(true);)
+
+
+#define IRT_ENTRY_NO_ASYNC(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJavaNoAsyncException __tiv(thread); \
+ VM_ENTRY_BASE(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+#define IRT_END }
+
+
+// Definitions for JRT (Java (Compiler/Shared) Runtime)
+
+#define JRT_ENTRY(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJava __tiv(thread); \
+ VM_ENTRY_BASE(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+
+#define JRT_LEAF(result_type, header) \
+ result_type header { \
+ VM_LEAF_BASE(result_type, header) \
+ debug_only(JRTLeafVerifier __jlv;)
+
+
+#define JRT_ENTRY_NO_ASYNC(result_type, header) \
+ result_type header { \
+ ThreadInVMfromJavaNoAsyncException __tiv(thread); \
+ VM_ENTRY_BASE(result_type, header, thread) \
+ debug_only(VMEntryWrapper __vew;)
+
+// Same as JRT_ENTRY but allows the return value to be computed after the
+// safepoint transition back into Java from the VM (see the sketch following
+// JRT_END below).
+#define JRT_BLOCK_ENTRY(result_type, header) \
+ result_type header { \
+ TRACE_CALL(result_type, header) \
+ HandleMarkCleaner __hm(thread);
+
+#define JRT_BLOCK \
+ { \
+ ThreadInVMfromJava __tiv(thread); \
+ Thread* THREAD = thread; \
+ debug_only(VMEntryWrapper __vew;)
+
+#define JRT_BLOCK_NO_ASYNC \
+ { \
+ ThreadInVMfromJavaNoAsyncException __tiv(thread); \
+ Thread* THREAD = thread; \
+ debug_only(VMEntryWrapper __vew;)
+
+#define JRT_BLOCK_END }
+
+#define JRT_END }
+
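+// Illustrative shape of a blocking entry (names are examples only):
+//
+//   JRT_BLOCK_ENTRY(jint, SomeRuntime::compute(JavaThread* thread))
+//     jint result;
+//     JRT_BLOCK
+//       result = ...;        // may lock, GC or throw, in _thread_in_vm
+//     JRT_BLOCK_END
+//     return result;         // computed after the transition block
+//   JRT_END
+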
+// Definitions for JNI
+
+#define JNI_ENTRY(result_type, header) \
+ JNI_ENTRY_NO_PRESERVE(result_type, header) \
+ WeakPreserveExceptionMark __wem(thread);
+
+#define JNI_ENTRY_NO_PRESERVE(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ VM_ENTRY_BASE(result_type, header, thread)
+
+
+// Ensure that the VMNativeEntryWrapper constructor, which can cause
+// a GC, is called outside the NoHandleMark (set via VM_QUICK_ENTRY_BASE).
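+// (In the expansion below, __vew is constructed before VM_QUICK_ENTRY_BASE
+// declares its NoHandleMark, which is what guarantees this ordering.)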
+#define JNI_QUICK_ENTRY(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ VM_QUICK_ENTRY_BASE(result_type, header, thread)
+
+
+#define JNI_LEAF(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ assert( !VerifyJNIEnvThread || (thread == Thread::current()), "JNIEnv is only valid in same thread"); \
+ VM_LEAF_BASE(result_type, header)
+
+
+// Close the routine and the extern "C"
+#define JNI_END } }
+
+
+
+// Definitions for JVM
+
+#define JVM_ENTRY(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ VM_ENTRY_BASE(result_type, header, thread)
+
+
+#define JVM_ENTRY_NO_ENV(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread = JavaThread::current(); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ VM_ENTRY_BASE(result_type, header, thread)
+
+
+#define JVM_QUICK_ENTRY(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ VM_QUICK_ENTRY_BASE(result_type, header, thread)
+
+
+#define JVM_LEAF(result_type, header) \
+extern "C" { \
+ result_type JNICALL header { \
+ VM_Exit::block_if_vm_exited(); \
+ VM_LEAF_BASE(result_type, header)
+
+
+#define JVM_ENTRY_FROM_LEAF(env, result_type, header) \
+ { { \
+ JavaThread* thread=JavaThread::thread_from_jni_environment(env); \
+ ThreadInVMfromNative __tiv(thread); \
+ debug_only(VMNativeEntryWrapper __vew;) \
+ VM_ENTRY_BASE_FROM_LEAF(result_type, header, thread)
+
+
+#define JVM_END } }
+
+#endif // SHARE_VM_RUNTIME_INTERFACESUPPORT_INLINE_HPP
--- a/src/hotspot/share/runtime/java.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/java.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -56,7 +56,7 @@
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/memprofiler.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/share/runtime/javaCalls.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/javaCalls.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,7 +35,7 @@
#include "prims/jniCheck.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/share/runtime/javaFrameAnchor.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/javaFrameAnchor.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,7 +25,6 @@
#ifndef SHARE_VM_RUNTIME_JAVAFRAMEANCHOR_HPP
#define SHARE_VM_RUNTIME_JAVAFRAMEANCHOR_HPP
-#include "runtime/orderAccess.inline.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@@ -33,6 +32,7 @@
// An object for encapsulating the machine/os dependent part of a JavaThread frame state
//
class JavaThread;
+class MacroAssembler;
class JavaFrameAnchor {
// Too many friends...
--- a/src/hotspot/share/runtime/mutex.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/mutex.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
--- a/src/hotspot/share/runtime/objectMonitor.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/objectMonitor.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
--- a/src/hotspot/share/runtime/objectMonitor.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/objectMonitor.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,6 +31,8 @@
#include "runtime/park.hpp"
#include "runtime/perfData.hpp"
+class ObjectMonitor;
+
// ObjectWaiter serves as a "proxy" or surrogate thread.
// TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
// ParkEvent instead. Beware, however, that the JVMTI code
--- a/src/hotspot/share/runtime/os.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/os.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -48,7 +48,7 @@
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/share/runtime/os.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/os.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,8 @@
#include "metaprogramming/isRegisteredEnum.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "runtime/extendedPC.hpp"
-#include "runtime/handles.hpp"
+#include "utilities/exceptions.hpp"
+#include "utilities/ostream.hpp"
#include "utilities/macros.hpp"
#ifndef _WINDOWS
# include <setjmp.h>
@@ -54,6 +55,7 @@
class DLL;
class FileHandle;
class NativeCallStack;
+class methodHandle;
template<class E> class GrowableArray;
--- a/src/hotspot/share/runtime/reflection.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/reflection.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -47,7 +47,7 @@
#include "runtime/reflection.hpp"
#include "runtime/reflectionUtils.hpp"
#include "runtime/signature.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
static void trace_class_resolution(const Klass* to_class) {
ResourceMark rm;
--- a/src/hotspot/share/runtime/rframe.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/rframe.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
#define SHARE_VM_RUNTIME_RFRAME_HPP
#include "memory/allocation.hpp"
-#include "runtime/frame.inline.hpp"
+#include "runtime/frame.hpp"
// rframes ("recompiler frames") decorate stack frames with some extra information
// needed by the recompiler. The recompiler views the stack (at the time of recompilation)
--- a/src/hotspot/share/runtime/safepoint.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/safepoint.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -47,7 +47,7 @@
#include "runtime/compilationPolicy.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/osThread.hpp"
--- a/src/hotspot/share/runtime/safepointMechanism.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/safepointMechanism.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointMechanism.inline.hpp"
+#include "services/memTracker.hpp"
#include "utilities/globalDefinitions.hpp"
SafepointMechanism::PollingType SafepointMechanism::_polling_type = SafepointMechanism::_global_page_poll;
@@ -50,6 +51,7 @@
const size_t allocation_size = 2 * page_size;
char* polling_page = os::reserve_memory(allocation_size, NULL, page_size);
os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
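+  // Tag the reservation for Native Memory Tracking (category mtInternal).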
+ MemTracker::record_virtual_memory_type((address)polling_page, mtInternal);
char* bad_page = polling_page;
char* good_page = polling_page + page_size;
--- a/src/hotspot/share/runtime/serviceThread.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/serviceThread.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -23,7 +23,7 @@
*/
#include "precompiled.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/mutexLocker.hpp"
--- a/src/hotspot/share/runtime/sharedRuntime.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/sharedRuntime.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "aot/aotLoader.hpp"
+#include "code/compiledMethod.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
@@ -54,14 +55,15 @@
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "trace/tracing.hpp"
#include "utilities/copy.hpp"
--- a/src/hotspot/share/runtime/sharedRuntimeTrans.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/sharedRuntimeTrans.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jni.h"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
// This file contains copies of the fdlibm routines used by
--- a/src/hotspot/share/runtime/sharedRuntimeTrig.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/sharedRuntimeTrig.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "jni.h"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sharedRuntimeMath.hpp"
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/simpleThresholdPolicy.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,8 +24,10 @@
#include "precompiled.hpp"
#include "compiler/compileBroker.hpp"
+#include "gc/shared/gcLocker.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/arguments.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/simpleThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"
#include "code/scopeDesc.hpp"
--- a/src/hotspot/share/runtime/stubRoutines.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/stubRoutines.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/timerTrace.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
--- a/src/hotspot/share/runtime/sweeper.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/sweeper.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -35,12 +35,14 @@
#include "oops/method.hpp"
#include "runtime/atomic.hpp"
#include "runtime/compilationPolicy.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vm_operations.hpp"
+#include "runtime/vmThread.hpp"
#include "trace/tracing.hpp"
#include "utilities/events.hpp"
#include "utilities/ticks.inline.hpp"
--- a/src/hotspot/share/runtime/synchronizer.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/synchronizer.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -34,7 +34,7 @@
#include "runtime/atomic.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
@@ -43,6 +43,7 @@
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/vframe.hpp"
+#include "runtime/vmThread.hpp"
#include "trace/traceMacros.hpp"
#include "trace/tracing.hpp"
#include "utilities/align.hpp"
--- a/src/hotspot/share/runtime/thread.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/thread.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -69,7 +69,7 @@
#include "runtime/globals.hpp"
#include "runtime/handshake.hpp"
#include "runtime/init.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
@@ -92,7 +92,7 @@
#include "runtime/threadSMR.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/timerTrace.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "runtime/vmThread.hpp"
--- a/src/hotspot/share/runtime/thread.inline.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/thread.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,7 @@
#define SHARE_VM_RUNTIME_THREAD_INLINE_HPP
#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.hpp"
--- a/src/hotspot/share/runtime/threadSMR.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/threadSMR.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -29,8 +29,10 @@
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "services/threadService.hpp"
+#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/resourceHash.hpp"
+#include "utilities/vmError.hpp"
Monitor* ThreadsSMRSupport::_delete_lock =
new Monitor(Monitor::special, "Thread_SMR_delete_lock",
--- a/src/hotspot/share/runtime/vframe.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vframe.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,13 +36,14 @@
#include "memory/resourceArea.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/objectMonitor.inline.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
-#include "runtime/vframe.hpp"
+#include "runtime/vframe.inline.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
--- a/src/hotspot/share/runtime/vframe.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vframe.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "code/location.hpp"
#include "oops/oop.hpp"
#include "runtime/frame.hpp"
-#include "runtime/frame.inline.hpp"
#include "runtime/stackValue.hpp"
#include "runtime/stackValueCollection.hpp"
#include "utilities/growableArray.hpp"
@@ -307,14 +306,12 @@
public:
// Constructor
- vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
- _thread = thread;
- }
+ inline vframeStreamCommon(JavaThread* thread);
// Accessors
Method* method() const { return _method; }
int bci() const { return _bci; }
- intptr_t* frame_id() const { return _frame.id(); }
+ inline intptr_t* frame_id() const;
address frame_pc() const { return _frame.pc(); }
CodeBlob* cb() const { return _frame.cb(); }
@@ -324,19 +321,11 @@
}
// Frame type
- bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
- bool is_entry_frame() const { return _frame.is_entry_frame(); }
+ inline bool is_interpreted_frame() const;
+ inline bool is_entry_frame() const;
// Iteration
- void next() {
- // handle frames with inlining
- if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;
-
- // handle general case
- do {
- _frame = _frame.sender(&_reg_map);
- } while (!fill_from_frame());
- }
+ inline void next();
void security_next();
bool at_end() const { return _mode == at_end_mode; }
@@ -353,182 +342,10 @@
class vframeStream : public vframeStreamCommon {
public:
// Constructors
- vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false)
- : vframeStreamCommon(thread) {
- _stop_at_java_call_stub = stop_at_java_call_stub;
-
- if (!thread->has_last_Java_frame()) {
- _mode = at_end_mode;
- return;
- }
-
- _frame = _thread->last_frame();
- while (!fill_from_frame()) {
- _frame = _frame.sender(&_reg_map);
- }
- }
+ vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false);
// top_frame may not be at safepoint, start with sender
vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false);
};
-
-inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
- if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
- return false;
- }
- fill_from_compiled_frame(_sender_decode_offset);
- return true;
-}
-
-
-inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
- _mode = compiled_mode;
-
- // Range check to detect ridiculous offsets.
- if (decode_offset == DebugInformationRecorder::serialized_null ||
- decode_offset < 0 ||
- decode_offset >= nm()->scopes_data_size()) {
- // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
- // If we read nmethod::scopes_data at serialized_null (== 0)
- // or if read some at other invalid offset, invalid values will be decoded.
- // Based on these values, invalid heap locations could be referenced
- // that could lead to crashes in product mode.
- // Therefore, do not use the decode offset if invalid, but fill the frame
- // as it were a native compiled frame (no Java-level assumptions).
-#ifdef ASSERT
- if (WizardMode) {
- ttyLocker ttyl;
- tty->print_cr("Error in fill_from_frame: pc_desc for "
- INTPTR_FORMAT " not found or invalid at %d",
- p2i(_frame.pc()), decode_offset);
- nm()->print();
- nm()->method()->print_codes();
- nm()->print_code();
- nm()->print_pcs();
- }
- found_bad_method_frame();
-#endif
- // Provide a cheap fallback in product mode. (See comment above.)
- fill_from_compiled_native_frame();
- return;
- }
-
- // Decode first part of scopeDesc
- DebugInfoReadStream buffer(nm(), decode_offset);
- _sender_decode_offset = buffer.read_int();
- _method = buffer.read_method();
- _bci = buffer.read_bci();
-
- assert(_method->is_method(), "checking type of decoded method");
-}
-
-// The native frames are handled specially. We do not rely on ScopeDesc info
-// since the pc might not be exact due to the _last_native_pc trick.
-inline void vframeStreamCommon::fill_from_compiled_native_frame() {
- _mode = compiled_mode;
- _sender_decode_offset = DebugInformationRecorder::serialized_null;
- _method = nm()->method();
- _bci = 0;
-}
-
-inline bool vframeStreamCommon::fill_from_frame() {
- // Interpreted frame
- if (_frame.is_interpreted_frame()) {
- fill_from_interpreter_frame();
- return true;
- }
-
- // Compiled frame
-
- if (cb() != NULL && cb()->is_compiled()) {
- if (nm()->is_native_method()) {
- // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
- fill_from_compiled_native_frame();
- } else {
- PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
- int decode_offset;
- if (pc_desc == NULL) {
- // Should not happen, but let fill_from_compiled_frame handle it.
-
- // If we are trying to walk the stack of a thread that is not
- // at a safepoint (like AsyncGetCallTrace would do) then this is an
- // acceptable result. [ This is assuming that safe_for_sender
- // is so bullet proof that we can trust the frames it produced. ]
- //
- // So if we see that the thread is not safepoint safe
- // then simply produce the method and a bci of zero
- // and skip the possibility of decoding any inlining that
- // may be present. That is far better than simply stopping (or
- // asserting. If however the thread is safepoint safe this
- // is the sign of a compiler bug and we'll let
- // fill_from_compiled_frame handle it.
-
-
- JavaThreadState state = _thread->thread_state();
-
- // in_Java should be good enough to test safepoint safety
- // if state were say in_Java_trans then we'd expect that
- // the pc would have already been slightly adjusted to
- // one that would produce a pcDesc since the trans state
- // would be one that might in fact anticipate a safepoint
-
- if (state == _thread_in_Java ) {
- // This will get a method a zero bci and no inlining.
- // Might be nice to have a unique bci to signify this
- // particular case but for now zero will do.
-
- fill_from_compiled_native_frame();
-
- // There is something to be said for setting the mode to
- // at_end_mode to prevent trying to walk further up the
- // stack. There is evidence that if we walk any further
- // that we could produce a bad stack chain. However until
- // we see evidence that allowing this causes us to find
- // frames bad enough to cause segv's or assertion failures
- // we don't do it as while we may get a bad call chain the
- // probability is much higher (several magnitudes) that we
- // get good data.
-
- return true;
- }
- decode_offset = DebugInformationRecorder::serialized_null;
- } else {
- decode_offset = pc_desc->scope_decode_offset();
- }
- fill_from_compiled_frame(decode_offset);
- }
- return true;
- }
-
- // End of stack?
- if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
- _mode = at_end_mode;
- return true;
- }
-
- return false;
-}
-
-
-inline void vframeStreamCommon::fill_from_interpreter_frame() {
- Method* method = _frame.interpreter_frame_method();
- address bcp = _frame.interpreter_frame_bcp();
- int bci = method->validate_bci_from_bcp(bcp);
- // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
- // AsyncGetCallTrace interrupts the VM asynchronously. As a result
- // it is possible to access an interpreter frame for which
- // no Java-level information is yet available (e.g., becasue
- // the frame was being created when the VM interrupted it).
- // In this scenario, pretend that the interpreter is at the point
- // of entering the method.
- if (bci < 0) {
- DEBUG_ONLY(found_bad_method_frame();)
- bci = 0;
- }
- _mode = interpreted_mode;
- _method = method;
- _bci = bci;
-}
-
#endif // SHARE_VM_RUNTIME_VFRAME_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/runtime/vframe.inline.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_RUNTIME_VFRAME_INLINE_HPP
+#define SHARE_VM_RUNTIME_VFRAME_INLINE_HPP
+
+#include "runtime/frame.inline.hpp"
+#include "runtime/vframe.hpp"
+
+inline vframeStreamCommon::vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
+ _thread = thread;
+}
+
+inline intptr_t* vframeStreamCommon::frame_id() const { return _frame.id(); }
+
+inline bool vframeStreamCommon::is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
+
+inline bool vframeStreamCommon::is_entry_frame() const { return _frame.is_entry_frame(); }
+
+inline void vframeStreamCommon::next() {
+ // handle frames with inlining
+ if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;
+
+ // handle general case
+ do {
+ _frame = _frame.sender(&_reg_map);
+ } while (!fill_from_frame());
+}
+
+inline vframeStream::vframeStream(JavaThread* thread, bool stop_at_java_call_stub)
+ : vframeStreamCommon(thread) {
+ _stop_at_java_call_stub = stop_at_java_call_stub;
+
+ if (!thread->has_last_Java_frame()) {
+ _mode = at_end_mode;
+ return;
+ }
+
+ _frame = _thread->last_frame();
+ while (!fill_from_frame()) {
+ _frame = _frame.sender(&_reg_map);
+ }
+}
+
+inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
+ if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
+ return false;
+ }
+ fill_from_compiled_frame(_sender_decode_offset);
+ return true;
+}
+
+
+inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
+ _mode = compiled_mode;
+
+ // Range check to detect ridiculous offsets.
+ if (decode_offset == DebugInformationRecorder::serialized_null ||
+ decode_offset < 0 ||
+ decode_offset >= nm()->scopes_data_size()) {
+ // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
+ // If we read nmethod::scopes_data at serialized_null (== 0)
+ // or at some other invalid offset, invalid values will be decoded.
+ // Based on these values, invalid heap locations could be referenced
+ // that could lead to crashes in product mode.
+ // Therefore, do not use an invalid decode offset; instead, fill the frame
+ // as if it were a native compiled frame (no Java-level assumptions).
+#ifdef ASSERT
+ if (WizardMode) {
+ ttyLocker ttyl;
+ tty->print_cr("Error in fill_from_frame: pc_desc for "
+ INTPTR_FORMAT " not found or invalid at %d",
+ p2i(_frame.pc()), decode_offset);
+ nm()->print();
+ nm()->method()->print_codes();
+ nm()->print_code();
+ nm()->print_pcs();
+ }
+ found_bad_method_frame();
+#endif
+ // Provide a cheap fallback in product mode. (See comment above.)
+ fill_from_compiled_native_frame();
+ return;
+ }
+
+ // Decode first part of scopeDesc
+ DebugInfoReadStream buffer(nm(), decode_offset);
+ _sender_decode_offset = buffer.read_int();
+ _method = buffer.read_method();
+ _bci = buffer.read_bci();
+
+ assert(_method->is_method(), "checking type of decoded method");
+}
+
+// The native frames are handled specially. We do not rely on ScopeDesc info
+// since the pc might not be exact due to the _last_native_pc trick.
+inline void vframeStreamCommon::fill_from_compiled_native_frame() {
+ _mode = compiled_mode;
+ _sender_decode_offset = DebugInformationRecorder::serialized_null;
+ _method = nm()->method();
+ _bci = 0;
+}
+
+inline bool vframeStreamCommon::fill_from_frame() {
+ // Interpreted frame
+ if (_frame.is_interpreted_frame()) {
+ fill_from_interpreter_frame();
+ return true;
+ }
+
+ // Compiled frame
+
+ if (cb() != NULL && cb()->is_compiled()) {
+ if (nm()->is_native_method()) {
+ // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
+ fill_from_compiled_native_frame();
+ } else {
+ PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
+ int decode_offset;
+ if (pc_desc == NULL) {
+ // Should not happen, but let fill_from_compiled_frame handle it.
+
+ // If we are trying to walk the stack of a thread that is not
+ // at a safepoint (like AsyncGetCallTrace would do) then this is an
+ // acceptable result. [ This is assuming that safe_for_sender
+ // is so bulletproof that we can trust the frames it produces. ]
+ //
+ // So if we see that the thread is not safepoint safe
+ // then simply produce the method and a bci of zero
+ // and skip the possibility of decoding any inlining that
+ // may be present. That is far better than simply stopping (or
+ // asserting). If, however, the thread is safepoint safe, this
+ // is the sign of a compiler bug and we'll let
+ // fill_from_compiled_frame handle it.
+
+
+ JavaThreadState state = _thread->thread_state();
+
+ // in_Java should be good enough to test safepoint safety.
+ // If the state were, say, in_Java_trans, then we'd expect that
+ // the pc would already have been slightly adjusted to
+ // one that would produce a pcDesc, since the trans state
+ // would be one that might in fact anticipate a safepoint.
+
+ if (state == _thread_in_Java) {
+ // This will get a method, a zero bci, and no inlining.
+ // Might be nice to have a unique bci to signify this
+ // particular case but for now zero will do.
+
+ fill_from_compiled_native_frame();
+
+ // There is something to be said for setting the mode to
+ // at_end_mode to prevent trying to walk further up the
+ // stack. There is evidence that if we walk any further
+ // we could produce a bad stack chain. However, until
+ // we see evidence that allowing this causes us to find
+ // frames bad enough to cause segv's or assertion failures,
+ // we don't do it: while we may get a bad call chain, the
+ // probability is higher by several orders of magnitude that we
+ // get good data.
+
+ return true;
+ }
+ decode_offset = DebugInformationRecorder::serialized_null;
+ } else {
+ decode_offset = pc_desc->scope_decode_offset();
+ }
+ fill_from_compiled_frame(decode_offset);
+ }
+ return true;
+ }
+
+ // End of stack?
+ if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
+ _mode = at_end_mode;
+ return true;
+ }
+
+ return false;
+}
+
+
+inline void vframeStreamCommon::fill_from_interpreter_frame() {
+ Method* method = _frame.interpreter_frame_method();
+ address bcp = _frame.interpreter_frame_bcp();
+ int bci = method->validate_bci_from_bcp(bcp);
+ // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
+ // AsyncGetCallTrace interrupts the VM asynchronously. As a result
+ // it is possible to access an interpreter frame for which
+ // no Java-level information is yet available (e.g., because
+ // the frame was being created when the VM interrupted it).
+ // In this scenario, pretend that the interpreter is at the point
+ // of entering the method.
+ if (bci < 0) {
+ DEBUG_ONLY(found_bad_method_frame();)
+ bci = 0;
+ }
+ _mode = interpreted_mode;
+ _method = method;
+ _bci = bci;
+}
+
+#endif // SHARE_VM_RUNTIME_VFRAME_INLINE_HPP
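For context: with the iteration logic now in vframe.inline.hpp, callers that walk Java frames include this header and drive the stream directly. A minimal sketch of the API defined above (hypothetical helper, not part of this changeset):

    // Assumes runtime/vframe.inline.hpp is included and that `thread`
    // has a last Java frame; prints one line per (possibly inlined) frame.
    static void print_java_stack(JavaThread* thread) {
      for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
        Method* m = vfst.method();  // filled from interpreter state or scope data
        tty->print_cr("%s @ bci %d", m->name_and_sig_as_C_string(), vfst.bci());
      }
    }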
--- a/src/hotspot/share/runtime/vframeArray.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vframeArray.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -33,6 +33,7 @@
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -489,6 +490,9 @@
}
+intptr_t* vframeArray::unextended_sp() const {
+ return _original.unextended_sp();
+}
vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
RegisterMap *reg_map, frame sender, frame caller, frame self,
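Aside: moving unextended_sp() out of line follows the include-untangling pattern used throughout this changeset — the header keeps runtime/frame.hpp, and only the .cpp pulls in runtime/frame.inline.hpp (see the matching vframeArray.hpp change below). A minimal sketch of the pattern, with a hypothetical class:

    // foo.hpp: only the frame type is needed, so frame.hpp suffices.
    #include "runtime/frame.hpp"
    class Foo {
      frame _fr;
     public:
      intptr_t* unextended_sp() const;  // declared here, defined out of line
    };

    // foo.cpp: the definition uses an inline frame accessor, so it
    // includes frame.inline.hpp without imposing it on every header user.
    #include "runtime/frame.inline.hpp"
    intptr_t* Foo::unextended_sp() const { return _fr.unextended_sp(); }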
--- a/src/hotspot/share/runtime/vframeArray.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vframeArray.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
#include "memory/allocation.hpp"
#include "oops/arrayOop.hpp"
#include "runtime/deoptimization.hpp"
-#include "runtime/frame.inline.hpp"
+#include "runtime/frame.hpp"
#include "runtime/monitorChunk.hpp"
#include "utilities/growableArray.hpp"
@@ -189,7 +189,7 @@
// Accessors for sp
intptr_t* sp() const { return _original.sp(); }
- intptr_t* unextended_sp() const { return _original.unextended_sp(); }
+ intptr_t* unextended_sp() const;
address original_pc() const { return _original.pc(); }
--- a/src/hotspot/share/runtime/vframe_hp.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vframe_hp.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,6 +34,7 @@
#include "oops/instanceKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/basicLock.hpp"
+#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
--- a/src/hotspot/share/runtime/vmStructs.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vmStructs.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -475,8 +475,8 @@
nonstatic_field(CardTable, _committed, MemRegion*) \
nonstatic_field(CardTable, _guard_region, MemRegion) \
nonstatic_field(CardTable, _byte_map_base, jbyte*) \
- nonstatic_field(CardTableModRefBS, _defer_initial_card_mark, bool) \
- nonstatic_field(CardTableModRefBS, _card_table, CardTable*) \
+ nonstatic_field(CardTableBarrierSet, _defer_initial_card_mark, bool) \
+ nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \
\
nonstatic_field(CollectedHeap, _reserved, MemRegion) \
nonstatic_field(CollectedHeap, _barrier_set, BarrierSet*) \
@@ -1473,7 +1473,7 @@
declare_type(TenuredSpace, OffsetTableContigSpace) \
declare_toplevel_type(BarrierSet) \
declare_type(ModRefBarrierSet, BarrierSet) \
- declare_type(CardTableModRefBS, ModRefBarrierSet) \
+ declare_type(CardTableBarrierSet, ModRefBarrierSet) \
declare_toplevel_type(CardTable) \
declare_type(CardTableRS, CardTable) \
declare_toplevel_type(BarrierSet::Name) \
@@ -1502,8 +1502,8 @@
declare_toplevel_type(CardTable*) \
declare_toplevel_type(CardTable*const) \
declare_toplevel_type(CardTableRS*) \
- declare_toplevel_type(CardTableModRefBS*) \
- declare_toplevel_type(CardTableModRefBS**) \
+ declare_toplevel_type(CardTableBarrierSet*) \
+ declare_toplevel_type(CardTableBarrierSet**) \
declare_toplevel_type(CollectedHeap*) \
declare_toplevel_type(ContiguousSpace*) \
declare_toplevel_type(DefNewGeneration*) \
@@ -2237,7 +2237,7 @@
declare_constant(AgeTable::table_size) \
\
declare_constant(BarrierSet::ModRef) \
- declare_constant(BarrierSet::CardTableModRef) \
+ declare_constant(BarrierSet::CardTableBarrierSet) \
declare_constant(BarrierSet::G1BarrierSet) \
\
declare_constant(BOTConstants::LogN) \
--- a/src/hotspot/share/runtime/vmThread.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vmThread.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
--- a/src/hotspot/share/runtime/vm_operations.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/runtime/vm_operations.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,8 @@
#include "oops/symbol.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sweeper.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
--- a/src/hotspot/share/services/diagnosticCommand.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/diagnosticCommand.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,9 +30,11 @@
#include "compiler/directivesParser.hpp"
#include "gc/shared/vmGCOperations.hpp"
#include "memory/resourceArea.hpp"
+#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
#include "runtime/globals.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/os.hpp"
#include "services/diagnosticArgument.hpp"
@@ -44,7 +46,6 @@
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/macros.hpp"
-#include "oops/objArrayOop.inline.hpp"
static void loadAgentModule(TRAPS) {
--- a/src/hotspot/share/services/gcNotifier.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/gcNotifier.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "classfile/vmSymbols.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutex.hpp"
--- a/src/hotspot/share/services/heapDumper.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/heapDumper.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -37,6 +37,8 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayOop.inline.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
--- a/src/hotspot/share/services/lowMemoryDetector.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/lowMemoryDetector.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -27,7 +27,7 @@
#include "classfile/vmSymbols.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutex.hpp"
--- a/src/hotspot/share/services/management.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/management.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -38,7 +38,7 @@
#include "runtime/arguments.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/os.hpp"
--- a/src/hotspot/share/services/memTracker.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/memTracker.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "jvm.h"
#include "runtime/mutex.hpp"
+#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memBaseline.hpp"
--- a/src/hotspot/share/services/serviceUtil.hpp Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,84 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_SERVICES_SERVICEUTIL_HPP
-#define SHARE_VM_SERVICES_SERVICEUTIL_HPP
-
-#include "classfile/systemDictionary.hpp"
-#include "oops/objArrayOop.hpp"
-#include "oops/oop.inline.hpp"
-
-//
-// Serviceability utility functions.
-// (Shared by MM and JVMTI).
-//
-class ServiceUtil : public AllStatic {
- public:
-
- // Return true if oop represents an object that is "visible"
- // to the java world.
- static inline bool visible_oop(oop o) {
- // instance
- if (o->is_instance()) {
- // instance objects are visible
- if (o->klass() != SystemDictionary::Class_klass()) {
- return true;
- }
- if (java_lang_Class::is_primitive(o)) {
- return true;
- }
- // java.lang.Classes are visible
- Klass* k = java_lang_Class::as_Klass(o);
- if (k->is_klass()) {
- // if it's a class for an object, an object array, or
- // primitive (type) array then it's visible.
- if (k->is_instance_klass()) {
- return true;
- }
- if (k->is_objArray_klass()) {
- return true;
- }
- if (k->is_typeArray_klass()) {
- return true;
- }
- }
- fatal("visible_oop: should never reach here #1");
- return false;
- }
- // object arrays are visible if they aren't system object arrays
- if (o->is_objArray()) {
- return true;
- }
- // type arrays are visible
- if (o->is_typeArray()) {
- return true;
- }
- // everything else (Method*s, ...) aren't visible
- fatal("visible_oop: should never reach here #2");
- return false;
- }; // end of visible_oop()
-
-};
-
-#endif // SHARE_VM_SERVICES_SERVICEUTIL_HPP
--- a/src/hotspot/share/services/threadService.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/threadService.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -34,6 +34,7 @@
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
+#include "runtime/objectMonitor.inline.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadSMR.inline.hpp"
#include "runtime/vframe.hpp"
--- a/src/hotspot/share/services/threadService.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/services/threadService.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,12 +30,10 @@
#include "runtime/init.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/objectMonitor.hpp"
-#include "runtime/objectMonitor.inline.hpp"
#include "runtime/perfData.hpp"
#include "runtime/thread.hpp"
#include "runtime/threadSMR.hpp"
#include "services/management.hpp"
-#include "services/serviceUtil.hpp"
class OopClosure;
class ThreadDumpResult;
@@ -548,7 +546,7 @@
static bool wait_reenter_begin(JavaThread *java_thread, ObjectMonitor *obj_m) {
assert((java_thread != NULL), "Java thread should not be null here");
bool active = false;
- if (is_alive(java_thread) && ServiceUtil::visible_oop((oop)obj_m->object())) {
+ if (is_alive(java_thread)) {
active = contended_enter_begin(java_thread);
}
return active;
@@ -569,7 +567,7 @@
// like for vm internal objects and for external objects which are not contended
// thread status is not changed and contended enter stat is not collected.
_active = false;
- if (is_alive() && ServiceUtil::visible_oop((oop)obj_m->object()) && obj_m->contentions() > 0) {
+ if (is_alive() && obj_m->contentions() > 0) {
_stat = java_thread->get_thread_stat();
_active = contended_enter_begin(java_thread);
}
--- a/src/hotspot/share/trace/traceEventClasses.xsl Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/trace/traceEventClasses.xsl Sat Mar 24 01:08:35 2018 +0100
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="utf-8"?>
<!--
- Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
This code is free software; you can redistribute it and/or modify it
@@ -142,6 +142,48 @@
writeEventContent();
}
}
+
+ using TraceEvent::commit; // else commit() is hidden by overloaded versions in this class
+<xsl:variable name="instant" select="@is_instant"/>
+<!-- non-static method (only for non-instant events) -->
+<xsl:if test="$instant='false'">
+ <xsl:value-of select="concat(' Event', @id)"/>(
+ <xsl:for-each select="value|structvalue|transition_value|relation">
+ <xsl:apply-templates select="." mode="cpp-type"/><xsl:value-of select="concat(' ', @field)"/>
+ <xsl:if test="position() != last()">,
+ </xsl:if></xsl:for-each>) : TraceEvent&lt;<xsl:value-of select="concat('Event', @id)"/>&gt;(TIMED) {
+ if (should_commit()) {<xsl:for-each select="value|structvalue|transition_value|relation">
+ set_<xsl:value-of select="@field"/>(<xsl:value-of select="@field"/>);</xsl:for-each>
+ }
+ }
+
+ void commit(<xsl:for-each select="value|structvalue|transition_value|relation">
+ <xsl:apply-templates select="." mode="cpp-type"/><xsl:value-of select="concat(' ', @field)"/>
+ <xsl:if test="position() != last()">,
+ </xsl:if></xsl:for-each>) {
+ if (should_commit()) {
+ <xsl:for-each select="value|structvalue|transition_value|relation">set_<xsl:value-of select="@field"/>(<xsl:value-of select="@field"/>);
+ </xsl:for-each>commit();
+ }
+ }</xsl:if>
+<!-- static method (for all events) -->
+ static void commit(<xsl:if test="$instant='false'">const Ticks&amp; startTicks,
+ const Ticks&amp; endTicks<xsl:choose><xsl:when test="value|structvalue|transition_value|relation">,
+ </xsl:when></xsl:choose></xsl:if>
+ <xsl:for-each select="value|structvalue|transition_value|relation">
+ <xsl:apply-templates select="." mode="cpp-type"/><xsl:value-of select="concat(' ', @field)"/>
+ <xsl:if test="position() != last()">,
+ </xsl:if></xsl:for-each>) {
+ <xsl:value-of select="concat('Event', @id)"/> me(UNTIMED);
+
+ if (me.should_commit()) {
+ <xsl:if test="$instant='false'">me.set_starttime(startTicks);
+ me.set_endtime(endTicks);
+ </xsl:if>
+ <xsl:for-each select="value|structvalue|transition_value|relation">me.set_<xsl:value-of select="@field"/>(<xsl:value-of select="@field"/>);
+ </xsl:for-each>me.commit();
+ }
+ }
};
</xsl:template>
@@ -249,4 +291,13 @@
</xsl:if>
</xsl:template>
+
+<xsl:template match="value|transition_value|relation" mode="cpp-type">
+ <xsl:variable name="type" select="@type"/>
+ <xsl:value-of select="//primary_type[@symbol=$type]/@type"/>
+</xsl:template>
+<xsl:template match="structvalue" mode="cpp-type">
+ <xsl:value-of select="concat('const TraceStruct', @type, '&amp;')"/>
+</xsl:template>
+
</xsl:stylesheet>
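Aside: the `using TraceEvent::commit;` emitted above works around C++ name hiding — a commit overload declared in the generated event class hides every inherited commit, whatever its signature. A minimal sketch of the rule, with hypothetical types:

    struct Base {
      void commit() {}
    };
    struct Derived : Base {
      using Base::commit;        // re-exposes the inherited overload
      void commit(int value) {}  // would otherwise hide Base::commit()
    };
    int main() {
      Derived d;
      d.commit(42);
      d.commit();  // compiles only thanks to the using-declaration
      return 0;
    }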
--- a/src/hotspot/share/utilities/debug.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/utilities/debug.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -40,7 +40,8 @@
#include "prims/privilegedStack.hpp"
#include "runtime/arguments.hpp"
#include "runtime/atomic.hpp"
-#include "runtime/frame.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/share/utilities/exceptions.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/utilities/exceptions.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,6 +30,7 @@
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
+#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
--- a/src/hotspot/share/utilities/globalDefinitions.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -1012,12 +1012,6 @@
#undef min
#endif
-// The following defines serve the purpose of preventing use of accidentally
-// included min max macros from compiling, while continuing to allow innocent
-// min and max identifiers in the code to compile as intended.
-#define max max
-#define min min
-
// It is necessary to use templates here. Having normal overloaded
// functions does not work because it is necessary to provide both 32-
// and 64-bit overloaded functions, which does not work, and having
--- a/src/hotspot/share/utilities/globalDefinitions_xlc.hpp Thu Mar 29 20:12:02 2018 +0100
+++ b/src/hotspot/share/utilities/globalDefinitions_xlc.hpp Sat Mar 24 01:08:35 2018 +0100
@@ -152,7 +152,18 @@
#endif
// Inlining support
-#define NOINLINE
-#define ALWAYSINLINE inline __attribute__((always_inline))
+//
+// Be aware that for function/method declarations, xlC only supports the following
+// syntax (i.e. the attribute must be placed AFTER the function/method declarator):
+//
+// void* operator new(size_t size) throw() NOINLINE;
+//
+// For function/method definitions, the more common placement BEFORE the
+// function/method declarator seems to be supported as well:
+//
+// NOINLINE void* CHeapObj<F>::operator new(size_t size) throw() {...}
+
+#define NOINLINE __attribute__((__noinline__))
+#define ALWAYSINLINE inline __attribute__((__always_inline__))
#endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/CommandProcessor.java Sat Mar 24 01:08:35 2018 +0100
@@ -52,6 +52,8 @@
import sun.jvm.hotspot.memory.SymbolTable;
import sun.jvm.hotspot.memory.SystemDictionary;
import sun.jvm.hotspot.memory.Universe;
+import sun.jvm.hotspot.gc.shared.CollectedHeap;
+import sun.jvm.hotspot.gc.g1.G1CollectedHeap;
import sun.jvm.hotspot.oops.DefaultHeapVisitor;
import sun.jvm.hotspot.oops.HeapVisitor;
import sun.jvm.hotspot.oops.InstanceKlass;
@@ -1656,6 +1658,21 @@
}
}
},
+ new Command("g1regiondetails", false) {
+ public void doit(Tokens t) {
+ if (t.countTokens() != 0) {
+ usage();
+ } else {
+ CollectedHeap heap = VM.getVM().getUniverse().heap();
+ if (!(heap instanceof G1CollectedHeap)) {
+ out.println("This command is valid only for G1GC.");
+ return;
+ }
+ out.println("Region Details:");
+ ((G1CollectedHeap)heap).printRegionDetails(out);
+ }
+ }
+ },
new Command("universe", false) {
public void doit(Tokens t) {
if (t.countTokens() != 0) {
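For context: the new command is run from the SA command line (clhsdb); each output line follows the printOn() format added in HeapRegion.java — bottom, top, and end addresses, then the region's type annotation. A hypothetical session (addresses invented for illustration):

    hsdb> g1regiondetails
    Region Details:
    Region: 0x00000000fe000000,0x00000000fe100000,0x00000000fe100000:Old
    Region: 0x00000000fe100000,0x00000000fe140000,0x00000000fe200000:Eden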
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/G1CollectedHeap.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,6 +33,7 @@
import sun.jvm.hotspot.gc.shared.CollectedHeap;
import sun.jvm.hotspot.gc.shared.CollectedHeapName;
import sun.jvm.hotspot.gc.shared.SpaceClosure;
+import sun.jvm.hotspot.gc.shared.PrintRegionClosure;
import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
@@ -40,6 +41,7 @@
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.tools.HeapSummary;
// Mirror class for G1CollectedHeap.
@@ -133,6 +135,14 @@
tty.print("garbage-first heap");
tty.print(" [" + mr.start() + ", " + mr.end() + "]");
tty.println(" region size " + (HeapRegion.grainBytes() / 1024) + "K");
+
+ HeapSummary sum = new HeapSummary();
+ sum.printG1HeapSummary(this);
+ }
+
+ public void printRegionDetails(PrintStream tty) {
+ PrintRegionClosure prc = new PrintRegionClosure(tty);
+ heapRegionIterate(prc);
}
public G1CollectedHeap(Address addr) {
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegion.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegion.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
package sun.jvm.hotspot.gc.g1;
+import java.io.PrintStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Observable;
@@ -124,4 +125,9 @@
public static long getPointerSize() {
return pointerSize;
}
+
+ public void printOn(PrintStream tty) {
+ tty.print("Region: " + bottom() + "," + top() + "," + end());
+ tty.println(":" + type.typeAnnotation());
+ }
}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionType.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/g1/HeapRegionType.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -40,8 +40,13 @@
private static int freeTag;
private static int youngMask;
+ private static int edenTag;
+ private static int survTag;
private static int humongousMask;
+ private static int startsHumongousTag;
+ private static int continuesHumongousTag;
private static int pinnedMask;
+ private static int archiveMask;
private static int oldMask;
private static CIntegerField tagField;
private int tag;
@@ -61,6 +66,11 @@
freeTag = db.lookupIntConstant("HeapRegionType::FreeTag");
youngMask = db.lookupIntConstant("HeapRegionType::YoungMask");
+ edenTag = db.lookupIntConstant("HeapRegionType::EdenTag");
+ survTag = db.lookupIntConstant("HeapRegionType::SurvTag");
+ startsHumongousTag = db.lookupIntConstant("HeapRegionType::StartsHumongousTag");
+ continuesHumongousTag = db.lookupIntConstant("HeapRegionType::ContinuesHumongousTag");
+ archiveMask = db.lookupIntConstant("HeapRegionType::ArchiveMask");
humongousMask = db.lookupIntConstant("HeapRegionType::HumongousMask");
pinnedMask = db.lookupIntConstant("HeapRegionType::PinnedMask");
oldMask = db.lookupIntConstant("HeapRegionType::OldMask");
@@ -70,6 +80,14 @@
return tagField.getValue(addr) == freeTag;
}
+ public boolean isEden() {
+ return tagField.getValue(addr) == edenTag;
+ }
+
+ public boolean isSurvivor() {
+ return tagField.getValue(addr) == survTag;
+ }
+
public boolean isYoung() {
return (tagField.getValue(addr) & youngMask) != 0;
}
@@ -78,6 +96,18 @@
return (tagField.getValue(addr) & humongousMask) != 0;
}
+ public boolean isStartsHumongous() {
+ return tagField.getValue(addr) == startsHumongousTag;
+ }
+
+ public boolean isContinuesHumongous() {
+ return tagField.getValue(addr) == continuesHumongousTag;
+ }
+
+ public boolean isArchive() {
+ return (tagField.getValue(addr) & archiveMask) != 0;
+ }
+
public boolean isPinned() {
return (tagField.getValue(addr) & pinnedMask) != 0;
}
@@ -89,4 +119,32 @@
public HeapRegionType(Address addr) {
super(addr);
}
+
+ public String typeAnnotation() {
+ if (isFree()) {
+ return "Free";
+ }
+ if (isEden()) {
+ return "Eden";
+ }
+ if (isSurvivor()) {
+ return "Survivor";
+ }
+ if (isStartsHumongous()) {
+ return "StartsHumongous";
+ }
+ if (isContinuesHumongous()) {
+ return "ContinuesHumongous";
+ }
+ if (isArchive()) {
+ return "Archive";
+ }
+ if (isPinned()) {
+ return "Pinned";
+ }
+ if (isOld()) {
+ return "Old";
+ }
+ return "Unknown Region Type";
+ }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/gc/shared/PrintRegionClosure.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.shared;
+
+import java.io.PrintStream;
+import sun.jvm.hotspot.gc.g1.HeapRegion;
+
+public class PrintRegionClosure implements SpaceClosure {
+ private PrintStream tty;
+
+ public PrintRegionClosure(PrintStream tty) {
+ this.tty = tty;
+ }
+
+ public void doSpace(Space hr) {
+ ((HeapRegion)hr).printOn(tty);
+ }
+}
--- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -111,22 +111,7 @@
}
}
} else if (heap instanceof G1CollectedHeap) {
- G1CollectedHeap g1h = (G1CollectedHeap) heap;
- G1MonitoringSupport g1mm = g1h.g1mm();
- long edenRegionNum = g1mm.edenRegionNum();
- long survivorRegionNum = g1mm.survivorRegionNum();
- HeapRegionSetBase oldSet = g1h.oldSet();
- HeapRegionSetBase humongousSet = g1h.humongousSet();
- long oldRegionNum = oldSet.length() + humongousSet.length();
- printG1Space("G1 Heap:", g1h.n_regions(),
- g1h.used(), g1h.capacity());
- System.out.println("G1 Young Generation:");
- printG1Space("Eden Space:", edenRegionNum,
- g1mm.edenUsed(), g1mm.edenCommitted());
- printG1Space("Survivor Space:", survivorRegionNum,
- g1mm.survivorUsed(), g1mm.survivorCommitted());
- printG1Space("G1 Old Generation:", oldRegionNum,
- g1mm.oldUsed(), g1mm.oldCommitted());
+ printG1HeapSummary((G1CollectedHeap)heap);
} else if (heap instanceof ParallelScavengeHeap) {
ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
PSYoungGen youngGen = psh.youngGen();
@@ -217,6 +202,24 @@
System.out.println(alignment + (double)space.used() * 100.0 / space.capacity() + "% used");
}
+ public void printG1HeapSummary(G1CollectedHeap g1h) {
+ G1MonitoringSupport g1mm = g1h.g1mm();
+ long edenRegionNum = g1mm.edenRegionNum();
+ long survivorRegionNum = g1mm.survivorRegionNum();
+ HeapRegionSetBase oldSet = g1h.oldSet();
+ HeapRegionSetBase humongousSet = g1h.humongousSet();
+ long oldRegionNum = oldSet.length() + humongousSet.length();
+ printG1Space("G1 Heap:", g1h.n_regions(),
+ g1h.used(), g1h.capacity());
+ System.out.println("G1 Young Generation:");
+ printG1Space("Eden Space:", edenRegionNum,
+ g1mm.edenUsed(), g1mm.edenCommitted());
+ printG1Space("Survivor Space:", survivorRegionNum,
+ g1mm.survivorUsed(), g1mm.survivorCommitted());
+ printG1Space("G1 Old Generation:", oldRegionNum,
+ g1mm.oldUsed(), g1mm.oldCommitted());
+ }
+
private void printG1Space(String spaceName, long regionNum,
long used, long capacity) {
long free = capacity - used;
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantPool.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotConstantPool.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -663,8 +663,12 @@
}
@Override
+ public void loadReferencedType(int cpi, int opcode) {
+ loadReferencedType(cpi, opcode, true /* initialize */);
+ }
+
@SuppressWarnings("fallthrough")
- public void loadReferencedType(int cpi, int opcode) {
+ public void loadReferencedType(int cpi, int opcode, boolean initialize) {
int index;
switch (opcode) {
case Bytecodes.CHECKCAST:
@@ -718,9 +722,11 @@
case UnresolvedClass:
case UnresolvedClassInError:
final HotSpotResolvedObjectTypeImpl type = compilerToVM().resolveTypeInPool(this, index);
- Class<?> klass = type.mirror();
- if (!klass.isPrimitive() && !klass.isArray()) {
- UNSAFE.ensureClassInitialized(klass);
+ if (initialize) {
+ Class<?> klass = type.mirror();
+ if (!klass.isPrimitive() && !klass.isArray()) {
+ UNSAFE.ensureClassInitialized(klass);
+ }
}
if (tag == JVM_CONSTANT.MethodRef) {
if (Bytecodes.isInvokeHandleAlias(opcode) && isSignaturePolymorphicHolder(type)) {
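Aside: the new three-argument overload lets a JVMCI client resolve the type referenced by an invoke without triggering class initialization; the existing two-argument form keeps the eager behavior. A hypothetical caller:

    // cp is the HotSpotConstantPool of the method being parsed; cpi and
    // opcode identify the invoke. Passing false skips ensureClassInitialized.
    cp.loadReferencedType(cpi, opcode);         // resolve and initialize
    cp.loadReferencedType(cpi, opcode, false);  // resolve only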
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64Assembler.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64Assembler.java Sat Mar 24 01:08:35 2018 +0100
@@ -195,11 +195,6 @@
private static final int VEX_W = 0x80;
}
- private static class AvxVectorLen {
- private static final int AVX_128bit = 0x0;
- private static final int AVX_256bit = 0x1;
- }
-
private static class VexSimdPrefix {
private static final int VEX_SIMD_NONE = 0x0;
private static final int VEX_SIMD_66 = 0x1;
@@ -208,11 +203,44 @@
}
private static class VexOpcode {
+ private static final int VEX_OPCODE_NONE = 0x0;
private static final int VEX_OPCODE_0F = 0x1;
private static final int VEX_OPCODE_0F_38 = 0x2;
private static final int VEX_OPCODE_0F_3A = 0x3;
}
+ public static class AvxVectorLen {
+ public static final int AVX_128bit = 0x0;
+ public static final int AVX_256bit = 0x1;
+ public static final int AVX_512bit = 0x2;
+ public static final int AVX_NoVec = 0x4;
+ }
+
+ public static class EvexTupleType {
+ public static final int EVEX_FV = 0;
+ public static final int EVEX_HV = 4;
+ public static final int EVEX_FVM = 6;
+ public static final int EVEX_T1S = 7;
+ public static final int EVEX_T1F = 11;
+ public static final int EVEX_T2 = 13;
+ public static final int EVEX_T4 = 15;
+ public static final int EVEX_T8 = 17;
+ public static final int EVEX_HVM = 18;
+ public static final int EVEX_QVM = 19;
+ public static final int EVEX_OVM = 20;
+ public static final int EVEX_M128 = 21;
+ public static final int EVEX_DUP = 22;
+ public static final int EVEX_ETUP = 23;
+ }
+
+ public static class EvexInputSizeInBits {
+ public static final int EVEX_8bit = 0;
+ public static final int EVEX_16bit = 1;
+ public static final int EVEX_32bit = 2;
+ public static final int EVEX_64bit = 3;
+ public static final int EVEX_NObit = 4;
+ }
+
private AMD64InstructionAttr curAttributes;
AMD64InstructionAttr getCurAttributes() {
@@ -873,6 +901,7 @@
opc = VexOpcode.VEX_OPCODE_0F_3A;
break;
default:
+ opc = VexOpcode.VEX_OPCODE_NONE;
isSimd = false;
break;
}
@@ -1770,6 +1799,13 @@
emitOperandHelper(dst, src, 0);
}
+ public final void bsfq(Register dst, Register src) {
+ int encode = prefixqAndEncode(dst.encoding(), src.encoding());
+ emitByte(0x0F);
+ emitByte(0xBC);
+ emitByte(0xC0 | encode);
+ }
+
public final void bsrl(Register dst, Register src) {
int encode = prefixAndEncode(dst.encoding(), src.encoding());
emitByte(0x0F);
@@ -1857,6 +1893,26 @@
emitByte(0xC0 | encode);
}
+ public final void evmovdquq(Register dst, AMD64Address src, int vectorLen) {
+ assert supports(CPUFeature.AVX512F);
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(vectorLen, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true, target);
+ attributes.setAddressAttributes(/* tuple_type */ EvexTupleType.EVEX_FVM, /* input_size_in_bits */ EvexInputSizeInBits.EVEX_NObit);
+ attributes.setIsEvexInstruction();
+ vexPrefix(src, Register.None, dst, VexSimdPrefix.VEX_SIMD_F3, VexOpcode.VEX_OPCODE_0F, attributes);
+ emitByte(0x6F);
+ emitOperandHelper(dst, src, 0);
+ }
+
+ public final void evpcmpeqb(Register kdst, Register nds, AMD64Address src, int vectorLen) {
+ assert supports(CPUFeature.AVX512BW);
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(vectorLen, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false, target);
+ attributes.setIsEvexInstruction();
+ attributes.setAddressAttributes(/* tuple_type */ EvexTupleType.EVEX_FVM, /* input_size_in_bits */ EvexInputSizeInBits.EVEX_NObit);
+ vexPrefix(src, nds, kdst, VexSimdPrefix.VEX_SIMD_66, VexOpcode.VEX_OPCODE_0F, attributes);
+ emitByte(0x74);
+ emitOperandHelper(kdst, src, 0);
+ }
+
public final void hlt() {
emitByte(0xF4);
}
@@ -1982,6 +2038,32 @@
}
}
+ // This instruction sets the ZF or CF flags
+ public final void kortestql(Register src1, Register src2) {
+ assert supports(CPUFeature.AVX512BW);
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false, target);
+ int encode = vexPrefixAndEncode(src1, Register.None, src2, VexSimdPrefix.VEX_SIMD_NONE, VexOpcode.VEX_OPCODE_0F, attributes);
+ emitByte(0x98);
+ emitByte(0xC0 | encode);
+ }
+
+ public final void kmovql(Register dst, Register src) {
+ assert supports(CPUFeature.AVX512BW);
+ if (src.getRegisterCategory().equals(AMD64.MASK)) {
+ // kmovql(KRegister dst, KRegister src)
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false, target);
+ int encode = vexPrefixAndEncode(dst, Register.None, src, VexSimdPrefix.VEX_SIMD_NONE, VexOpcode.VEX_OPCODE_0F, attributes);
+ emitByte(0x90);
+ emitByte(0xC0 | encode);
+ } else {
+ // kmovql(KRegister dst, Register src)
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false, target);
+ int encode = vexPrefixAndEncode(dst, Register.None, src, VexSimdPrefix.VEX_SIMD_F2, VexOpcode.VEX_OPCODE_0F, attributes);
+ emitByte(0x92);
+ emitByte(0xC0 | encode);
+ }
+ }
+
public final void lead(Register dst, AMD64Address src) {
prefix(src, dst);
emitByte(0x8D);
@@ -2050,6 +2132,15 @@
emitOperandHelper(dst, src, 0);
}
+ /**
+ * @param wide use a 4-byte encoding for displacements that would normally fit in a byte
+ */
+ public final void movl(Register dst, AMD64Address src, boolean wide) {
+ prefix(src, dst);
+ emitByte(0x8B);
+ emitOperandHelper(dst, src, wide, 0);
+ }
+
public final void movl(AMD64Address dst, int imm32) {
prefix(dst);
emitByte(0xC7);
@@ -2291,6 +2382,10 @@
NOT.emit(this, DWORD, dst);
}
+ public final void notq(Register dst) {
+ NOT.emit(this, QWORD, dst);
+ }
+
@Override
public final void ensureUniquePC() {
nop();
@@ -2540,7 +2635,7 @@
emitByte(0xC0 | encode);
}
- void pcmpestri(Register dst, AMD64Address src, int imm8) {
+ public final void pcmpestri(Register dst, AMD64Address src, int imm8) {
assert supports(CPUFeature.SSE4_2);
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rexVexW */ false, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, target);
simdPrefix(dst, Register.None, src, VexSimdPrefix.VEX_SIMD_66, VexOpcode.VEX_OPCODE_0F_3A, attributes);
@@ -2549,7 +2644,7 @@
emitByte(imm8);
}
- void pcmpestri(Register dst, Register src, int imm8) {
+ public final void pcmpestri(Register dst, Register src, int imm8) {
assert supports(CPUFeature.SSE4_2);
AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rexVexW */ false, /* legacyMode */ false, /* noMaskReg */ false, /* usesVl */ false, target);
int encode = simdPrefixAndEncode(dst, Register.None, src, VexSimdPrefix.VEX_SIMD_66, VexOpcode.VEX_OPCODE_0F_3A, attributes);
@@ -2558,6 +2653,26 @@
emitByte(imm8);
}
+ public final void pmovzxbw(Register dst, AMD64Address src) {
+ assert supports(CPUFeature.SSE4_2);
+ // XXX legacy_mode should be: _legacy_mode_bw
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false, target);
+ attributes.setAddressAttributes(/* tuple_type */ EvexTupleType.EVEX_HVM, /* input_size_in_bits */ EvexInputSizeInBits.EVEX_NObit);
+ simdPrefix(dst, Register.None, src, VexSimdPrefix.VEX_SIMD_66, VexOpcode.VEX_OPCODE_0F_38, attributes);
+ emitByte(0x30);
+ emitOperandHelper(dst, src, 0);
+ }
+
+ public final void vpmovzxbw(Register dst, AMD64Address src, int vectorLen) {
+ assert supports(CPUFeature.AVX);
+ // XXX legacy_mode should be: _legacy_mode_bw
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(vectorLen, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false, target);
+ attributes.setAddressAttributes(/* tuple_type */ EvexTupleType.EVEX_HVM, /* input_size_in_bits */ EvexInputSizeInBits.EVEX_NObit);
+ vexPrefix(src, Register.None, dst, VexSimdPrefix.VEX_SIMD_66, VexOpcode.VEX_OPCODE_0F_38, attributes);
+ emitByte(0x30);
+ emitOperandHelper(dst, src, 0);
+ }
+
public final void push(Register src) {
int encode = prefixAndEncode(src.encoding);
emitByte(0x50 | encode);
@@ -2634,6 +2749,15 @@
emitByte(0xC0 | encode);
}
+ public final void vpxor(Register dst, Register nds, AMD64Address src) {
+ assert supports(CPUFeature.AVX);
+ AMD64InstructionAttr attributes = new AMD64InstructionAttr(AvxVectorLen.AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true, target);
+ attributes.setAddressAttributes(/* tuple_type */ EvexTupleType.EVEX_FV, /* input_size_in_bits */ EvexInputSizeInBits.EVEX_32bit);
+ vexPrefix(src, nds, dst, VexSimdPrefix.VEX_SIMD_66, VexOpcode.VEX_OPCODE_0F, attributes);
+ emitByte(0xEF);
+ emitOperandHelper(dst, src, 0);
+ }
+
public final void pslld(Register dst, int imm8) {
assert isUByte(imm8) : "invalid value";
assert dst.getRegisterCategory().equals(AMD64.XMM);
@@ -3843,4 +3967,10 @@
emitByte(0x0f);
emitByte(0x0b);
}
+
+ public void lfence() {
+ emitByte(0x0f);
+ emitByte(0xae);
+ emitByte(0xe8);
+ }
}
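For context, the new AVX-512 emitters compose into the usual "compare, then branch on mask" idiom: evpcmpeqb writes a bit-per-byte equality mask into an opmask register, and kortestql sets ZF when the mask is all zeros and CF when it is all ones. A minimal sketch, assuming an AMD64MacroAssembler `masm`, data in `xmm1` and at `[rsi]`, mask register `k7`, and a label `noMatch` (register and label choices are illustrative, not from the changeset):

    masm.evpcmpeqb(k7, xmm1, new AMD64Address(rsi), AvxVectorLen.AVX_512bit);
    masm.kortestql(k7, k7);                // ZF = no byte matched, CF = all bytes matched
    masm.jcc(ConditionFlag.Zero, noMatch); // branch when nothing was equal

The new lfence() emits the fixed three-byte encoding 0F AE E8 (escape 0F AE with ModRM /5) from the Intel SDM; the instruction takes no operands, which is why it is emitted as literal bytes.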
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.bytecode/src/org/graalvm/compiler/bytecode/BytecodeDisassembler.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.bytecode/src/org/graalvm/compiler/bytecode/BytecodeDisassembler.java Sat Mar 24 01:08:35 2018 +0100
@@ -321,4 +321,61 @@
}
// @formatter:on
}
+
+ public static JavaMethod getInvokedMethodAt(ResolvedJavaMethod method, int invokeBci) {
+ if (method.getCode() == null) {
+ return null;
+ }
+ ConstantPool cp = method.getConstantPool();
+ BytecodeStream stream = new BytecodeStream(method.getCode());
+ int opcode = stream.currentBC();
+ while (opcode != Bytecodes.END) {
+ int bci = stream.currentBCI();
+ if (bci == invokeBci) {
+ if (stream.nextBCI() > bci + 1) {
+ switch (opcode) {
+ case INVOKEVIRTUAL:
+ case INVOKESPECIAL:
+ case INVOKESTATIC: {
+ int cpi = stream.readCPI();
+ JavaMethod callee = cp.lookupMethod(cpi, opcode);
+ return callee;
+ }
+ case INVOKEINTERFACE: {
+ int cpi = stream.readCPI();
+ JavaMethod callee = cp.lookupMethod(cpi, opcode);
+ return callee;
+ }
+ case INVOKEDYNAMIC: {
+ int cpi = stream.readCPI4();
+ JavaMethod callee = cp.lookupMethod(cpi, opcode);
+ return callee;
+ }
+ default:
+ throw new InternalError(BytecodeDisassembler.disassembleOne(method, invokeBci));
+ }
+ }
+ }
+ stream.next();
+ opcode = stream.currentBC();
+ }
+ return null;
+ }
+
+ public static int getBytecodeAt(ResolvedJavaMethod method, int invokeBci) {
+ if (method.getCode() == null) {
+ return -1;
+ }
+ BytecodeStream stream = new BytecodeStream(method.getCode());
+ int opcode = stream.currentBC();
+ while (opcode != Bytecodes.END) {
+ int bci = stream.currentBCI();
+ if (bci == invokeBci) {
+ return opcode;
+ }
+ stream.next();
+ opcode = stream.currentBC();
+ }
+ return -1;
+ }
}
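A minimal usage sketch for the new helpers (the method and bci values are illustrative):

    // Resolve the callee at a known invoke bci; null if the method has no
    // bytecode or no invoke instruction starts at that bci.
    JavaMethod callee = BytecodeDisassembler.getInvokedMethodAt(method, 42);
    if (callee != null) {
        System.out.println(callee.format("%H.%n(%p)"));
    }
    int opcode = BytecodeDisassembler.getBytecodeAt(method, 42); // -1 if not found

Note that getInvokedMethodAt throws InternalError when the instruction at the bci has operands but is not an invoke, and returns null for operand-less bytecodes.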
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.code/src/org/graalvm/compiler/code/DisassemblerProvider.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.code/src/org/graalvm/compiler/code/DisassemblerProvider.java Sat Mar 24 01:08:35 2018 +0100
@@ -58,7 +58,7 @@
}
/**
- * Gets the name denoting the format of the disassmembly return by this object.
+ * Gets the name denoting the format of the disassembly returned by this object.
*/
String getName();
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64AddressNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64AddressNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -25,6 +25,7 @@
import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
import org.graalvm.compiler.core.common.LIRKind;
+import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.Simplifiable;
import org.graalvm.compiler.graph.spi.SimplifierTool;
@@ -72,7 +73,7 @@
}
public void canonicalizeIndex(SimplifierTool tool) {
- if (index instanceof AddNode) {
+ if (index instanceof AddNode && ((IntegerStamp) index.stamp(NodeView.DEFAULT)).getBits() == 64) {
AddNode add = (AddNode) index;
ValueNode valX = add.getX();
if (valX instanceof PhiNode) {
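The new guard restricts the re-association to 64-bit indexes. A plausible reading (not stated in the changeset): the rewrite decomposes an AddNode into the address's base/displacement parts, and for a 32-bit index the add wraps before the implicit extension to the 64-bit effective address, so splitting it could change the computed address on overflow. A distilled demonstration of the hazard:

    int phi = Integer.MAX_VALUE;
    int c = 1;
    long narrow = (long) (phi + c); // -2147483648 after int overflow
    long wide = (long) phi + c;     //  2147483648
    assert narrow != wide;          // why a 32-bit index must not be split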
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64ArithmeticLIRGenerator.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64ArithmeticLIRGenerator.java Sat Mar 24 01:08:35 2018 +0100
@@ -66,15 +66,14 @@
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.DREM;
import static org.graalvm.compiler.lir.amd64.AMD64Arithmetic.FREM;
+import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp.BinaryIntrinsicOpcode.POW;
import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.COS;
+import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.EXP;
import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG;
import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.LOG10;
import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.SIN;
import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.TAN;
-import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicUnaryOp.UnaryIntrinsicOpcode.EXP;
-import static org.graalvm.compiler.lir.amd64.AMD64MathIntrinsicBinaryOp.BinaryIntrinsicOpcode.POW;
-import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64BinaryArithmetic;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
@@ -83,10 +82,11 @@
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64RRMOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64Shift;
+import org.graalvm.compiler.asm.amd64.AMD64Assembler.AVXOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.SSEOp;
-import org.graalvm.compiler.asm.amd64.AMD64Assembler.AVXOp;
import org.graalvm.compiler.core.common.LIRKind;
+import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.calc.FloatConvert;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.ConstantValue;
@@ -107,6 +107,7 @@
import org.graalvm.compiler.lir.amd64.AMD64SignExtendOp;
import org.graalvm.compiler.lir.amd64.AMD64Unary;
import org.graalvm.compiler.lir.gen.ArithmeticLIRGenerator;
+import org.graalvm.compiler.lir.gen.LIRGenerator;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64.CPUFeature;
@@ -114,6 +115,7 @@
import jdk.vm.ci.code.CodeUtil;
import jdk.vm.ci.code.Register;
import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.code.TargetDescription;
import jdk.vm.ci.meta.AllocatableValue;
import jdk.vm.ci.meta.Constant;
import jdk.vm.ci.meta.JavaConstant;
@@ -122,7 +124,6 @@
import jdk.vm.ci.meta.VMConstant;
import jdk.vm.ci.meta.Value;
import jdk.vm.ci.meta.ValueKind;
-import jdk.vm.ci.code.TargetDescription;
/**
* This class implements the AMD64 specific portion of the LIR generator.
@@ -131,6 +132,40 @@
private static final RegisterValue RCX_I = AMD64.rcx.asValue(LIRKind.value(AMD64Kind.DWORD));
+ public AMD64ArithmeticLIRGenerator(Maths maths) {
+ this.maths = maths == null ? new Maths() {
+ } : maths;
+ }
+
+ private final Maths maths;
+
+ /**
+ * Interface for emitting LIR for selected {@link Math} routines. A {@code null} return value
+ * for any method in this interface means the caller must emit the LIR itself.
+ */
+ public interface Maths {
+
+ @SuppressWarnings("unused")
+ default Variable emitLog(LIRGenerator gen, Value input, boolean base10) {
+ return null;
+ }
+
+ @SuppressWarnings("unused")
+ default Variable emitCos(LIRGenerator gen, Value input) {
+ return null;
+ }
+
+ @SuppressWarnings("unused")
+ default Variable emitSin(LIRGenerator gen, Value input) {
+ return null;
+ }
+
+ @SuppressWarnings("unused")
+ default Variable emitTan(LIRGenerator gen, Value input) {
+ return null;
+ }
+ }
+
@Override
public Variable emitNegate(Value inputVal) {
AllocatableValue input = getLIRGen().asAllocatable(inputVal);
@@ -1042,33 +1077,49 @@
@Override
public Value emitMathLog(Value input, boolean base10) {
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
- getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), base10 ? LOG10 : LOG, result, getLIRGen().asAllocatable(input), stackSlot));
+ LIRGenerator gen = getLIRGen();
+ Variable result = maths.emitLog(gen, input, base10);
+ if (result == null) {
+ result = gen.newVariable(LIRKind.combine(input));
+ AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
+ gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), base10 ? LOG10 : LOG, result, gen.asAllocatable(input), stackSlot));
+ }
return result;
}
@Override
public Value emitMathCos(Value input) {
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
- getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), COS, result, getLIRGen().asAllocatable(input), stackSlot));
+ LIRGenerator gen = getLIRGen();
+ Variable result = maths.emitCos(gen, input);
+ if (result == null) {
+ result = gen.newVariable(LIRKind.combine(input));
+ AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
+ gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), COS, result, gen.asAllocatable(input), stackSlot));
+ }
return result;
}
@Override
public Value emitMathSin(Value input) {
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
- getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), SIN, result, getLIRGen().asAllocatable(input), stackSlot));
+ LIRGenerator gen = getLIRGen();
+ Variable result = maths.emitSin(gen, input);
+ if (result == null) {
+ result = gen.newVariable(LIRKind.combine(input));
+ AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
+ gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), SIN, result, gen.asAllocatable(input), stackSlot));
+ }
return result;
}
@Override
public Value emitMathTan(Value input) {
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- AllocatableValue stackSlot = getLIRGen().getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
- getLIRGen().append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), TAN, result, getLIRGen().asAllocatable(input), stackSlot));
+ LIRGenerator gen = getLIRGen();
+ Variable result = maths.emitTan(gen, input);
+ if (result == null) {
+ result = gen.newVariable(LIRKind.combine(input));
+ AllocatableValue stackSlot = gen.getResult().getFrameMapBuilder().allocateSpillSlot(LIRKind.value(AMD64Kind.QWORD));
+ gen.append(new AMD64MathIntrinsicUnaryOp(getAMD64LIRGen(), TAN, result, gen.asAllocatable(input), stackSlot));
+ }
return result;
}
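A sketch of plugging into the new Maths hook (everything except the names introduced in this changeset is hypothetical); a null return from any hook method falls back to the default AMD64MathIntrinsicUnaryOp lowering shown above:

    AMD64ArithmeticLIRGenerator arith = new AMD64ArithmeticLIRGenerator(new AMD64ArithmeticLIRGenerator.Maths() {
        @Override
        public Variable emitSin(LIRGenerator gen, Value input) {
            // hypothetical helper; may itself return null to decline
            return emitSinViaForeignStub(gen, input);
        }
        // emitCos/emitTan/emitLog keep their default null-returning bodies
    });

Passing null to the constructor installs the all-default Maths instance, so existing callers keep the old behavior.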
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64LIRGenerator.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64LIRGenerator.java Sat Mar 24 01:08:35 2018 +0100
@@ -59,6 +59,7 @@
import org.graalvm.compiler.lir.Variable;
import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
import org.graalvm.compiler.lir.amd64.AMD64ArithmeticLIRGeneratorTool;
+import org.graalvm.compiler.lir.amd64.AMD64ArrayCompareToOp;
import org.graalvm.compiler.lir.amd64.AMD64ArrayEqualsOp;
import org.graalvm.compiler.lir.amd64.AMD64Binary;
import org.graalvm.compiler.lir.amd64.AMD64BinaryConsumer;
@@ -74,6 +75,7 @@
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.ReturnOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.amd64.AMD64ControlFlow.TableSwitchOp;
+import org.graalvm.compiler.lir.amd64.AMD64LFenceOp;
import org.graalvm.compiler.lir.amd64.AMD64Move;
import org.graalvm.compiler.lir.amd64.AMD64Move.CompareAndSwapOp;
import org.graalvm.compiler.lir.amd64.AMD64Move.MembarOp;
@@ -490,6 +492,20 @@
}
@Override
+ public Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
+ LIRKind resultKind = LIRKind.value(AMD64Kind.DWORD);
+ RegisterValue raxRes = AMD64.rax.asValue(resultKind);
+ RegisterValue cnt1 = AMD64.rcx.asValue(length1.getValueKind());
+ RegisterValue cnt2 = AMD64.rdx.asValue(length2.getValueKind());
+ emitMove(cnt1, length1);
+ emitMove(cnt2, length2);
+ append(new AMD64ArrayCompareToOp(this, kind1, kind2, raxRes, array1, array2, cnt1, cnt2));
+ Variable result = newVariable(resultKind);
+ emitMove(result, raxRes);
+ return result;
+ }
+
+ @Override
public Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length) {
Variable result = newVariable(LIRKind.value(AMD64Kind.DWORD));
append(new AMD64ArrayEqualsOp(this, kind, result, array1, array2, asAllocatable(length)));
@@ -554,4 +570,8 @@
public LIRInstruction createZapArgumentSpace(StackSlot[] zappedStack, JavaConstant[] zapValues) {
return new AMD64ZapStackOp(zappedStack, zapValues);
}
+
+ public void emitLFence() {
+ append(new AMD64LFenceOp());
+ }
}
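Two notes on the additions (an interpretation, not from the changeset itself): emitArrayCompareTo pins the lengths to rcx/rdx and the result to rax, presumably because AMD64ArrayCompareToOp leans on SSE4.2 pcmpestri, which operates through implicit fixed registers; copying raxRes into a fresh Variable immediately afterwards releases rax back to the register allocator. A hypothetical call site:

    Variable order = gen.emitArrayCompareTo(JavaKind.Byte, JavaKind.Byte, a1, a2, len1, len2);

emitLFence simply appends the new AMD64LFenceOp, giving front ends a single entry point for serializing the instruction stream.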
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64NodeLIRBuilder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.amd64/src/org/graalvm/compiler/core/amd64/AMD64NodeLIRBuilder.java Sat Mar 24 01:08:35 2018 +0100
@@ -23,6 +23,8 @@
package org.graalvm.compiler.core.amd64;
+import static org.graalvm.compiler.core.amd64.AMD64NodeLIRBuilder.Options.MitigateSpeculativeExecutionAttacks;
+
import org.graalvm.compiler.core.gen.NodeLIRBuilder;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.lir.LIRFrameState;
@@ -37,6 +39,11 @@
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.IntegerDivRemNode;
import org.graalvm.compiler.nodes.calc.IntegerDivRemNode.Op;
+import org.graalvm.compiler.nodes.cfg.Block;
+import org.graalvm.compiler.options.Option;
+import org.graalvm.compiler.options.OptionKey;
+import org.graalvm.compiler.options.OptionType;
+import org.graalvm.compiler.options.OptionValues;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.meta.AllocatableValue;
@@ -44,6 +51,13 @@
public abstract class AMD64NodeLIRBuilder extends NodeLIRBuilder {
+ public static class Options {
+ // @formatter:off
+ @Option(help = "AMD64: Emit lfence instructions at the beginning of basic blocks", type = OptionType.Expert)
+ public static final OptionKey<Boolean> MitigateSpeculativeExecutionAttacks = new OptionKey<>(false);
+ // @formatter:on
+ }
+
public AMD64NodeLIRBuilder(StructuredGraph graph, LIRGeneratorTool gen, AMD64NodeMatchRules nodeMatchRules) {
super(graph, gen, nodeMatchRules);
}
@@ -121,4 +135,21 @@
public AMD64LIRGenerator getLIRGeneratorTool() {
return (AMD64LIRGenerator) gen;
}
+
+ @Override
+ public void doBlockPrologue(Block block, OptionValues options) {
+ if (MitigateSpeculativeExecutionAttacks.getValue(options)) {
+ boolean hasControlSplitPredecessor = false;
+ for (Block b : block.getPredecessors()) {
+ if (b.getSuccessorCount() > 1) {
+ hasControlSplitPredecessor = true;
+ break;
+ }
+ }
+ boolean isStartBlock = block.getPredecessorCount() == 0;
+ if (hasControlSplitPredecessor || isStartBlock) {
+ getLIRGeneratorTool().emitLFence();
+ }
+ }
+ }
}
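doBlockPrologue implements the lfence-based mitigation: with the option enabled, an lfence is emitted at the method entry and at every block with a control-split predecessor, i.e. at every branch target where mis-speculation could begin. A usage sketch (the -Dgraal. spelling follows the usual Graal option convention and is an assumption here):

    // Command line: -Dgraal.MitigateSpeculativeExecutionAttacks=true
    // Programmatically, given an existing OptionValues `base`:
    OptionValues opts = new OptionValues(base,
                    AMD64NodeLIRBuilder.Options.MitigateSpeculativeExecutionAttacks, true);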
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/GraalOptions.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/GraalOptions.java Sat Mar 24 01:08:35 2018 +0100
@@ -259,6 +259,9 @@
@Option(help = "", type = OptionType.Debug)
public static final OptionKey<Boolean> OptDevirtualizeInvokesOptimistically = new OptionKey<>(true);
+ @Option(help = "Track the NodeSourcePosition.", type = OptionType.Debug)
+ public static final OptionKey<Boolean> TrackNodeSourcePosition = new OptionKey<>(false);
+
@Option(help = "Allow backend to match complex expressions.", type = OptionType.Debug)
public static final OptionKey<Boolean> MatchExpressions = new OptionKey<>(true);
@@ -273,8 +276,7 @@
@Option(help = "Enable experimental Trace Register Allocation.", type = OptionType.Debug)
public static final OptionKey<Boolean> TraceRA = new OptionKey<>(false);
-
- @Option(help = "How to trace inlining decisions, one of: None, Linear, Tree", type = OptionType.Debug)
- public static final OptionKey<TraceInliningMode> TraceInlining = new OptionKey<>(TraceInliningMode.None);
+ @Option(help = "Enable tracing of inlining decision.", type = OptionType.Debug)
+ public static final OptionKey<Boolean> TraceInlining = new OptionKey<>(false);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/TraceInliningMode.java Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,39 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package org.graalvm.compiler.core.common;
-
-public enum TraceInliningMode {
- None(false),
- Linear(true),
- Tree(true);
-
- private final boolean tracing;
-
- TraceInliningMode(boolean tracing) {
- this.tracing = tracing;
- }
-
- public boolean isTracing() {
- return tracing;
- }
-}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/alloc/BiDirectionalTraceBuilder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/alloc/BiDirectionalTraceBuilder.java Sat Mar 24 01:08:35 2018 +0100
@@ -84,7 +84,7 @@
AbstractBlockBase<?> block = worklist.pollFirst();
assert block != null;
if (!processed(block)) {
- Trace trace = new Trace(startTrace(debug, block));
+ Trace trace = new Trace(findTrace(debug, block));
for (AbstractBlockBase<?> traceBlock : trace.getBlocks()) {
blockToTrace[traceBlock.getId()] = trace;
}
@@ -101,13 +101,13 @@
* @param debug
*/
@SuppressWarnings("try")
- private Collection<AbstractBlockBase<?>> startTrace(DebugContext debug, AbstractBlockBase<?> block) {
+ private Collection<AbstractBlockBase<?>> findTrace(DebugContext debug, AbstractBlockBase<?> initBlock) {
ArrayDeque<AbstractBlockBase<?>> trace = new ArrayDeque<>();
- try (Indent i = debug.logAndIndent("StartTrace: %s", block)) {
+ try (Indent i = debug.logAndIndent("StartTrace: %s", initBlock)) {
try (Indent indentFront = debug.logAndIndent("Head:")) {
- for (AbstractBlockBase<?> currentBlock = block; currentBlock != null; currentBlock = selectPredecessor(currentBlock)) {
- addBlockToTrace(debug, currentBlock);
- trace.addFirst(currentBlock);
+ for (AbstractBlockBase<?> block = initBlock; block != null; block = selectPredecessor(block)) {
+ addBlockToTrace(debug, block);
+ trace.addFirst(block);
}
}
/* Number head blocks. Can not do this in the loop as we go backwards. */
@@ -117,11 +117,11 @@
}
try (Indent indentBack = debug.logAndIndent("Tail:")) {
- for (AbstractBlockBase<?> currentBlock = selectSuccessor(block); currentBlock != null; currentBlock = selectSuccessor(currentBlock)) {
- addBlockToTrace(debug, currentBlock);
- trace.addLast(currentBlock);
+ for (AbstractBlockBase<?> block = selectSuccessor(initBlock); block != null; block = selectSuccessor(block)) {
+ addBlockToTrace(debug, block);
+ trace.addLast(block);
/* This time we can number the blocks immediately as we go forwards. */
- currentBlock.setLinearScanNumber(blockNr++);
+ block.setLinearScanNumber(blockNr++);
}
}
}
@@ -129,18 +129,18 @@
return trace;
}
- private void addBlockToTrace(DebugContext debug, AbstractBlockBase<?> currentBlock) {
- debug.log("add %s (prob: %f)", currentBlock, currentBlock.probability());
- processed.set(currentBlock.getId());
+ private void addBlockToTrace(DebugContext debug, AbstractBlockBase<?> block) {
+ debug.log("add %s (prob: %f)", block, block.probability());
+ processed.set(block.getId());
}
/**
* @return The unprocessed predecessor with the highest probability, or {@code null}.
*/
- private AbstractBlockBase<?> selectPredecessor(AbstractBlockBase<?> currentBlock) {
+ private AbstractBlockBase<?> selectPredecessor(AbstractBlockBase<?> block) {
AbstractBlockBase<?> next = null;
- for (AbstractBlockBase<?> pred : currentBlock.getPredecessors()) {
- if (!processed(pred) && !isBackEdge(pred, currentBlock) && (next == null || pred.probability() > next.probability())) {
+ for (AbstractBlockBase<?> pred : block.getPredecessors()) {
+ if (!processed(pred) && !isBackEdge(pred, block) && (next == null || pred.probability() > next.probability())) {
next = pred;
}
}
@@ -155,9 +155,9 @@
/**
* @return The unprocessed successor with the highest probability, or {@code null}.
*/
- private AbstractBlockBase<?> selectSuccessor(AbstractBlockBase<?> currentBlock) {
+ private AbstractBlockBase<?> selectSuccessor(AbstractBlockBase<?> block) {
AbstractBlockBase<?> next = null;
- for (AbstractBlockBase<?> succ : currentBlock.getSuccessors()) {
+ for (AbstractBlockBase<?> succ : block.getSuccessors()) {
if (!processed(succ) && (next == null || succ.probability() > next.probability())) {
next = succ;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/alloc/UniDirectionalTraceBuilder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/alloc/UniDirectionalTraceBuilder.java Sat Mar 24 01:08:35 2018 +0100
@@ -33,7 +33,7 @@
import org.graalvm.compiler.debug.Indent;
/**
- * Computes traces by starting at a trace head and keep adding predecessors as long as possible.
+ * Computes traces by starting at a trace head and adding successors as long as possible.
*/
public final class UniDirectionalTraceBuilder {
@@ -87,7 +87,7 @@
AbstractBlockBase<?> block = worklist.poll();
assert block != null;
if (!processed(block)) {
- Trace trace = new Trace(startTrace(debug, block));
+ Trace trace = new Trace(findTrace(debug, block));
for (AbstractBlockBase<?> traceBlock : trace.getBlocks()) {
blockToTrace[traceBlock.getId()] = trace;
}
@@ -102,17 +102,17 @@
* Build a new trace starting at {@code block}.
*/
@SuppressWarnings("try")
- private List<AbstractBlockBase<?>> startTrace(DebugContext debug, AbstractBlockBase<?> block) {
- assert checkPredecessorsProcessed(block);
+ private List<AbstractBlockBase<?>> findTrace(DebugContext debug, AbstractBlockBase<?> traceStart) {
+ assert checkPredecessorsProcessed(traceStart);
ArrayList<AbstractBlockBase<?>> trace = new ArrayList<>();
int blockNumber = 0;
- try (Indent i = debug.logAndIndent("StartTrace: %s", block)) {
- for (AbstractBlockBase<?> currentBlock = block; currentBlock != null; currentBlock = selectNext(currentBlock)) {
- debug.log("add %s (prob: %f)", currentBlock, currentBlock.probability());
- processed.set(currentBlock.getId());
- trace.add(currentBlock);
- unblock(currentBlock);
- currentBlock.setLinearScanNumber(blockNumber++);
+ try (Indent i = debug.logAndIndent("StartTrace: %s", traceStart)) {
+ for (AbstractBlockBase<?> block = traceStart; block != null; block = selectNext(block)) {
+ debug.log("add %s (prob: %f)", block, block.probability());
+ processed.set(block.getId());
+ trace.add(block);
+ unblock(block);
+ block.setLinearScanNumber(blockNumber++);
}
}
return trace;
@@ -120,11 +120,7 @@
private boolean checkPredecessorsProcessed(AbstractBlockBase<?> block) {
for (AbstractBlockBase<?> pred : block.getPredecessors()) {
- if (!processed(pred)) {
- assert false : "Predecessor unscheduled: " + pred;
- return false;
- }
-
+ assert processed(pred) : "Predecessor unscheduled: " + pred;
}
return true;
}
@@ -133,8 +129,8 @@
* Decrease the {@link #blocked} count for all successors and add them to the worklist once
* the count reaches 0.
*/
- private void unblock(AbstractBlockBase<?> currentBlock) {
- for (AbstractBlockBase<?> successor : currentBlock.getSuccessors()) {
+ private void unblock(AbstractBlockBase<?> block) {
+ for (AbstractBlockBase<?> successor : block.getSuccessors()) {
if (!processed(successor)) {
int blockCount = --blocked[successor.getId()];
assert blockCount >= 0;
@@ -148,11 +144,11 @@
/**
* @return The unprocessed successor with the highest probability, or {@code null}.
*/
- private AbstractBlockBase<?> selectNext(AbstractBlockBase<?> currentBlock) {
+ private AbstractBlockBase<?> selectNext(AbstractBlockBase<?> block) {
AbstractBlockBase<?> next = null;
- for (AbstractBlockBase<?> succ : currentBlock.getSuccessors()) {
- if (!processed(succ) && (next == null || succ.probability() > next.probability())) {
- next = succ;
+ for (AbstractBlockBase<?> successor : block.getSuccessors()) {
+ if (!processed(successor) && (next == null || successor.probability() > next.probability())) {
+ next = successor;
}
}
return next;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/AbstractObjectStamp.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/AbstractObjectStamp.java Sat Mar 24 01:08:35 2018 +0100
@@ -191,8 +191,6 @@
boolean joinExactType = exactType || other.exactType;
if (Objects.equals(type, other.type)) {
joinType = type;
- } else if (type == null && other.type == null) {
- joinType = null;
} else if (type == null) {
joinType = other.type;
} else if (other.type == null) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/FloatStamp.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/FloatStamp.java Sat Mar 24 01:08:35 2018 +0100
@@ -176,11 +176,15 @@
StringBuilder str = new StringBuilder();
str.append('f');
str.append(getBits());
- str.append(nonNaN ? "!" : "");
- if (lowerBound == upperBound) {
- str.append(" [").append(lowerBound).append(']');
- } else if (lowerBound != Double.NEGATIVE_INFINITY || upperBound != Double.POSITIVE_INFINITY) {
- str.append(" [").append(lowerBound).append(" - ").append(upperBound).append(']');
+ if (hasValues()) {
+ str.append(nonNaN ? "!" : "");
+ if (lowerBound == upperBound) {
+ str.append(" [").append(lowerBound).append(']');
+ } else if (lowerBound != Double.NEGATIVE_INFINITY || upperBound != Double.POSITIVE_INFINITY) {
+ str.append(" [").append(lowerBound).append(" - ").append(upperBound).append(']');
+ }
+ } else {
+ str.append("<empty>");
}
return str.toString();
}
@@ -200,6 +204,12 @@
if (otherStamp == this) {
return this;
}
+ if (isEmpty()) {
+ return this;
+ }
+ if (otherStamp.isEmpty()) {
+ return otherStamp;
+ }
FloatStamp other = (FloatStamp) otherStamp;
assert getBits() == other.getBits();
double meetUpperBound = meetBounds(upperBound, other.upperBound, Math::max);
@@ -383,6 +393,9 @@
@Override
public Stamp foldStamp(Stamp s) {
+ if (s.isEmpty()) {
+ return s;
+ }
FloatStamp stamp = (FloatStamp) s;
Stamp folded = maybeFoldConstant(this, stamp);
if (folded != null) {
@@ -412,6 +425,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -454,6 +473,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -496,6 +521,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -544,6 +575,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -586,6 +623,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -615,6 +658,9 @@
@Override
public Stamp foldStamp(Stamp s) {
+ if (s.isEmpty()) {
+ return s;
+ }
FloatStamp stamp = (FloatStamp) s;
JavaConstant constant = stamp.asConstant();
if (constant != null) {
@@ -653,6 +699,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -701,6 +753,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -747,6 +805,12 @@
@Override
public Stamp foldStamp(Stamp s1, Stamp s2) {
+ if (s1.isEmpty()) {
+ return s1;
+ }
+ if (s2.isEmpty()) {
+ return s2;
+ }
FloatStamp stamp1 = (FloatStamp) s1;
FloatStamp stamp2 = (FloatStamp) s2;
Stamp folded = maybeFoldConstant(this, stamp1, stamp2);
@@ -789,6 +853,9 @@
@Override
public Stamp foldStamp(Stamp s) {
+ if (s.isEmpty()) {
+ return s;
+ }
FloatStamp stamp = (FloatStamp) s;
Stamp folded = maybeFoldConstant(this, stamp);
if (folded != null) {
@@ -818,6 +885,9 @@
@Override
public Stamp foldStamp(Stamp s) {
+ if (s.isEmpty()) {
+ return s;
+ }
FloatStamp stamp = (FloatStamp) s;
Stamp folded = maybeFoldConstant(this, stamp);
if (folded != null) {
@@ -839,6 +909,9 @@
@Override
public Stamp foldStamp(Stamp stamp) {
+ if (stamp.isEmpty()) {
+ return StampFactory.empty(JavaKind.Int);
+ }
FloatStamp floatStamp = (FloatStamp) stamp;
assert floatStamp.getBits() == 32;
boolean mustHaveZero = !floatStamp.isNonNaN();
@@ -865,6 +938,9 @@
@Override
public Stamp foldStamp(Stamp stamp) {
+ if (stamp.isEmpty()) {
+ return StampFactory.empty(JavaKind.Long);
+ }
FloatStamp floatStamp = (FloatStamp) stamp;
assert floatStamp.getBits() == 32;
boolean mustHaveZero = !floatStamp.isNonNaN();
@@ -891,6 +967,9 @@
@Override
public Stamp foldStamp(Stamp stamp) {
+ if (stamp.isEmpty()) {
+ return StampFactory.empty(JavaKind.Int);
+ }
FloatStamp floatStamp = (FloatStamp) stamp;
assert floatStamp.getBits() == 64;
boolean mustHaveZero = !floatStamp.isNonNaN();
@@ -917,6 +996,9 @@
@Override
public Stamp foldStamp(Stamp stamp) {
+ if (stamp.isEmpty()) {
+ return StampFactory.empty(JavaKind.Long);
+ }
FloatStamp floatStamp = (FloatStamp) stamp;
assert floatStamp.getBits() == 64;
boolean mustHaveZero = !floatStamp.isNonNaN();
@@ -943,6 +1025,9 @@
@Override
public Stamp foldStamp(Stamp stamp) {
+ if (stamp.isEmpty()) {
+ return StampFactory.empty(JavaKind.Double);
+ }
FloatStamp floatStamp = (FloatStamp) stamp;
assert floatStamp.getBits() == 32;
return StampFactory.forFloat(JavaKind.Double, floatStamp.lowerBound(), floatStamp.upperBound(), floatStamp.isNonNaN());
@@ -959,6 +1044,9 @@
@Override
public Stamp foldStamp(Stamp stamp) {
+ if (stamp.isEmpty()) {
+ return StampFactory.empty(JavaKind.Float);
+ }
FloatStamp floatStamp = (FloatStamp) stamp;
assert floatStamp.getBits() == 64;
return StampFactory.forFloat(JavaKind.Float, (float) floatStamp.lowerBound(), (float) floatStamp.upperBound(), floatStamp.isNonNaN());
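Every foldStamp guard added above follows one pattern: an empty stamp means the value is provably unreachable, and the canonical empty stamp need not be a FloatStamp, so emptiness must be propagated before the cast. A distilled sketch of the pattern (the IntegerStamp changes below repeat it):

    Stamp foldStamp(Stamp s) {
        if (s.isEmpty()) {
            return s;                      // unreachable in, unreachable out
        }
        FloatStamp stamp = (FloatStamp) s; // cast is only safe past the guard
        // ... fold constants/bounds as before ...
        return stamp;
    }

For the conversion ops the empty result must be produced in the result kind, hence the StampFactory.empty(JavaKind.Int)-style returns in the float-to-int and float-to-long cases rather than returning the input stamp.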
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/IntegerStamp.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/IntegerStamp.java Sat Mar 24 01:08:35 2018 +0100
@@ -122,11 +122,11 @@
return new IntegerStamp(bits, lowerBoundTmp, upperBoundTmp, defaultMask & (downMask | boundedDownMask), defaultMask & upMask & boundedUpMask);
}
- static long significantBit(long bits, long value) {
+ private static long significantBit(long bits, long value) {
return (value >>> (bits - 1)) & 1;
}
- static long minValueForMasks(int bits, long downMask, long upMask) {
+ private static long minValueForMasks(int bits, long downMask, long upMask) {
if (significantBit(bits, upMask) == 0) {
// Value is always positive. Minimum value always positive.
assert significantBit(bits, downMask) == 0;
@@ -137,7 +137,7 @@
}
}
- static long maxValueForMasks(int bits, long downMask, long upMask) {
+ private static long maxValueForMasks(int bits, long downMask, long upMask) {
if (significantBit(bits, downMask) == 1) {
// Value is always negative. Maximum value always negative.
assert significantBit(bits, upMask) == 1;
@@ -330,6 +330,12 @@
if (otherStamp == this) {
return this;
}
+ if (isEmpty()) {
+ return otherStamp;
+ }
+ if (otherStamp.isEmpty()) {
+ return this;
+ }
IntegerStamp other = (IntegerStamp) otherStamp;
return createStamp(other, Math.max(upperBound, other.upperBound), Math.min(lowerBound, other.lowerBound), downMask & other.downMask, upMask | other.upMask);
}
@@ -413,7 +419,7 @@
return super.equals(other);
}
- public static long upMaskFor(int bits, long lowerBound, long upperBound) {
+ private static long upMaskFor(int bits, long lowerBound, long upperBound) {
long mask = lowerBound | upperBound;
if (mask == 0) {
return 0;
@@ -595,6 +601,9 @@
@Override
public Stamp foldStamp(Stamp s) {
+ if (s.isEmpty()) {
+ return s;
+ }
IntegerStamp stamp = (IntegerStamp) s;
int bits = stamp.getBits();
if (stamp.lowerBound == stamp.upperBound) {
@@ -622,6 +631,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
@@ -715,6 +730,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
@@ -885,6 +906,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
JavaKind javaKind = a.getStackKind();
@@ -952,6 +979,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
JavaKind javaKind = a.getStackKind();
@@ -1046,6 +1079,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
assert a.getBits() == b.getBits();
@@ -1083,6 +1122,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
assert a.getBits() == b.getBits();
@@ -1121,6 +1166,9 @@
@Override
public Stamp foldStamp(Stamp stamp) {
+ if (stamp.isEmpty()) {
+ return stamp;
+ }
IntegerStamp integerStamp = (IntegerStamp) stamp;
int bits = integerStamp.getBits();
long defaultMask = CodeUtil.mask(bits);
@@ -1140,6 +1188,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
assert a.getBits() == b.getBits();
@@ -1167,6 +1221,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
assert a.getBits() == b.getBits();
@@ -1192,6 +1252,12 @@
@Override
public Stamp foldStamp(Stamp stamp1, Stamp stamp2) {
+ if (stamp1.isEmpty()) {
+ return stamp1;
+ }
+ if (stamp2.isEmpty()) {
+ return stamp2;
+ }
IntegerStamp a = (IntegerStamp) stamp1;
IntegerStamp b = (IntegerStamp) stamp2;
assert a.getBits() == b.getBits();
@@ -1269,8 +1335,7 @@
upMask |= value.upMask() << (i & shiftMask);
}
}
- Stamp result = IntegerStamp.stampForMask(bits, downMask, upMask & defaultMask);
- return result;
+ return IntegerStamp.stampForMask(bits, downMask, upMask & defaultMask);
}
return value.unrestricted();
}
@@ -1392,6 +1457,9 @@
@Override
public Stamp foldStamp(Stamp input) {
+ if (input.isEmpty()) {
+ return input;
+ }
IntegerStamp stamp = (IntegerStamp) input;
int bits = stamp.getBits();
if (stamp.lowerBound == stamp.upperBound) {
@@ -1419,6 +1487,9 @@
@Override
public Stamp foldStamp(int inputBits, int resultBits, Stamp input) {
+ if (input.isEmpty()) {
+ return StampFactory.forInteger(resultBits).empty();
+ }
IntegerStamp stamp = (IntegerStamp) input;
assert inputBits == stamp.getBits();
assert inputBits <= resultBits;
@@ -1458,6 +1529,9 @@
@Override
public Stamp foldStamp(int inputBits, int resultBits, Stamp input) {
+ if (input.isEmpty()) {
+ return StampFactory.forInteger(resultBits).empty();
+ }
IntegerStamp stamp = (IntegerStamp) input;
assert inputBits == stamp.getBits();
assert inputBits <= resultBits;
@@ -1487,6 +1561,9 @@
@Override
public Stamp foldStamp(int inputBits, int resultBits, Stamp input) {
+ if (input.isEmpty()) {
+ return StampFactory.forInteger(resultBits).empty();
+ }
IntegerStamp stamp = (IntegerStamp) input;
assert inputBits == stamp.getBits();
assert resultBits <= inputBits;
@@ -1526,6 +1603,9 @@
@Override
public Stamp foldStamp(Stamp input) {
+ if (input.isEmpty()) {
+ return StampFactory.empty(JavaKind.Float);
+ }
IntegerStamp stamp = (IntegerStamp) input;
assert stamp.getBits() == 32;
float lowerBound = stamp.lowerBound();
@@ -1544,6 +1624,9 @@
@Override
public Stamp foldStamp(Stamp input) {
+ if (input.isEmpty()) {
+ return StampFactory.empty(JavaKind.Float);
+ }
IntegerStamp stamp = (IntegerStamp) input;
assert stamp.getBits() == 64;
float lowerBound = stamp.lowerBound();
@@ -1562,6 +1645,9 @@
@Override
public Stamp foldStamp(Stamp input) {
+ if (input.isEmpty()) {
+ return StampFactory.empty(JavaKind.Double);
+ }
IntegerStamp stamp = (IntegerStamp) input;
assert stamp.getBits() == 32;
double lowerBound = stamp.lowerBound();
@@ -1580,6 +1666,9 @@
@Override
public Stamp foldStamp(Stamp input) {
+ if (input.isEmpty()) {
+ return StampFactory.empty(JavaKind.Double);
+ }
IntegerStamp stamp = (IntegerStamp) input;
assert stamp.getBits() == 64;
double lowerBound = stamp.lowerBound();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/StampFactory.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/type/StampFactory.java Sat Mar 24 01:08:35 2018 +0100
@@ -161,6 +161,10 @@
return IntegerStamp.create(bits, CodeUtil.minValue(bits), CodeUtil.maxValue(bits), 0, CodeUtil.mask(bits));
}
+ public static IntegerStamp forUnsignedInteger(int bits) {
+ return forUnsignedInteger(bits, 0, NumUtil.maxValueUnsigned(bits), 0, CodeUtil.mask(bits));
+ }
+
public static IntegerStamp forUnsignedInteger(int bits, long unsignedLowerBound, long unsignedUpperBound) {
return forUnsignedInteger(bits, unsignedLowerBound, unsignedUpperBound, 0, CodeUtil.mask(bits));
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.common/src/org/graalvm/compiler/core/common/util/UnsignedLong.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.common.util;
+
+public final class UnsignedLong {
+ private final long value;
+
+ public UnsignedLong(long value) {
+ this.value = value;
+ }
+
+ public long asLong() {
+ return value;
+ }
+
+ public boolean equals(long unsignedValue) {
+ return value == unsignedValue;
+ }
+
+ public boolean isLessThan(long unsignedValue) {
+ return Long.compareUnsigned(value, unsignedValue) < 0;
+ }
+
+ public boolean isLessOrEqualTo(long unsignedValue) {
+ return Long.compareUnsigned(value, unsignedValue) <= 0;
+ }
+
+ public UnsignedLong times(long unsignedValue) {
+ if (unsignedValue != 0 && Long.compareUnsigned(value, Long.divideUnsigned(0xffff_ffff_ffff_ffffL, unsignedValue)) > 0) {
+ throw new ArithmeticException();
+ }
+ return new UnsignedLong(value * unsignedValue);
+ }
+
+ public UnsignedLong minus(long unsignedValue) {
+ if (Long.compareUnsigned(value, unsignedValue) < 0) {
+ throw new ArithmeticException();
+ }
+ return new UnsignedLong(value - unsignedValue);
+ }
+
+ public UnsignedLong plus(long unsignedValue) {
+ if (Long.compareUnsigned(0xffff_ffff_ffff_ffffL - unsignedValue, value) < 0) {
+ throw new ArithmeticException();
+ }
+ return new UnsignedLong(value + unsignedValue);
+ }
+
+ public UnsignedLong wrappingPlus(long unsignedValue) {
+ return new UnsignedLong(value + unsignedValue);
+ }
+
+ public UnsignedLong wrappingTimes(long unsignedValue) {
+ return new UnsignedLong(value * unsignedValue);
+ }
+
+ @Override
+ public String toString() {
+ return "UnsignedLong(" + Long.toUnsignedString(value) + ")";
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ UnsignedLong that = (UnsignedLong) o;
+ return value == that.value;
+ }
+
+ @Override
+ public int hashCode() {
+ return Long.hashCode(value);
+ }
+}
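A quick illustration of UnsignedLong's checked versus wrapping arithmetic (not part of the file):

    UnsignedLong max = new UnsignedLong(0xffff_ffff_ffff_ffffL); // 2^64 - 1
    max.wrappingPlus(1).equals(0);          // true: wraps around to zero
    new UnsignedLong(10).minus(3).asLong(); // 7
    max.plus(1);                            // throws ArithmeticException
    max.times(2);                           // throws ArithmeticException

The checked operations let callers opt into explicit overflow detection instead of relying on Java's silently wrapping long arithmetic.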
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/BasePhaseBinaryGraphTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.phases.BasePhase;
+import org.graalvm.compiler.printer.BinaryGraphPrinter;
+import static org.junit.Assert.assertEquals;
+import org.junit.Before;
+import org.junit.Test;
+
+public class BasePhaseBinaryGraphTest {
+ private MyPhase phase;
+ private BinaryGraphPrinter printer;
+
+ @Before
+ public void createPhase() {
+ phase = new MyPhase();
+ }
+
+ @Before
+ public void createPrinter() throws Exception {
+ printer = new BinaryGraphPrinter(DebugContext.DISABLED, null);
+ }
+
+ @Test
+ public void phaseNameIsRecognizedAsType() {
+ String res = printer.typeName(phase.getName());
+ assertEquals(MyPhase.class.getName(), res);
+ }
+
+ private static final class MyPhase extends BasePhase<Void> {
+ @Override
+ protected void run(StructuredGraph graph, Void context) {
+ }
+
+ @Override
+ protected CharSequence getName() {
+ return super.getName();
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/ConditionalNodeTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import org.junit.Test;
+
+public class ConditionalNodeTest extends GraalCompilerTest {
+
+ @SuppressWarnings("unused") private static int sink0;
+ @SuppressWarnings("unused") private static int sink1;
+
+ @Test
+ public void test0() {
+ test("conditionalTest0", 0);
+ test("conditionalTest0", 1);
+ }
+
+ public static int conditionalTest0(int a) {
+ int value;
+ if (a == 1) {
+ value = -1;
+ sink1 = 0;
+ } else {
+ value = 6;
+ sink1 = 1;
+ }
+ sink0 = 1;
+ return Math.max(value, 6);
+ }
+
+ @Test
+ public void test1() {
+ test("conditionalTest1", 0);
+ test("conditionalTest1", 1);
+ }
+
+ public static int conditionalTest1(int a) {
+ int value;
+ if (a == 1) {
+ value = -1;
+ sink1 = 0;
+ } else {
+ value = 6;
+ sink1 = 1;
+ }
+ sink0 = 1;
+ return Math.max(6, value);
+ }
+
+ @Test
+ public void test2() {
+ test("conditionalTest2", 0);
+ test("conditionalTest2", 1);
+ }
+
+ public static int conditionalTest2(int a) {
+ int value;
+ if (a == 1) {
+ value = -1;
+ sink1 = 0;
+ } else {
+ value = 6;
+ sink1 = 1;
+ }
+ sink0 = 1;
+ return Math.min(value, -1);
+ }
+
+ @Test
+ public void test3() {
+ test("conditionalTest3", 0);
+ test("conditionalTest3", 1);
+ }
+
+ public static int conditionalTest3(int a) {
+ int value;
+ if (a == 1) {
+ value = -1;
+ sink1 = 0;
+ } else {
+ value = 6;
+ sink1 = 1;
+ }
+ sink0 = 1;
+ return Math.min(-1, value);
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CountedLoopTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CountedLoopTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -30,6 +30,7 @@
import org.graalvm.compiler.loop.InductionVariable;
import org.graalvm.compiler.loop.LoopsData;
import org.graalvm.compiler.nodeinfo.NodeInfo;
+import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.ValueNode;
@@ -52,25 +53,47 @@
ValueNode get(InductionVariable iv);
}
+ @FunctionalInterface
+ private interface StaticIVProperty {
+ long get(InductionVariable iv);
+ }
+
+ @FunctionalInterface
+ private interface IVPredicate {
+ boolean test(InductionVariable iv);
+ }
+
/**
* Get a property of an induction variable.
- *
- * @param property
*/
- private static int get(IVProperty property, int iv) {
+ private static int get(@SuppressWarnings("unused") IVProperty property, @SuppressWarnings("unused") StaticIVProperty staticProperty, @SuppressWarnings("unused") IVPredicate constantCheck,
+ int iv) {
+ return iv;
+ }
+
+ private static int get(@SuppressWarnings("unused") IVProperty property, int iv) {
+ return iv;
+ }
+
+ private static long get(@SuppressWarnings("unused") IVProperty property, @SuppressWarnings("unused") StaticIVProperty staticProperty, @SuppressWarnings("unused") IVPredicate constantCheck,
+ long iv) {
+ return iv;
+ }
+
+ private static long get(@SuppressWarnings("unused") IVProperty property, long iv) {
return iv;
}
private static class Result {
- public int extremum;
- public int exitValue;
+ public long extremum;
+ public long exitValue;
@Override
public int hashCode() {
final int prime = 31;
int result = 1;
- result = prime * result + exitValue;
- result = prime * result + extremum;
+ result = prime * result + Long.hashCode(exitValue);
+ result = prime * result + Long.hashCode(extremum);
return result;
}
@@ -95,7 +118,7 @@
Result ret = new Result();
for (i = start; i < limit; i += inc) {
GraalDirectives.controlFlowAnchor();
- ret.extremum = get(InductionVariable::extremumNode, i);
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, i);
}
ret.exitValue = get(InductionVariable::exitValueNode, i);
return ret;
@@ -103,32 +126,42 @@
@Test
public void increment1() {
- test("incrementSnippet", 0, 256, 1);
+ testCounted("incrementSnippet", 0, 256, 1);
}
@Test
public void increment2() {
- test("incrementSnippet", 0, 256, 2);
+ testCounted("incrementSnippet", 0, 256, 2);
}
@Test
public void increment3() {
- test("incrementSnippet", 0, 256, 3);
+ testCounted("incrementSnippet", 0, 256, 3);
}
@Test
public void increment4() {
- test("incrementSnippet", -10, Integer.MAX_VALUE, 1);
+ testCounted("incrementSnippet", -10, 1, Integer.MAX_VALUE);
}
@Test
public void increment5() {
- test("incrementSnippet", 256, 256, 1);
+ testCounted("incrementSnippet", 256, 256, 1);
}
@Test
public void increment6() {
- test("incrementSnippet", 257, 256, 1);
+ testCounted("incrementSnippet", 257, 256, 1);
+ }
+
+ @Test
+ public void increment7() {
+ testCounted("incrementSnippet", -10, Integer.MAX_VALUE, 1);
+ }
+
+ @Test
+ public void increment8() {
+ testCounted("incrementSnippet", -10, Integer.MAX_VALUE - 1, 2);
}
public static Result incrementEqSnippet(int start, int limit, int step) {
@@ -137,7 +170,7 @@
Result ret = new Result();
for (i = start; i <= limit; i += inc) {
GraalDirectives.controlFlowAnchor();
- ret.extremum = get(InductionVariable::extremumNode, i);
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, i);
}
ret.exitValue = get(InductionVariable::exitValueNode, i);
return ret;
@@ -145,32 +178,42 @@
@Test
public void incrementEq1() {
- test("incrementEqSnippet", 0, 256, 1);
+ testCounted("incrementEqSnippet", 0, 256, 1);
}
@Test
public void incrementEq2() {
- test("incrementEqSnippet", 0, 256, 2);
+ testCounted("incrementEqSnippet", 0, 256, 2);
}
@Test
public void incrementEq3() {
- test("incrementEqSnippet", 0, 256, 3);
+ testCounted("incrementEqSnippet", 0, 256, 3);
}
@Test
public void incrementEq4() {
- test("incrementEqSnippet", -10, 0, Integer.MAX_VALUE);
+ testCounted("incrementEqSnippet", -10, 0, Integer.MAX_VALUE);
}
@Test
public void incrementEq5() {
- test("incrementEqSnippet", 256, 256, 1);
+ testCounted("incrementEqSnippet", 256, 256, 1);
}
@Test
public void incrementEq6() {
- test("incrementEqSnippet", 257, 256, 1);
+ testCounted("incrementEqSnippet", 257, 256, 1);
+ }
+
+ @Test
+ public void incrementEq7() {
+ testCounted("incrementEqSnippet", -10, Integer.MAX_VALUE - 1, 1);
+ }
+
+ @Test
+ public void incrementEq8() {
+ testCounted("incrementEqSnippet", -10, Integer.MAX_VALUE - 2, 2);
}
public static Result decrementSnippet(int start, int limit, int step) {
@@ -179,7 +222,7 @@
Result ret = new Result();
for (i = start; i > limit; i -= dec) {
GraalDirectives.controlFlowAnchor();
- ret.extremum = get(InductionVariable::extremumNode, i);
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, i);
}
ret.exitValue = get(InductionVariable::exitValueNode, i);
return ret;
@@ -187,17 +230,27 @@
@Test
public void decrement1() {
- test("decrementSnippet", 256, 0, 1);
+ testCounted("decrementSnippet", 256, 0, 1);
}
@Test
public void decrement2() {
- test("decrementSnippet", 256, 0, 2);
+ testCounted("decrementSnippet", 256, 0, 2);
}
@Test
public void decrement3() {
- test("decrementSnippet", 256, 0, 3);
+ testCounted("decrementSnippet", 256, 0, 3);
+ }
+
+ @Test
+ public void decrement4() {
+ testCounted("decrementSnippet", Integer.MAX_VALUE, -10, 1);
+ }
+
+ @Test
+ public void decrement5() {
+ testCounted("decrementSnippet", Integer.MAX_VALUE, -10, 2);
}
public static Result decrementEqSnippet(int start, int limit, int step) {
@@ -206,7 +259,7 @@
Result ret = new Result();
for (i = start; i >= limit; i -= dec) {
GraalDirectives.controlFlowAnchor();
- ret.extremum = get(InductionVariable::extremumNode, i);
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, i);
}
ret.exitValue = get(InductionVariable::exitValueNode, i);
return ret;
@@ -214,22 +267,32 @@
@Test
public void decrementEq1() {
- test("decrementEqSnippet", 256, 0, 1);
+ testCounted("decrementEqSnippet", 256, 0, 1);
}
@Test
public void decrementEq2() {
- test("decrementEqSnippet", 256, 0, 2);
+ testCounted("decrementEqSnippet", 256, 0, 2);
}
@Test
public void decrementEq3() {
- test("decrementEqSnippet", 256, 0, 3);
+ testCounted("decrementEqSnippet", 256, 0, 3);
}
@Test
public void decrementEq4() {
- test("decrementEqSnippet", -10, 0, Integer.MAX_VALUE);
+ testCounted("decrementEqSnippet", -10, 0, Integer.MAX_VALUE);
+ }
+
+ @Test
+ public void decrementEq5() {
+ testCounted("decrementEqSnippet", Integer.MAX_VALUE, -10, 1);
+ }
+
+ @Test
+ public void decrementEq6() {
+ testCounted("decrementEqSnippet", Integer.MAX_VALUE, -10, 2);
}
public static Result twoVariablesSnippet() {
@@ -238,7 +301,7 @@
for (int i = 0; i < 1024; i++) {
j += 5;
GraalDirectives.controlFlowAnchor();
- ret.extremum = get(InductionVariable::extremumNode, j);
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, j);
}
ret.exitValue = get(InductionVariable::exitValueNode, j);
return ret;
@@ -246,7 +309,83 @@
@Test
public void testTwoVariables() {
- test("twoVariablesSnippet");
+ testCounted("twoVariablesSnippet");
+ }
+
+ public static Result incrementNeqSnippet(int limit) {
+ int i;
+ int posLimit = ((limit - 1) & 0xFFFF) + 1; // make sure limit is always strictly positive
+ Result ret = new Result();
+ for (i = 0; i != posLimit; i++) {
+ GraalDirectives.controlFlowAnchor();
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, i);
+ }
+ ret.exitValue = get(InductionVariable::exitValueNode, i);
+ return ret;
+ }
+
+ @Test
+ public void decrementNeq() {
+ testCounted("decrementNeqSnippet", 256);
+ }
+
+ public static Result decrementNeqSnippet(int limit) {
+ int i;
+ int posLimit = ((limit - 1) & 0xFFFF) + 1; // make sure limit is always strictly positive
+ Result ret = new Result();
+ for (i = posLimit; i != 0; i--) {
+ GraalDirectives.controlFlowAnchor();
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, i);
+ }
+ ret.exitValue = get(InductionVariable::exitValueNode, i);
+ return ret;
+ }
+
+ @Test
+ public void incrementNeq() {
+ testCounted("incrementNeqSnippet", 256);
+ }
+
+ public static Result incrementLongSnippet(long start, long limit, long step) {
+ long i;
+ long inc = ((step - 1) & 0xFFFF) + 1; // make sure this value is always strictly positive
+ Result ret = new Result();
+ for (i = start; i < limit; i += inc) {
+ GraalDirectives.controlFlowAnchor();
+ ret.extremum = get(InductionVariable::extremumNode, InductionVariable::constantExtremum, InductionVariable::isConstantExtremum, i);
+ }
+ ret.exitValue = get(InductionVariable::exitValueNode, i);
+ return ret;
+ }
+
+ @Test
+ public void incrementLong1() {
+ testCounted("incrementLongSnippet", 0L, 256L, 1L);
+ }
+
+ @Test
+ public void incrementLong2() {
+ testCounted("incrementLongSnippet", 0L, 256L, 2L);
+ }
+
+ @Test
+ public void incrementLong3() {
+ testCounted("incrementLongSnippet", 0L, 256L, 3L);
+ }
+
+ @Test
+ public void incrementLong4() {
+ testCounted("incrementLongSnippet", -10L, 1L, Long.MAX_VALUE);
+ }
+
+ @Test
+ public void incrementLong5() {
+ testCounted("incrementLongSnippet", 256L, 256L, 1L);
+ }
+
+ @Test
+ public void incrementLong6() {
+ testCounted("incrementLongSnippet", 257L, 256L, 1L);
}
@NodeInfo(cycles = CYCLES_IGNORED, size = SIZE_IGNORED)
@@ -255,18 +394,31 @@
public static final NodeClass<IVPropertyNode> TYPE = NodeClass.create(IVPropertyNode.class);
private final IVProperty property;
+ private final StaticIVProperty staticProperty;
+ private final IVPredicate staticCheck;
@Input private ValueNode iv;
- protected IVPropertyNode(IVProperty property, ValueNode iv) {
+ protected IVPropertyNode(IVProperty property, StaticIVProperty staticProperty, IVPredicate staticCheck, ValueNode iv) {
super(TYPE, iv.stamp(NodeView.DEFAULT).unrestricted());
this.property = property;
+ this.staticProperty = staticProperty;
+ this.staticCheck = staticCheck;
this.iv = iv;
}
public void rewrite(LoopsData loops) {
InductionVariable inductionVariable = loops.getInductionVariable(iv);
assert inductionVariable != null;
- ValueNode node = property.get(inductionVariable);
+ ValueNode node = null;
+ if (staticCheck != null) {
+ assert staticProperty != null;
+ if (staticCheck.test(inductionVariable)) {
+ node = ConstantNode.forLong(staticProperty.get(inductionVariable), graph());
+ }
+ }
+ if (node == null) {
+ node = property.get(inductionVariable);
+ }
replaceAtUsagesAndDelete(node);
}
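The `IVProperty`, `StaticIVProperty` and `IVPredicate` types used above are not visible in these hunks; from their call sites (`property.get(iv)` yielding a `ValueNode`, `staticProperty.get(iv)` feeding `ConstantNode.forLong`, `staticCheck.test(iv)` guarding it) their shapes are presumably the following sketch:

    @FunctionalInterface
    interface IVProperty {
        ValueNode get(InductionVariable iv);   // graph-node form, e.g. InductionVariable::extremumNode
    }

    @FunctionalInterface
    interface StaticIVProperty {
        long get(InductionVariable iv);        // constant form, e.g. InductionVariable::constantExtremum
    }

    @FunctionalInterface
    interface IVPredicate {
        boolean test(InductionVariable iv);    // e.g. InductionVariable::isConstantExtremum
    }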
@@ -279,7 +431,13 @@
@Override
protected void registerInvocationPlugins(InvocationPlugins invocationPlugins) {
Registration r = new Registration(invocationPlugins, CountedLoopTest.class);
- r.register2("get", IVProperty.class, int.class, new InvocationPlugin() {
+ registerPlugins(r, JavaKind.Int);
+ registerPlugins(r, JavaKind.Long);
+ super.registerInvocationPlugins(invocationPlugins);
+ }
+
+ private void registerPlugins(Registration r, JavaKind ivKind) {
+ r.register2("get", IVProperty.class, ivKind.toJavaClass(), new InvocationPlugin() {
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode arg1, ValueNode arg2) {
IVProperty property = null;
@@ -287,14 +445,36 @@
property = getSnippetReflection().asObject(IVProperty.class, arg1.asJavaConstant());
}
if (property != null) {
- b.addPush(JavaKind.Int, new IVPropertyNode(property, arg2));
+ b.addPush(ivKind, new IVPropertyNode(property, null, null, arg2));
return true;
} else {
return false;
}
}
});
- super.registerInvocationPlugins(invocationPlugins);
+ r.register4("get", IVProperty.class, StaticIVProperty.class, IVPredicate.class, ivKind.toJavaClass(), new InvocationPlugin() {
+ @Override
+ public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode arg1, ValueNode arg2, ValueNode arg3, ValueNode arg4) {
+ IVProperty property = null;
+ StaticIVProperty staticProperty = null;
+ IVPredicate staticCheck = null;
+ if (arg1.isConstant()) {
+ property = getSnippetReflection().asObject(IVProperty.class, arg1.asJavaConstant());
+ }
+ if (arg2.isConstant()) {
+ staticProperty = getSnippetReflection().asObject(StaticIVProperty.class, arg2.asJavaConstant());
+ }
+ if (arg3.isConstant()) {
+ staticCheck = getSnippetReflection().asObject(IVPredicate.class, arg3.asJavaConstant());
+ }
+ if (property != null && staticProperty != null && staticCheck != null) {
+ b.addPush(ivKind, new IVPropertyNode(property, staticProperty, staticCheck, arg4));
+ return true;
+ } else {
+ return false;
+ }
+ }
+ });
}
@Override
@@ -308,37 +488,17 @@
return true;
}
- public static Result incrementNeqSnippet(int limit) {
- int i;
- int posLimit = ((limit - 1) & 0xFFFF) + 1; // make sure limit is always strictly positive
- Result ret = new Result();
- for (i = 0; i != posLimit; i++) {
- GraalDirectives.controlFlowAnchor();
- ret.extremum = get(InductionVariable::extremumNode, i);
- }
- ret.exitValue = get(InductionVariable::exitValueNode, i);
- return ret;
- }
+ private Object[] argsToBind;
- @Test
- public void decrementNeq() {
- test("decrementNeqSnippet", 256);
+ @Override
+ protected Object[] getArgumentToBind() {
+ return argsToBind;
}
- public static Result decrementNeqSnippet(int limit) {
- int i;
- int posLimit = ((limit - 1) & 0xFFFF) + 1; // make sure limit is always strictly positive
- Result ret = new Result();
- for (i = posLimit; i != 0; i--) {
- GraalDirectives.controlFlowAnchor();
- ret.extremum = get(InductionVariable::extremumNode, i);
- }
- ret.exitValue = get(InductionVariable::exitValueNode, i);
- return ret;
- }
-
- @Test
- public void incrementNeq() {
- test("incrementNeqSnippet", 256);
+ public void testCounted(String snippetName, Object... args) {
+ test(snippetName, args);
+ argsToBind = args;
+ test(snippetName, args);
+ argsToBind = null;
}
}
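As a worked example of what the two directives compute, take testCounted("incrementSnippet", 0, 256, 3): the loop body sees i = 0, 3, ..., 255. A sketch of the arithmetic only (not Graal API):

    int start = 0, limit = 256, inc = 3;
    long tripCount = (limit - 1 - start) / inc + 1;   // 86 iterations
    long extremum = start + (tripCount - 1) * inc;    // 255: last value of i inside the loop
    long exitValue = start + tripCount * inc;         // 258: value of i when the loop exits

The second, argument-bound run of testCounted makes start, limit and step compile-time constants, so isConstantExtremum can hold and the IVPropertyNode folds to ConstantNode.forLong(constantExtremum) per the rewrite above, while the first run keeps the generic extremumNode expression.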
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,7 @@
*/
package org.graalvm.compiler.core.test;
+import static java.lang.reflect.Modifier.isStatic;
import static jdk.vm.ci.runtime.JVMCICompiler.INVOCATION_ENTRY_BCI;
import static org.graalvm.compiler.nodes.ConstantNode.getConstantNodes;
import static org.graalvm.compiler.nodes.graphbuilderconf.InlineInvokePlugin.InlineInfo.DO_NOT_INLINE_NO_EXCEPTION;
@@ -47,6 +48,7 @@
import java.util.Set;
import java.util.function.Supplier;
+import jdk.vm.ci.meta.JavaConstant;
import org.graalvm.compiler.api.directives.GraalDirectives;
import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.api.test.Graal;
@@ -931,7 +933,8 @@
*/
@SuppressWarnings("try")
protected InstalledCode getCode(final ResolvedJavaMethod installedCodeOwner, StructuredGraph graph, boolean forceCompile, boolean installAsDefault, OptionValues options) {
- if (!forceCompile && graph == null) {
+ boolean useCache = !forceCompile && getArgumentToBind() == null;
+ if (useCache && graph == null) {
InstalledCode cached = cache.get(installedCodeOwner);
if (cached != null) {
if (cached.isValid()) {
@@ -964,7 +967,7 @@
throw new GraalError("Could not install code for " + installedCodeOwner.format("%H.%n(%p)"));
}
} catch (BailoutException e) {
- if (retry <= BAILOUT_RETRY_LIMIT && graph == null && !e.isPermanent()) {
+ if (retry < BAILOUT_RETRY_LIMIT && graph == null && !e.isPermanent()) {
// retry (if there is no predefined graph)
TTY.println(String.format("Restart compilation %s (%s) due to a non-permanent bailout!", installedCodeOwner, id));
continue;
@@ -978,7 +981,7 @@
throw debug.handle(e);
}
- if (!forceCompile) {
+ if (useCache) {
cache.put(installedCodeOwner, installedCode);
}
return installedCode;
@@ -1243,12 +1246,33 @@
DebugContext debug = graph.getDebug();
try (DebugContext.Scope ds = debug.scope("Parsing", javaMethod, graph)) {
graphBuilderSuite.apply(graph, getDefaultHighTierContext());
+ Object[] args = getArgumentToBind();
+ if (args != null) {
+ bindArguments(graph, args);
+ }
return graph;
} catch (Throwable e) {
throw debug.handle(e);
}
}
+ protected void bindArguments(StructuredGraph graph, Object[] argsToBind) {
+ ResolvedJavaMethod m = graph.method();
+ Object receiver = isStatic(m.getModifiers()) ? null : this;
+ Object[] args = argsWithReceiver(receiver, argsToBind);
+ JavaType[] parameterTypes = m.toParameterTypes();
+ assert parameterTypes.length == args.length;
+ for (ParameterNode param : graph.getNodes(ParameterNode.TYPE)) {
+ JavaConstant c = getSnippetReflection().forBoxed(parameterTypes[param.index()].getJavaKind(), args[param.index()]);
+ ConstantNode replacement = ConstantNode.forConstant(c, getMetaAccess(), graph);
+ param.replaceAtUsages(replacement);
+ }
+ }
+
+ protected Object[] getArgumentToBind() {
+ return null;
+ }
+
protected PhaseSuite<HighTierContext> getEagerGraphBuilderSuite() {
return getCustomGraphBuilderSuite(GraphBuilderConfiguration.getDefault(getDefaultGraphBuilderPlugins()).withEagerResolving(true).withUnresolvedIsError(true));
}
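A hedged usage sketch of the new binding hook: a test subclass overrides getArgumentToBind() so that, after parsing, bindArguments replaces each ParameterNode with a ConstantNode. Class and snippet names here are hypothetical:

    public class ConstantArgsTest extends GraalCompilerTest {
        private Object[] argsToBind;

        @Override
        protected Object[] getArgumentToBind() {
            return argsToBind;
        }

        @Test
        public void run() {
            argsToBind = new Object[]{21};
            test("twice", 21);     // compiled with x bound to the constant 21
            argsToBind = null;     // binding also disables the code cache (see getCode above)
        }

        public static int twice(int x) {
            return 2 * x;
        }
    }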
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraphEncoderTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraphEncoderTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -77,8 +77,7 @@
}
for (StructuredGraph originalGraph : originalGraphs) {
- EncodedGraph encodedGraph = new EncodedGraph(encoder.getEncoding(), startOffsets.get(originalGraph), encoder.getObjects(), encoder.getNodeClasses(), originalGraph.getAssumptions(),
- originalGraph.getMethods());
+ EncodedGraph encodedGraph = new EncodedGraph(encoder.getEncoding(), startOffsets.get(originalGraph), encoder.getObjects(), encoder.getNodeClasses(), originalGraph);
GraphEncoder.verifyEncoding(originalGraph, encodedGraph, getTarget().arch);
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SwitchDyingLoopTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import static org.graalvm.compiler.graph.test.matchers.NodeIterableCount.hasCount;
+import static org.graalvm.compiler.graph.test.matchers.NodeIterableIsEmpty.isEmpty;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThat;
+
+import org.graalvm.compiler.core.common.type.StampFactory;
+import org.graalvm.compiler.nodes.LoopBeginNode;
+import org.graalvm.compiler.nodes.ParameterNode;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.extended.IntegerSwitchNode;
+import org.graalvm.compiler.phases.common.CanonicalizerPhase;
+import org.graalvm.compiler.phases.tiers.HighTierContext;
+import org.junit.Test;
+
+import jdk.vm.ci.meta.JavaKind;
+
+public class SwitchDyingLoopTest extends GraalCompilerTest {
+
+ @SuppressWarnings("fallthrough")
+ public static int snippet(int a, int n) {
+ int r = 0;
+ loop: for (int i = 0; i < n; i++) {
+ int v = (i * 167 + 13) & 0xff;
+ switch (v & a) {
+ case 0x80:
+ r += 1; // fall through
+ case 0x40:
+ r += 2; // fall through
+ case 0x20:
+ r += 3;
+ continue;
+ case 0x08:
+ r += 5; // fall through
+ case 0x04:
+ r += 7; // fall through
+ case 0x02:
+ r += 9; // fall through
+ default:
+ break loop;
+ }
+ }
+ return r;
+ }
+
+ @Test
+ public void test() {
+ CanonicalizerPhase canonicalizerPhase = new CanonicalizerPhase();
+ HighTierContext highTierContext = getDefaultHighTierContext();
+ StructuredGraph graph = parseEager("snippet", StructuredGraph.AllowAssumptions.YES);
+ // there should be 1 loop and 1 switch
+ assertThat(graph.getNodes(LoopBeginNode.TYPE), hasCount(1));
+ assertThat(graph.getNodes(IntegerSwitchNode.TYPE), hasCount(1));
+ canonicalizerPhase.apply(graph, highTierContext);
+ // after canonicalization, the loop and switch should still be there
+ assertThat(graph.getNodes(LoopBeginNode.TYPE), hasCount(1));
+ assertThat(graph.getNodes(IntegerSwitchNode.TYPE), hasCount(1));
+ // add stamp to `a` so that paths leading to continue can be trimmed
+ ParameterNode parameter = graph.getParameter(0);
+ assertNotNull(parameter);
+ parameter.setStamp(StampFactory.forInteger(JavaKind.Int, 0, 255, 0, 0xf));
+ canonicalizerPhase.apply(graph, highTierContext);
+ // the loop should have disappeared and there should still be a switch
+ assertThat(graph.getNodes(LoopBeginNode.TYPE), isEmpty());
+ assertThat(graph.getNodes(IntegerSwitchNode.TYPE), hasCount(1));
+ }
+}
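The stamp set in the test reads as: a is in [0, 255], no bit is always set (downMask 0), and only the low four bits may be set (upMask 0xf). Hence (v & a) can only be 0..0xf, the three cases that continue (0x80, 0x40, 0x20) become unreachable, and every surviving switch path leaves the loop, which is why it folds away. A plain-Java restatement of that mask reasoning (illustrative values only):

    for (int v = 0; v < 256; v++) {
        for (int a = 0; a <= 0xf; a++) {   // every a allowed by upMask 0xf
            assert (v & a) <= 0xf;         // so keys 0x20, 0x40 and 0x80 never match
        }
    }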
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/TrivialInliningExplosionTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/TrivialInliningExplosionTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -74,9 +74,9 @@
int afterCompileSize = lastCompiledGraph.getNodeCount();
// The values of afterParseSize and afterCompileSize when this
- // test was written were 849 and 848 respectively.
- Assert.assertTrue(afterParseSize < 2000);
- Assert.assertTrue(afterCompileSize < 2000);
+ // test was written were 3223 and 3505 respectively.
+ Assert.assertTrue(afterParseSize < 4000);
+ Assert.assertTrue(afterCompileSize < 4000);
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/UnsignedLongTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import org.graalvm.compiler.core.common.util.UnsignedLong;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class UnsignedLongTest {
+ @Test
+ public void testEquals() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertTrue(fortyTwo.equals(42));
+ Assert.assertFalse(fortyTwo.equals(99));
+ UnsignedLong longFortyTwo = new UnsignedLong(0x42_0000_8888L);
+ Assert.assertTrue(longFortyTwo.equals(0x42_0000_8888L));
+ Assert.assertFalse(longFortyTwo.equals(0x99_0000_8888L));
+ UnsignedLong longUnsigned = new UnsignedLong(0x8000_7777_0000_8888L);
+ Assert.assertTrue(longUnsigned.equals(0x8000_7777_0000_8888L));
+ Assert.assertFalse(longUnsigned.equals(0xf000_7777_0000_8888L));
+ }
+
+ @Test
+ public void testIsLessThan() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertTrue(fortyTwo.isLessThan(45));
+ Assert.assertFalse(fortyTwo.isLessThan(42));
+ Assert.assertFalse(fortyTwo.isLessThan(40));
+ Assert.assertTrue(fortyTwo.isLessThan(0xffff_ffff_ffff_ffffL));
+ UnsignedLong longUnsigned = new UnsignedLong(0x8000_7777_0000_8888L);
+ Assert.assertTrue(longUnsigned.isLessThan(0xffff_ffff_ffff_ffffL));
+ Assert.assertFalse(longUnsigned.isLessThan(42));
+ Assert.assertFalse(longUnsigned.isLessThan(0x8000_0777_0000_8888L));
+ Assert.assertFalse(longUnsigned.isLessThan(0x8000_7777_0000_8888L));
+ }
+
+ @Test
+ public void testIsLessOrEqualTo() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertTrue(fortyTwo.isLessOrEqualTo(45));
+ Assert.assertTrue(fortyTwo.isLessOrEqualTo(42));
+ Assert.assertFalse(fortyTwo.isLessOrEqualTo(40));
+ Assert.assertTrue(fortyTwo.isLessOrEqualTo(0xffff_ffff_ffff_ffffL));
+ UnsignedLong longUnsigned = new UnsignedLong(0x8000_7777_0000_8888L);
+ Assert.assertTrue(longUnsigned.isLessOrEqualTo(0xffff_ffff_ffff_ffffL));
+ Assert.assertFalse(longUnsigned.isLessOrEqualTo(42));
+ Assert.assertFalse(longUnsigned.isLessOrEqualTo(0x8000_0777_0000_8888L));
+ Assert.assertTrue(longUnsigned.isLessOrEqualTo(0x8000_7777_0000_8888L));
+ }
+
+ @Test
+ public void testTimes() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertEquals(42 * 42, fortyTwo.times(42).asLong());
+ Assert.assertEquals(0xffff_ffff_ffff_fff0L, fortyTwo.times(0x618618618618618L).asLong());
+ }
+
+ @Test(expected = ArithmeticException.class)
+ public void testTimesException() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ fortyTwo.times(0x618618618618619L);
+ }
+
+ @Test
+ public void testMinus() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertEquals(0, fortyTwo.minus(42).asLong());
+ Assert.assertEquals(40, fortyTwo.minus(2).asLong());
+ UnsignedLong longUnsigned = new UnsignedLong(0xffff_ffff_ffff_fff0L);
+ Assert.assertEquals(0, longUnsigned.minus(0xffff_ffff_ffff_fff0L).asLong());
+ }
+
+ @Test(expected = ArithmeticException.class)
+ public void testMinusException() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ fortyTwo.minus(43);
+ }
+
+ @Test(expected = ArithmeticException.class)
+ public void testMinusException2() {
+ UnsignedLong longUnsigned = new UnsignedLong(0xffff_ffff_ffff_fff0L);
+ longUnsigned.minus(0xffff_ffff_ffff_fff1L);
+ }
+
+ @Test
+ public void testPlus() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertEquals(84, fortyTwo.plus(42).asLong());
+ Assert.assertEquals(44, fortyTwo.plus(2).asLong());
+ UnsignedLong longUnsigned = new UnsignedLong(0xffff_ffff_ffff_fff0L);
+ Assert.assertEquals(0xffff_ffff_ffff_ffffL, longUnsigned.plus(0xf).asLong());
+ }
+
+ @Test(expected = ArithmeticException.class)
+ public void testPlusException() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ fortyTwo.plus(0xffff_ffff_ffff_fff0L);
+ }
+
+ @Test(expected = ArithmeticException.class)
+ public void testPlusException2() {
+ UnsignedLong longUnsigned = new UnsignedLong(0xffff_ffff_ffff_fff0L);
+ longUnsigned.plus(42);
+ }
+
+ @Test
+ public void testWrappingTimes() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertEquals(0x1a, fortyTwo.wrappingTimes(0x618618618618619L).asLong());
+ }
+
+ @Test
+ public void testWrappingPlus() {
+ UnsignedLong fortyTwo = new UnsignedLong(42);
+ Assert.assertEquals(0x1a, fortyTwo.wrappingPlus(0xffff_ffff_ffff_fff0L).asLong());
+ UnsignedLong longUnsigned = new UnsignedLong(0xffff_ffff_ffff_fff0L);
+ Assert.assertEquals(0x1a, longUnsigned.wrappingPlus(42).asLong());
+ }
+}
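The implementation of UnsignedLong is not part of the hunks shown here; the semantics these tests rely on can be restated with the JDK's unsigned helpers. A sketch under that assumption:

    static boolean isLessThan(long a, long b) {
        return Long.compareUnsigned(a, b) < 0;
    }

    static long checkedPlus(long a, long b) {
        long r = a + b;
        if (Long.compareUnsigned(r, a) < 0) {   // wrapped past 2^64 - 1
            throw new ArithmeticException("unsigned overflow");
        }
        return r;
    }

    static long checkedMinus(long a, long b) {
        if (Long.compareUnsigned(a, b) < 0) {   // would go below 0
            throw new ArithmeticException("unsigned underflow");
        }
        return a - b;
    }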
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/GraalCompiler.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/GraalCompiler.java Sat Mar 24 01:08:35 2018 +0100
@@ -240,6 +240,7 @@
debug.dump(DebugContext.BASIC_LEVEL, graph, "After low tier");
debug.dump(DebugContext.BASIC_LEVEL, graph.getLastSchedule(), "Final HIR schedule");
+ graph.logInliningTree();
} catch (Throwable e) {
throw debug.handle(e);
} finally {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/gen/NodeLIRBuilder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/gen/NodeLIRBuilder.java Sat Mar 24 01:08:35 2018 +0100
@@ -316,6 +316,10 @@
return values.toArray(new Value[values.size()]);
}
+ public void doBlockPrologue(@SuppressWarnings("unused") Block block, @SuppressWarnings("unused") OptionValues options) {
+
+ }
+
@Override
@SuppressWarnings("try")
public void doBlock(Block block, StructuredGraph graph, BlockMap<List<Node>> blockMap) {
@@ -341,6 +345,7 @@
}
}
}
+ doBlockPrologue(block, options);
List<Node> nodes = blockMap.get(block);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/phases/HighTier.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/phases/HighTier.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
import org.graalvm.compiler.options.OptionValues;
import org.graalvm.compiler.phases.PhaseSuite;
import org.graalvm.compiler.phases.common.CanonicalizerPhase;
+import org.graalvm.compiler.phases.common.NodeCounterPhase;
import org.graalvm.compiler.phases.common.ConvertDeoptimizeToGuardPhase;
import org.graalvm.compiler.phases.common.DeadCodeEliminationPhase;
import org.graalvm.compiler.phases.common.IncrementalCanonicalizerPhase;
@@ -74,6 +75,10 @@
appendPhase(canonicalizer);
+ if (NodeCounterPhase.Options.NodeCounters.getValue(options)) {
+ appendPhase(new NodeCounterPhase());
+ }
+
if (Options.Inline.getValue(options)) {
appendPhase(new InliningPhase(canonicalizer));
appendPhase(new DeadCodeEliminationPhase(Optional));
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/target/Backend.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core/src/org/graalvm/compiler/core/target/Backend.java Sat Mar 24 01:08:35 2018 +0100
@@ -178,7 +178,7 @@
* @param method the method compiled to produce {@code compiledCode} or {@code null} if the
* input to {@code compResult} was not a {@link ResolvedJavaMethod}
* @param compilationRequest the compilation request or {@code null}
- * @param compilationResult the code to be compiled
+ * @param compilationResult the code to be installed
* @param predefinedInstalledCode a pre-allocated {@link InstalledCode} object to use as a
* reference to the installed code. If {@code null}, a new {@link InstalledCode}
* object will be created.
@@ -204,12 +204,13 @@
}
try (DebugContext.Scope s2 = debug.scope("CodeInstall", debugContext);
DebugContext.Activation a = debug.activate()) {
- preCodeInstallationTasks(tasks, compilationResult);
InstalledCode installedCode;
try {
+ preCodeInstallationTasks(tasks, compilationResult, predefinedInstalledCode);
CompiledCode compiledCode = createCompiledCode(method, compilationRequest, compilationResult);
installedCode = getProviders().getCodeCache().installCode(method, compiledCode, predefinedInstalledCode, speculationLog, isDefault);
+ assert predefinedInstalledCode == null || installedCode == predefinedInstalledCode;
} catch (Throwable t) {
failCodeInstallationTasks(tasks, t);
throw t;
@@ -229,9 +230,9 @@
}
}
- private static void preCodeInstallationTasks(CodeInstallationTask[] tasks, CompilationResult compilationResult) {
+ private static void preCodeInstallationTasks(CodeInstallationTask[] tasks, CompilationResult compilationResult, InstalledCode predefinedInstalledCode) {
for (CodeInstallationTask task : tasks) {
- task.preProcess(compilationResult);
+ task.preProcess(compilationResult, predefinedInstalledCode);
}
}
@@ -305,23 +306,29 @@
public abstract static class CodeInstallationTask {
/**
* Task to run before code installation.
+ *
+ * @param compilationResult the code about to be installed
+ * @param predefinedInstalledCode a pre-allocated {@link InstalledCode} object that will be
+ * used as a reference to the installed code. May be {@code null}.
+ *
*/
- @SuppressWarnings("unused")
- public void preProcess(CompilationResult compilationResult) {
+ public void preProcess(CompilationResult compilationResult, InstalledCode predefinedInstalledCode) {
}
/**
* Task to run after the code is installed.
+ *
+ * @param installedCode a reference to the installed code
*/
- @SuppressWarnings("unused")
public void postProcess(InstalledCode installedCode) {
}
/**
* Invoked after {@link #preProcess} when code installation fails.
+ *
+ * @param cause the cause of the installation failure
*/
- @SuppressWarnings("unused")
- public void installFailed(Throwable t) {
+ public void installFailed(Throwable cause) {
}
}
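A sketch of a task written against the revised hooks; the class name and the timing logic are illustrative, not part of this change:

    public class TimingInstallationTask extends Backend.CodeInstallationTask {
        private long start;

        @Override
        public void preProcess(CompilationResult compilationResult, InstalledCode predefinedInstalledCode) {
            // predefinedInstalledCode may be null; when non-null it is the object
            // installCode will return, per the assert added above
            start = System.nanoTime();
        }

        @Override
        public void postProcess(InstalledCode installedCode) {
            TTY.println("code installed in " + (System.nanoTime() - start) + " ns");
        }

        @Override
        public void installFailed(Throwable cause) {
            TTY.println("code installation failed: " + cause);
        }
    }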
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/.checkstyle_checks.xml Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/.checkstyle_checks.xml Sat Mar 24 01:08:35 2018 +0100
@@ -116,7 +116,7 @@
<metadata name="net.sf.eclipsecs.core.comment" value="Illegal trailing whitespace(s) at the end of the line."/>
<property name="format" value="\s$"/>
<property name="message" value="Illegal trailing whitespace(s) at the end of the line."/>
- <property name="ignoreComments" value="true"/>
+ <property name="ignoreComments" value="false"/>
<metadata name="com.atlassw.tools.eclipse.checkstyle.comment" value="Checks for trailing spaces at the end of a line"/>
</module>
<module name="RegexpSinglelineJava">
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/Graph.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/Graph.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,9 @@
*/
package org.graalvm.compiler.graph;
+import static org.graalvm.compiler.graph.Graph.SourcePositionTracking.Default;
+import static org.graalvm.compiler.graph.Graph.SourcePositionTracking.Track;
+import static org.graalvm.compiler.graph.Graph.SourcePositionTracking.UpdateOnly;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_IGNORED;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_IGNORED;
@@ -33,6 +36,7 @@
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.Equivalence;
import org.graalvm.collections.UnmodifiableEconomicMap;
+import org.graalvm.compiler.core.common.GraalOptions;
import org.graalvm.compiler.debug.CounterKey;
import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.debug.DebugContext;
@@ -65,6 +69,13 @@
DeepFreeze
}
+ public enum SourcePositionTracking {
+ Default,
+ Ignore,
+ UpdateOnly,
+ Track
+ }
+
public final String name;
/**
@@ -80,7 +91,7 @@
/**
* Records if updating of node source information is required when performing inlining.
*/
- boolean seenNodeSourcePosition;
+ protected SourcePositionTracking trackNodeSourcePosition;
/**
* The number of valid entries in {@link #nodes}.
@@ -195,7 +206,7 @@
* was opened
*/
public DebugCloseable withNodeSourcePosition(NodeSourcePosition sourcePosition) {
- return sourcePosition != null ? new NodeSourcePositionScope(sourcePosition) : null;
+ return trackNodeSourcePosition() && sourcePosition != null ? new NodeSourcePositionScope(sourcePosition) : null;
}
/**
@@ -212,16 +223,26 @@
* to short circuit logic for updating those positions after inlining since that requires
* visiting every node in the graph.
*/
- public boolean mayHaveNodeSourcePosition() {
- assert seenNodeSourcePosition || verifyHasNoSourcePosition();
- return seenNodeSourcePosition;
+ public boolean updateNodeSourcePosition() {
+ return trackNodeSourcePosition == Track || trackNodeSourcePosition == UpdateOnly;
+ }
+
+ public boolean trackNodeSourcePosition() {
+ return trackNodeSourcePosition == Track;
}
- private boolean verifyHasNoSourcePosition() {
- for (Node node : getNodes()) {
- assert node.getNodeSourcePosition() == null;
+ public void setTrackNodeSourcePosition() {
+ if (trackNodeSourcePosition != Track) {
+ assert trackNodeSourcePosition == Default : trackNodeSourcePosition;
+ trackNodeSourcePosition = Track;
}
- return true;
+ }
+
+ public static SourcePositionTracking trackNodeSourcePositionDefault(OptionValues options, DebugContext debug) {
+ if (GraalOptions.TrackNodeSourcePosition.getValue(options) || debug.isDumpEnabledForMethod()) {
+ return Track;
+ }
+ return Default;
}
/**
@@ -255,6 +276,7 @@
iterableNodesLast = new ArrayList<>(NodeClass.allocatedNodeIterabledIds());
this.name = name;
this.options = options;
+ this.trackNodeSourcePosition = trackNodeSourcePositionDefault(options, debug);
assert debug != null;
this.debug = debug;
@@ -358,6 +380,9 @@
*/
protected Graph copy(String newName, Consumer<UnmodifiableEconomicMap<Node, Node>> duplicationMapCallback, DebugContext debugForCopy) {
Graph copy = new Graph(newName, options, debugForCopy);
+ if (trackNodeSourcePosition()) {
+ copy.setTrackNodeSourcePosition();
+ }
UnmodifiableEconomicMap<Node, Node> duplicates = copy.addDuplicates(getNodes(), this, this.getNodeCount(), (EconomicMap<Node, Node>) null);
if (duplicationMapCallback != null) {
duplicationMapCallback.accept(duplicates);
@@ -1069,10 +1094,9 @@
int id = nodesSize++;
nodes[id] = node;
node.id = id;
- if (currentNodeSourcePosition != null) {
+ if (currentNodeSourcePosition != null && trackNodeSourcePosition()) {
node.setNodeSourcePosition(currentNodeSourcePosition);
}
- seenNodeSourcePosition = seenNodeSourcePosition || node.getNodeSourcePosition() != null;
updateNodeCaches(node);
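How the new tracking states compose, as far as these hunks show: a graph starts in Default (or Track when GraalOptions.TrackNodeSourcePosition is set or dumping is enabled), setTrackNodeSourcePosition() moves Default to Track, and positions are only attached to newly registered nodes while tracking. A usage sketch, assuming graph is a Graph and position a non-null NodeSourcePosition:

    graph.setTrackNodeSourcePosition();   // Default -> Track (asserts for Ignore/UpdateOnly)
    try (DebugCloseable scope = graph.withNodeSourcePosition(position)) {
        // nodes created here receive 'position'; with tracking off,
        // withNodeSourcePosition returns null and the try-with-resources is a no-op
    }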
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/InlineCacheGuardPosition.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package org.graalvm.compiler.graph;
+
+import java.util.Objects;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.ResolvedJavaType;
+
+public class InlineCacheGuardPosition extends NodeSourcePosition {
+ private final ResolvedJavaType dispatchedType;
+ private final ResolvedJavaMethod concreteMethod;
+ private final int hashCode;
+
+ public InlineCacheGuardPosition(NodeSourcePosition callStack, ResolvedJavaType dispatchedType, ResolvedJavaMethod targetMethod) {
+ super(callStack.getCaller(), callStack.getMethod(), callStack.getBCI());
+ this.concreteMethod = targetMethod;
+ this.dispatchedType = dispatchedType;
+ this.hashCode = super.hashCode() + 7 * ((dispatchedType == null) ? 0 : dispatchedType.hashCode()) + 31 * targetMethod.hashCode();
+ }
+
+ public ResolvedJavaType getDispatchedType() {
+ return dispatchedType;
+ }
+
+ public ResolvedJavaMethod getTargetMethod() {
+ return concreteMethod;
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (obj != null && getClass() == obj.getClass()) {
+ InlineCacheGuardPosition that = (InlineCacheGuardPosition) obj;
+ if (hashCode != that.hashCode) {
+ return false;
+ }
+ if (this.getBCI() == that.getBCI() && Objects.equals(this.getMethod(), that.getMethod()) && Objects.equals(this.getCaller(), that.getCaller()) &&
+ Objects.equals(this.concreteMethod, that.concreteMethod) && Objects.equals(this.dispatchedType, that.dispatchedType)) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ @Override
+ public String toString() {
+ return "dispatchedType=" + (dispatchedType == null ? "null" : dispatchedType.getName()) + " target_method=" + concreteMethod.getName() + " target_method_class=" +
+ concreteMethod.getDeclaringClass().getName() + " position=" + super.toString();
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/Node.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/Node.java Sat Mar 24 01:08:35 2018 +0100
@@ -593,10 +593,9 @@
* Set the source position to {@code sourcePosition}.
*/
public void setNodeSourcePosition(NodeSourcePosition sourcePosition) {
+ assert sourcePosition != null || this.sourcePosition == null || this.sourcePosition.isPlaceholder() : "Invalid source position at node with id " + id;
this.sourcePosition = sourcePosition;
- if (sourcePosition != null && graph != null && !graph.seenNodeSourcePosition) {
- graph.seenNodeSourcePosition = true;
- }
+ // assert sourcePosition == null || graph == null || graph.trackNodeSourcePosition;
}
/**
@@ -920,6 +919,9 @@
}
newNode.graph = into;
newNode.id = INITIAL_ID;
+ if (sourcePosition != null && (into == null || into.updateNodeSourcePosition())) {
+ newNode.setNodeSourcePosition(sourcePosition);
+ }
if (into != null) {
into.register(newNode);
}
@@ -928,9 +930,6 @@
if (into != null && useIntoLeafNodeCache) {
into.putNodeIntoCache(newNode);
}
- if (graph != null && into != null && sourcePosition != null) {
- newNode.setNodeSourcePosition(sourcePosition);
- }
newNode.afterClone(this);
return newNode;
}
@@ -1195,6 +1194,15 @@
return getNodeClass().dataEquals(this, other);
}
+ /**
+ * Determines if this node is equal to the other node while ignoring differences in
+ * {@linkplain Successor control-flow} edges.
+ *
+ */
+ public boolean dataFlowEquals(Node other) {
+ return this == other || nodeClass == other.getNodeClass() && this.valueEquals(other) && nodeClass.equalInputs(this, other);
+ }
+
public final void pushInputs(NodeStack stack) {
getNodeClass().pushInputs(this, stack);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeClass.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeClass.java Sat Mar 24 01:08:35 2018 +0100
@@ -104,7 +104,7 @@
* Gets the {@link NodeClass} associated with a given {@link Class}.
*/
public static <T> NodeClass<T> create(Class<T> c) {
- assert get(c) == null;
+ assert getUnchecked(c) == null;
Class<? super T> superclass = c.getSuperclass();
NodeClass<? super T> nodeSuperclass = null;
if (superclass != NODE_CLASS) {
@@ -114,9 +114,9 @@
}
@SuppressWarnings("unchecked")
- public static <T> NodeClass<T> get(Class<T> superclass) {
+ private static <T> NodeClass<T> getUnchecked(Class<T> clazz) {
try {
- Field field = superclass.getDeclaredField("TYPE");
+ Field field = clazz.getDeclaredField("TYPE");
field.setAccessible(true);
return (NodeClass<T>) field.get(null);
} catch (IllegalArgumentException | IllegalAccessException | NoSuchFieldException | SecurityException e) {
@@ -124,6 +124,14 @@
}
}
+ public static <T> NodeClass<T> get(Class<T> clazz) {
+ NodeClass<T> result = getUnchecked(clazz);
+ if (result == null && clazz != NODE_CLASS) {
+ throw GraalError.shouldNotReachHere("TYPE field not initialized for class " + clazz.getTypeName());
+ }
+ return result;
+ }
+
private static final Class<?> NODE_CLASS = Node.class;
private static final Class<?> INPUT_LIST_CLASS = NodeInputList.class;
private static final Class<?> SUCCESSOR_LIST_CLASS = NodeSuccessorList.class;
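With the stricter lookup above, NodeClass.get now fails fast instead of silently returning null when a node class lacks its TYPE field, so every concrete node must follow the usual pattern, as IVPropertyNode earlier in this change does. A sketch with a hypothetical MyNode:

    @NodeInfo(cycles = CYCLES_IGNORED, size = SIZE_IGNORED)
    final class MyNode extends Node {
        public static final NodeClass<MyNode> TYPE = NodeClass.create(MyNode.class);

        MyNode() {
            super(TYPE);   // without the TYPE field, NodeClass.get(MyNode.class) now throws GraalError
        }
    }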
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeSourcePosition.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeSourcePosition.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,31 +22,100 @@
*/
package org.graalvm.compiler.graph;
+import static org.graalvm.compiler.graph.NodeSourcePosition.Marker.None;
+import static org.graalvm.compiler.graph.NodeSourcePosition.Marker.Placeholder;
+import static org.graalvm.compiler.graph.NodeSourcePosition.Marker.Substitution;
+
import java.util.Objects;
+import org.graalvm.compiler.bytecode.BytecodeDisassembler;
+import org.graalvm.compiler.bytecode.Bytecodes;
+
+import jdk.vm.ci.code.BytecodeFrame;
import jdk.vm.ci.code.BytecodePosition;
import jdk.vm.ci.code.CodeUtil;
-import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.JavaMethod;
import jdk.vm.ci.meta.MetaUtil;
import jdk.vm.ci.meta.ResolvedJavaMethod;
public class NodeSourcePosition extends BytecodePosition {
+ private static final boolean STRICT_SOURCE_POSITION = Boolean.getBoolean("debug.graal.SourcePositionStrictChecks");
+ private static final boolean SOURCE_POSITION_BYTECODES = Boolean.getBoolean("debug.graal.SourcePositionDisassemble");
+
+ private final int hashCode;
+ private final Marker marker;
+ private final SourceLanguagePosition sourceLanguagePosition;
+
/**
- * The receiver of the method this frame refers to.
+     * Removes marker frames from the caller chain; returns {@code null} if this position itself is a marker frame.
*/
- private final JavaConstant receiver;
- private final int hashCode;
+ public NodeSourcePosition trim() {
+ if (marker != None) {
+ return null;
+ }
+ NodeSourcePosition caller = getCaller();
+ if (caller != null) {
+ caller = caller.trim();
+ }
+ if (caller != getCaller()) {
+ return new NodeSourcePosition(caller, getMethod(), getBCI());
+ }
+ return this;
+ }
- public NodeSourcePosition(JavaConstant receiver, NodeSourcePosition caller, ResolvedJavaMethod method, int bci) {
+ enum Marker {
+ None,
+ Placeholder,
+ Substitution
+ }
+
+ public NodeSourcePosition(NodeSourcePosition caller, ResolvedJavaMethod method, int bci) {
+ this(caller, method, bci, None);
+ }
+
+ public NodeSourcePosition(NodeSourcePosition caller, ResolvedJavaMethod method, int bci, Marker marker) {
+ this(null, caller, method, bci, marker);
+
+ }
+
+ public NodeSourcePosition(SourceLanguagePosition sourceLanguagePosition, NodeSourcePosition caller, ResolvedJavaMethod method, int bci) {
+ this(sourceLanguagePosition, caller, method, bci, None);
+ }
+
+ public NodeSourcePosition(SourceLanguagePosition sourceLanguagePosition, NodeSourcePosition caller, ResolvedJavaMethod method, int bci, Marker marker) {
super(caller, method, bci);
if (caller == null) {
this.hashCode = 31 * bci + method.hashCode();
} else {
this.hashCode = caller.hashCode * 7 + 31 * bci + method.hashCode();
}
- this.receiver = receiver;
- assert receiver == null || method.getDeclaringClass().isInstance(receiver);
+ this.marker = marker;
+ this.sourceLanguagePosition = sourceLanguagePosition;
+ }
+
+ public static NodeSourcePosition placeholder(ResolvedJavaMethod method) {
+ return new NodeSourcePosition(null, method, BytecodeFrame.INVALID_FRAMESTATE_BCI, Placeholder);
+ }
+
+ public static NodeSourcePosition placeholder(ResolvedJavaMethod method, int bci) {
+ return new NodeSourcePosition(null, method, bci, Placeholder);
+ }
+
+ public boolean isPlaceholder() {
+ return marker == Placeholder;
+ }
+
+ public static NodeSourcePosition substitution(ResolvedJavaMethod method) {
+ return new NodeSourcePosition(null, method, BytecodeFrame.INVALID_FRAMESTATE_BCI, Substitution);
+ }
+
+ public static NodeSourcePosition substitution(NodeSourcePosition caller, ResolvedJavaMethod method, int bci) {
+ return new NodeSourcePosition(caller, method, bci, Substitution);
+ }
+
+ public boolean isSubstitution() {
+ return marker == Substitution;
}
@Override
@@ -60,7 +129,7 @@
return false;
}
if (this.getBCI() == that.getBCI() && Objects.equals(this.getMethod(), that.getMethod()) && Objects.equals(this.getCaller(), that.getCaller()) &&
- Objects.equals(this.receiver, that.receiver)) {
+ Objects.equals(this.sourceLanguagePosition, that.sourceLanguagePosition)) {
return true;
}
}
@@ -72,8 +141,18 @@
return hashCode;
}
- public JavaConstant getReceiver() {
- return receiver;
+ public int depth() {
+ int d = 0;
+ NodeSourcePosition pos = this;
+ while (pos != null) {
+ d++;
+ pos = pos.getCaller();
+ }
+ return d;
+ }
+
+    public SourceLanguagePosition getSourceLanguage() {
+ return sourceLanguagePosition;
}
@Override
@@ -81,17 +160,29 @@
return (NodeSourcePosition) super.getCaller();
}
- public NodeSourcePosition addCaller(JavaConstant newCallerReceiver, NodeSourcePosition link) {
- if (getCaller() == null) {
- assert newCallerReceiver == null || receiver == null : "replacing receiver";
- return new NodeSourcePosition(newCallerReceiver, link, getMethod(), getBCI());
- } else {
- return new NodeSourcePosition(receiver, getCaller().addCaller(newCallerReceiver, link), getMethod(), getBCI());
- }
+ public NodeSourcePosition addCaller(SourceLanguagePosition newSourceLanguagePosition, NodeSourcePosition link) {
+ return addCaller(newSourceLanguagePosition, link, false);
}
public NodeSourcePosition addCaller(NodeSourcePosition link) {
- return addCaller(null, link);
+ return addCaller(null, link, false);
+ }
+
+ public NodeSourcePosition addCaller(NodeSourcePosition link, boolean isSubstitution) {
+ return addCaller(null, link, isSubstitution);
+ }
+
+ public NodeSourcePosition addCaller(SourceLanguagePosition newSourceLanguagePosition, NodeSourcePosition link, boolean isSubstitution) {
+ if (getCaller() == null) {
+ if (isPlaceholder()) {
+ return new NodeSourcePosition(newSourceLanguagePosition, link, getMethod(), 0);
+ }
+ assert link == null || isSubstitution || verifyCaller(this, link) : link;
+
+ return new NodeSourcePosition(newSourceLanguagePosition, link, getMethod(), getBCI());
+ } else {
+ return new NodeSourcePosition(getCaller().addCaller(newSourceLanguagePosition, link, isSubstitution), getMethod(), getBCI());
+ }
}
@Override
@@ -99,9 +190,9 @@
StringBuilder sb = new StringBuilder(100);
NodeSourcePosition pos = this;
while (pos != null) {
- MetaUtil.appendLocation(sb.append("at "), pos.getMethod(), pos.getBCI());
- if (pos.receiver != null) {
- sb.append("receiver=" + pos.receiver + " ");
+ format(sb, pos);
+ if (pos.sourceLanguagePosition != null) {
+ sb.append(" source=" + pos.sourceLanguagePosition.toShortString());
}
pos = pos.getCaller();
if (pos != null) {
@@ -110,4 +201,55 @@
}
return sb.toString();
}
+
+ private static void format(StringBuilder sb, NodeSourcePosition pos) {
+ MetaUtil.appendLocation(sb.append("at "), pos.getMethod(), pos.getBCI());
+ if (SOURCE_POSITION_BYTECODES) {
+ String disassembly = BytecodeDisassembler.disassembleOne(pos.getMethod(), pos.getBCI());
+ if (disassembly != null && disassembly.length() > 0) {
+ sb.append(" // ");
+ sb.append(disassembly);
+ }
+ }
+ }
+
+ String shallowToString() {
+ StringBuilder sb = new StringBuilder(100);
+ format(sb, this);
+ return sb.toString();
+ }
+
+ public boolean verify() {
+ NodeSourcePosition current = this;
+ NodeSourcePosition caller = getCaller();
+ while (caller != null) {
+ assert verifyCaller(current, caller) : current;
+ current = caller;
+ caller = caller.getCaller();
+ }
+ return true;
+ }
+
+ private static boolean verifyCaller(NodeSourcePosition current, NodeSourcePosition caller) {
+ if (!STRICT_SOURCE_POSITION) {
+ return true;
+ }
+ if (BytecodeFrame.isPlaceholderBci(caller.getBCI())) {
+ return true;
+ }
+ int opcode = BytecodeDisassembler.getBytecodeAt(caller.getMethod(), caller.getBCI());
+ JavaMethod method = BytecodeDisassembler.getInvokedMethodAt(caller.getMethod(), caller.getBCI());
+ /*
+ * It's not really possible to match the declaring classes since this might be an interface
+ * invoke. Matching name and signature probably provides enough accuracy.
+ */
+ assert method == null || (method.getName().equals(current.getMethod().getName()) &&
+ method.getSignature().equals(current.getMethod().getSignature())) ||
+ caller.getMethod().getName().equals("linkToTargetMethod") ||
+ opcode == Bytecodes.INVOKEDYNAMIC ||
+ caller.getMethod().getDeclaringClass().getName().startsWith("Ljava/lang/invoke/LambdaForm$") ||
+ current.getMethod().getName().equals("callInlined") : "expected " + method + " but found " +
+ current.getMethod();
+ return true;
+ }
}
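A small sketch of the new marker machinery in use; calleeMethod and callerMethod are hypothetical stand-ins for real ResolvedJavaMethods:

    NodeSourcePosition callee = NodeSourcePosition.placeholder(calleeMethod);  // BCI not yet known
    NodeSourcePosition caller = new NodeSourcePosition(null, callerMethod, 7);

    NodeSourcePosition inlined = callee.addCaller(caller);  // placeholder collapses to a bci-0 frame
    assert inlined.depth() == 2;
    assert inlined.trim() == inlined;                        // no marker frames remain

    NodeSourcePosition sub = NodeSourcePosition.substitution(calleeMethod);
    assert sub.trim() == null;                               // substitution frames trim away entirely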
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/SourceLanguagePosition.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.graph;
+
+import java.util.Map;
+
+/**
+ * Provides a path to report information about a high level language source position to the Graph
+ * Visualizer.
+ */
+public interface SourceLanguagePosition {
+
+ /**
+ * This is called during dumping of Nodes. The implementation should add any properties which
+ * describe this source position. The actual keys and values used are a private contract between
+ * the language implementation and the Graph Visualizer.
+ */
+ void addSourceInformation(Map<String, Object> props);
+
+ /**
+ * Produce a compact description of this position suitable for printing.
+ */
+ String toShortString();
+}
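A hedged sketch of an implementation for a hypothetical guest language; the property keys are illustrative, since the interface above leaves them to the contract between language and visualizer:

    final class MyLanguagePosition implements SourceLanguagePosition {
        private final String file;
        private final int line;

        MyLanguagePosition(String file, int line) {
            this.file = file;
            this.line = line;
        }

        @Override
        public void addSourceInformation(Map<String, Object> props) {
            props.put("sourceLanguageFile", file);   // keys chosen by the language implementation
            props.put("sourceLanguageLine", line);
        }

        @Override
        public String toShortString() {
            return file + ":" + line;
        }
    }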
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/SourceLanguagePositionProvider.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.graph;
+
+import jdk.vm.ci.meta.JavaConstant;
+
+/**
+ * Provider of {@link SourceLanguagePosition} for a constant if it represents an AST node.
+ */
+public interface SourceLanguagePositionProvider {
+ SourceLanguagePosition getPosition(JavaConstant node);
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotBackend.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotBackend.java Sat Mar 24 01:08:35 2018 +0100
@@ -288,33 +288,37 @@
}
private static void emitCodeBody(CompilationResultBuilder crb, LIR lir, AArch64MacroAssembler masm) {
- /*
- * Insert a nop at the start of the prolog so we can patch in a branch if we need to
- * invalidate the method later.
- */
+ emitInvalidatePlaceholder(crb, masm);
+ crb.emit(lir);
+ }
+
+ /**
+ * Insert a nop at the start of the prolog so we can patch in a branch if we need to invalidate
+ * the method later.
+ *
+ * @see "http://mail.openjdk.java.net/pipermail/aarch64-port-dev/2013-September/000273.html"
+ */
+ public static void emitInvalidatePlaceholder(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
crb.blockComment("[nop for method invalidation]");
masm.nop();
-
- crb.emit(lir);
}
private void emitCodeSuffix(CompilationResultBuilder crb, AArch64MacroAssembler masm, FrameMap frameMap) {
HotSpotProviders providers = getProviders();
HotSpotFrameContext frameContext = (HotSpotFrameContext) crb.frameContext;
if (!frameContext.isStub) {
+ HotSpotForeignCallsProvider foreignCalls = providers.getForeignCalls();
try (ScratchRegister sc = masm.getScratchRegister()) {
Register scratch = sc.getRegister();
- HotSpotForeignCallsProvider foreignCalls = providers.getForeignCalls();
crb.recordMark(config.MARKID_EXCEPTION_HANDLER_ENTRY);
ForeignCallLinkage linkage = foreignCalls.lookupForeignCall(EXCEPTION_HANDLER);
Register helper = AArch64Call.isNearCall(linkage) ? null : scratch;
AArch64Call.directCall(crb, masm, linkage, helper, null);
-
- crb.recordMark(config.MARKID_DEOPT_HANDLER_ENTRY);
- linkage = foreignCalls.lookupForeignCall(DEOPTIMIZATION_HANDLER);
- helper = AArch64Call.isNearCall(linkage) ? null : scratch;
- AArch64Call.directCall(crb, masm, linkage, helper, null);
}
+ crb.recordMark(config.MARKID_DEOPT_HANDLER_ENTRY);
+ ForeignCallLinkage linkage = foreignCalls.lookupForeignCall(DEOPTIMIZATION_HANDLER);
+ masm.adr(lr, 0); // Warning: the argument is an offset from the instruction!
+ AArch64Call.directJmp(crb, masm, linkage);
} else {
// No need to emit the stubs for entries back into the method since
// it has no calls that can cause such "return" entries
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotDeoptimizeOp.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotDeoptimizeOp.java Sat Mar 24 01:08:35 2018 +0100
@@ -46,7 +46,9 @@
@Override
public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
- AArch64Call.directCall(crb, masm, crb.foreignCalls.lookupForeignCall(UNCOMMON_TRAP_HANDLER), null, info);
+ try (AArch64MacroAssembler.ScratchRegister scratch = masm.getScratchRegister()) {
+ AArch64Call.directCall(crb, masm, crb.foreignCalls.lookupForeignCall(UNCOMMON_TRAP_HANDLER), scratch.getRegister(), info, null);
+ }
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotMove.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64HotSpotMove.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -155,22 +155,24 @@
@Override
public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
- Register ptr = asRegister(input);
+ Register inputRegister = asRegister(input);
Register resultRegister = asRegister(result);
- Register base = (isRegister(baseRegister) ? asRegister(baseRegister) : zr);
+ Register base = encoding.hasBase() ? asRegister(baseRegister) : null;
+ emitUncompressCode(masm, inputRegister, resultRegister, base, encoding.getShift(), nonNull);
+ }
+
+ public static void emitUncompressCode(AArch64MacroAssembler masm, Register inputRegister, Register resReg, Register baseReg, int shift, boolean nonNull) {
// result = base + (ptr << shift)
- if (nonNull) {
- masm.add(64, resultRegister, base, ptr, AArch64Assembler.ShiftType.LSL, encoding.getShift());
- } else if (!encoding.hasBase()) {
- masm.add(64, resultRegister, zr, ptr, AArch64Assembler.ShiftType.LSL, encoding.getShift());
+ if (nonNull || baseReg == null) {
+ masm.add(64, resReg, baseReg == null ? zr : baseReg, inputRegister, AArch64Assembler.ShiftType.LSL, shift);
} else {
// if ptr is null it has to be null after decompression
Label done = new Label();
- if (!resultRegister.equals(ptr)) {
- masm.mov(32, resultRegister, ptr);
+ if (!resReg.equals(inputRegister)) {
+ masm.mov(32, resReg, inputRegister);
}
- masm.cbz(32, resultRegister, done);
- masm.add(64, resultRegister, base, resultRegister, AArch64Assembler.ShiftType.LSL, encoding.getShift());
+ masm.cbz(32, resReg, done);
+ masm.add(64, resReg, baseReg, resReg, AArch64Assembler.ShiftType.LSL, shift);
masm.bind(done);
}
}
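Both the branch above and the shared emitUncompressCode emit the standard compressed-oops decode. A plain-Java sketch of the emitted semantics, assuming base and shift come from the CompressEncoding (stand-in code, not backend API):

    // result = base + (c << shift), with the constraint that null (c == 0)
    // must stay null. When there is no base, 0 << shift is already 0, which
    // is why the code above skips the cbz check when nonNull || baseReg == null.
    static long uncompress(int c, long base, int shift, boolean nonNull) {
        if (!nonNull && c == 0) {
            return 0L; // null must remain null after decompression
        }
        return base + ((c & 0xFFFFFFFFL) << shift);
    }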
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64.test/src/org/graalvm/compiler/hotspot/amd64/test/StubAVXTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64.test/src/org/graalvm/compiler/hotspot/amd64/test/StubAVXTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -65,6 +65,7 @@
import jdk.vm.ci.meta.MetaAccessProvider;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.Value;
+import org.graalvm.compiler.hotspot.HotSpotBackend;
public class StubAVXTest extends LIRTest {
@@ -72,6 +73,10 @@
public void checkAMD64() {
Assume.assumeTrue("skipping AMD64 specific test", getTarget().arch instanceof AMD64);
Assume.assumeTrue("skipping AVX test", ((AMD64) getTarget().arch).getFeatures().contains(CPUFeature.AVX));
+ if (getBackend() instanceof HotSpotBackend) {
+ HotSpotBackend backend = (HotSpotBackend) getBackend();
+ Assume.assumeTrue("skipping because of MaxVectorSize", backend.getRuntime().getVMConfig().maxVectorSize >= 32);
+ }
}
private static final DataPointerConstant avxConstant = new ArrayDataPointerConstant(new float[]{1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f}, 32);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotAddressLowering.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotAddressLowering.java Sat Mar 24 01:08:35 2018 +0100
@@ -204,7 +204,7 @@
if (init >= 0 && extremum >= 0) {
long shortestTrip = (extremum - init) / stride + 1;
- if (shortestTrip == countedLoopInfo.constantMaxTripCount()) {
+ if (countedLoopInfo.constantMaxTripCount().equals(shortestTrip)) {
return graph.unique(new ZeroExtendNode(input, INT_BITS, ADDRESS_BITS, true));
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotArithmeticLIRGenerator.java Thu Mar 29 20:12:02 2018 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package org.graalvm.compiler.hotspot.amd64;
-
-import static org.graalvm.compiler.hotspot.HotSpotBackend.Options.GraalArithmeticStubs;
-import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.COS;
-import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.LOG;
-import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.LOG10;
-import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.SIN;
-import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.TAN;
-
-import org.graalvm.compiler.core.amd64.AMD64ArithmeticLIRGenerator;
-import org.graalvm.compiler.core.common.LIRKind;
-import org.graalvm.compiler.lir.Variable;
-
-import jdk.vm.ci.meta.Value;
-
-public class AMD64HotSpotArithmeticLIRGenerator extends AMD64ArithmeticLIRGenerator {
-
- @Override
- public Value emitMathLog(Value input, boolean base10) {
- if (GraalArithmeticStubs.getValue(getOptions())) {
- return super.emitMathLog(input, base10);
- }
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- getLIRGen().append(new AMD64HotSpotMathIntrinsicOp(base10 ? LOG10 : LOG, result, getLIRGen().asAllocatable(input)));
- return result;
- }
-
- @Override
- public Value emitMathCos(Value input) {
- if (GraalArithmeticStubs.getValue(getOptions())) {
- return super.emitMathCos(input);
- }
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- getLIRGen().append(new AMD64HotSpotMathIntrinsicOp(COS, result, getLIRGen().asAllocatable(input)));
- return result;
- }
-
- @Override
- public Value emitMathSin(Value input) {
- if (GraalArithmeticStubs.getValue(getOptions())) {
- return super.emitMathSin(input);
- }
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- getLIRGen().append(new AMD64HotSpotMathIntrinsicOp(SIN, result, getLIRGen().asAllocatable(input)));
- return result;
- }
-
- @Override
- public Value emitMathTan(Value input) {
- if (GraalArithmeticStubs.getValue(getOptions())) {
- return super.emitMathTan(input);
- }
- Variable result = getLIRGen().newVariable(LIRKind.combine(input));
- getLIRGen().append(new AMD64HotSpotMathIntrinsicOp(TAN, result, getLIRGen().asAllocatable(input)));
- return result;
- }
-
-}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotLIRGenerator.java Sat Mar 24 01:08:35 2018 +0100
@@ -25,13 +25,13 @@
import static jdk.vm.ci.amd64.AMD64.rbp;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.hotspot.HotSpotBackend.INITIALIZE_KLASS_BY_SYMBOL;
+import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_DYNAMIC_INVOKE;
import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_KLASS_BY_SYMBOL;
import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_METHOD_BY_SYMBOL_AND_LOAD_COUNTERS;
import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_STRING_BY_SYMBOL;
-import static org.graalvm.compiler.hotspot.HotSpotBackend.RESOLVE_DYNAMIC_INVOKE;
-import static org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction.RESOLVE;
import static org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction.INITIALIZE;
import static org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction.LOAD_COUNTERS;
+import static org.graalvm.compiler.hotspot.meta.HotSpotConstantLoadAction.RESOLVE;
import java.util.ArrayList;
import java.util.List;
@@ -115,7 +115,7 @@
}
private AMD64HotSpotLIRGenerator(HotSpotProviders providers, GraalHotSpotVMConfig config, LIRGenerationResult lirGenRes, BackupSlotProvider backupSlotProvider) {
- this(new AMD64HotSpotLIRKindTool(), new AMD64HotSpotArithmeticLIRGenerator(), new AMD64HotSpotMoveFactory(backupSlotProvider), providers, config, lirGenRes);
+ this(new AMD64HotSpotLIRKindTool(), new AMD64ArithmeticLIRGenerator(new AMD64HotSpotMaths()), new AMD64HotSpotMoveFactory(backupSlotProvider), providers, config, lirGenRes);
}
protected AMD64HotSpotLIRGenerator(LIRKindTool lirKindTool, AMD64ArithmeticLIRGenerator arithmeticLIRGen, MoveFactory moveFactory, HotSpotProviders providers, GraalHotSpotVMConfig config,
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotMaths.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.amd64;
+
+import static org.graalvm.compiler.hotspot.HotSpotBackend.Options.GraalArithmeticStubs;
+import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.COS;
+import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.LOG;
+import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.LOG10;
+import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.SIN;
+import static org.graalvm.compiler.hotspot.amd64.AMD64HotSpotMathIntrinsicOp.IntrinsicOpcode.TAN;
+
+import org.graalvm.compiler.core.amd64.AMD64ArithmeticLIRGenerator;
+import org.graalvm.compiler.core.common.LIRKind;
+import org.graalvm.compiler.hotspot.HotSpotBackend.Options;
+import org.graalvm.compiler.lir.Variable;
+import org.graalvm.compiler.lir.gen.LIRGenerator;
+
+import jdk.vm.ci.meta.Value;
+
+/**
+ * Lowering of selected {@link Math} routines that depends on the value of
+ * {@link Options#GraalArithmeticStubs}.
+ */
+public class AMD64HotSpotMaths implements AMD64ArithmeticLIRGenerator.Maths {
+
+ @Override
+ public Variable emitLog(LIRGenerator gen, Value input, boolean base10) {
+ if (GraalArithmeticStubs.getValue(gen.getResult().getLIR().getOptions())) {
+ return null;
+ }
+ Variable result = gen.newVariable(LIRKind.combine(input));
+ gen.append(new AMD64HotSpotMathIntrinsicOp(base10 ? LOG10 : LOG, result, gen.asAllocatable(input)));
+ return result;
+ }
+
+ @Override
+ public Variable emitCos(LIRGenerator gen, Value input) {
+ if (GraalArithmeticStubs.getValue(gen.getResult().getLIR().getOptions())) {
+ return null;
+ }
+ Variable result = gen.newVariable(LIRKind.combine(input));
+ gen.append(new AMD64HotSpotMathIntrinsicOp(COS, result, gen.asAllocatable(input)));
+ return result;
+ }
+
+ @Override
+ public Variable emitSin(LIRGenerator gen, Value input) {
+ if (GraalArithmeticStubs.getValue(gen.getResult().getLIR().getOptions())) {
+ return null;
+ }
+ Variable result = gen.newVariable(LIRKind.combine(input));
+ gen.append(new AMD64HotSpotMathIntrinsicOp(SIN, result, gen.asAllocatable(input)));
+ return result;
+ }
+
+ @Override
+ public Variable emitTan(LIRGenerator gen, Value input) {
+ if (GraalArithmeticStubs.getValue(gen.getResult().getLIR().getOptions())) {
+ return null;
+ }
+ Variable result = gen.newVariable(LIRKind.combine(input));
+ gen.append(new AMD64HotSpotMathIntrinsicOp(TAN, result, gen.asAllocatable(input)));
+ return result;
+ }
+
+}
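Each hook returns null when GraalArithmeticStubs is enabled, signalling the generic AMD64 generator to fall back to its own lowering (the stub path that the deleted AMD64HotSpotArithmeticLIRGenerator reached via super calls). A sketch of the assumed caller shape; the names maths and emitGenericMathLog are hypothetical:

    // A null result from the Maths hook means "no HotSpot-specific
    // intrinsic, use the default lowering".
    Variable result = maths == null ? null : maths.emitLog(gen, input, base10);
    if (result == null) {
        result = emitGenericMathLog(gen, input, base10); // hypothetical fallback
    }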
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.sparc/src/org/graalvm/compiler/hotspot/sparc/SPARCHotSpotMove.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.sparc/src/org/graalvm/compiler/hotspot/sparc/SPARCHotSpotMove.java Sat Mar 24 01:08:35 2018 +0100
@@ -187,21 +187,26 @@
public void emitCode(CompilationResultBuilder crb, SPARCMacroAssembler masm) {
Register inputRegister = asRegister(input);
Register resReg = asRegister(result);
+ Register baseReg = encoding.hasBase() ? asRegister(baseRegister) : null;
+ emitUncompressCode(masm, inputRegister, resReg, baseReg, encoding.getShift(), nonNull);
+ }
+
+ public static void emitUncompressCode(SPARCMacroAssembler masm, Register inputRegister, Register resReg, Register baseReg, int shift, boolean nonNull) {
Register secondaryInput;
- if (encoding.getShift() != 0) {
- masm.sll(inputRegister, encoding.getShift(), resReg);
+ if (shift != 0) {
+ masm.sll(inputRegister, shift, resReg);
secondaryInput = resReg;
} else {
secondaryInput = inputRegister;
}
- if (encoding.hasBase()) {
+ if (baseReg != null) {
if (nonNull) {
- masm.add(secondaryInput, asRegister(baseRegister), resReg);
+ masm.add(secondaryInput, baseReg, resReg);
} else {
Label done = new Label();
BPR.emit(masm, Rc_z, ANNUL, PREDICT_TAKEN, secondaryInput, done);
- masm.add(asRegister(baseRegister), secondaryInput, resReg);
+ masm.add(baseReg, secondaryInput, resReg);
masm.bind(done);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompileTheWorld.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CompileTheWorld.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -553,6 +553,7 @@
classFileCounter++;
if (className.startsWith("jdk.management.") ||
+ className.startsWith("jdk.internal.cmm.*") ||
// GR-5881: The class initializer for
// sun.tools.jconsole.OutputViewer
// spawns non-daemon threads for redirecting sysout and syserr.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/HotSpotLazyInitializationTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.test;
+
+import org.graalvm.compiler.core.test.GraalCompilerTest;
+import org.graalvm.compiler.hotspot.meta.HotSpotClassInitializationPlugin;
+import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
+import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration.Plugins;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Test;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+public class HotSpotLazyInitializationTest extends GraalCompilerTest {
+
+ HotSpotClassInitializationPlugin classInitPlugin = new HotSpotClassInitializationPlugin();
+
+ @Override
+ protected Plugins getDefaultGraphBuilderPlugins() {
+ Plugins plugins = super.getDefaultGraphBuilderPlugins();
+ plugins.setClassInitializationPlugin(classInitPlugin);
+ return plugins;
+ }
+
+ static boolean initializerRun = false;
+
+ static class X {
+ static {
+ initializerRun = true;
+ }
+
+ static void foo() {
+ }
+ }
+
+ public static void invokeStatic() {
+ X.foo();
+ }
+
+ // If the constant pool can do eager resolution without eager initialization, then fail
+ // if the compiler causes the static initializer to run.
+ private void test(String name) {
+ ResolvedJavaMethod method = getResolvedJavaMethod(name);
+ Assume.assumeTrue("skipping for old JVMCI", classInitPlugin.supportsLazyInitialization(method.getConstantPool()));
+ parseEager(method, AllowAssumptions.NO);
+ Assert.assertFalse(initializerRun);
+ }
+
+ @Test
+ public void test1() {
+ test("invokeStatic");
+ }
+
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilerConfigurationFactory.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/CompilerConfigurationFactory.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,17 +24,25 @@
import static jdk.vm.ci.common.InitTimer.timer;
+import java.net.URL;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import org.graalvm.collections.EconomicMap;
import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.debug.TTY;
+import org.graalvm.compiler.lir.phases.LIRPhase;
+import org.graalvm.compiler.lir.phases.LIRPhaseSuite;
+import org.graalvm.compiler.options.EnumOptionKey;
import org.graalvm.compiler.options.Option;
import org.graalvm.compiler.options.OptionKey;
import org.graalvm.compiler.options.OptionType;
import org.graalvm.compiler.options.OptionValues;
+import org.graalvm.compiler.phases.BasePhase;
+import org.graalvm.compiler.phases.PhaseSuite;
import org.graalvm.compiler.phases.tiers.CompilerConfiguration;
import org.graalvm.compiler.serviceprovider.GraalServices;
@@ -48,12 +56,20 @@
*/
public abstract class CompilerConfigurationFactory implements Comparable<CompilerConfigurationFactory> {
+ enum ShowConfigurationLevel {
+ none,
+ info,
+ verbose
+ }
+
static class Options {
// @formatter:off
@Option(help = "Names the Graal compiler configuration to use. If ommitted, the compiler configuration " +
"with the highest auto-selection priority is used. To see the set of available configurations, " +
"supply the value 'help' to this option.", type = OptionType.Expert)
public static final OptionKey<String> CompilerConfiguration = new OptionKey<>(null);
+ @Option(help = "Writes to the VM log information about the Graal compiler configuration selected.", type = OptionType.User)
+ public static final OptionKey<ShowConfigurationLevel> ShowConfiguration = new EnumOptionKey<>(ShowConfigurationLevel.none);
// @formatter:on
}
@@ -192,6 +208,52 @@
factory = candidates.get(0);
}
}
+ ShowConfigurationLevel level = Options.ShowConfiguration.getValue(options);
+ if (level != ShowConfigurationLevel.none) {
+ switch (level) {
+ case info: {
+ printConfigInfo(factory);
+ break;
+ }
+ case verbose: {
+ printConfigInfo(factory);
+ CompilerConfiguration config = factory.createCompilerConfiguration();
+ TTY.println("High tier: " + phaseNames(config.createHighTier(options)));
+ TTY.println("Mid tier: " + phaseNames(config.createMidTier(options)));
+ TTY.println("Low tier: " + phaseNames(config.createLowTier(options)));
+ TTY.println("Pre regalloc stage: " + phaseNames(config.createPreAllocationOptimizationStage(options)));
+ TTY.println("Regalloc stage: " + phaseNames(config.createAllocationStage(options)));
+ TTY.println("Post regalloc stage: " + phaseNames(config.createPostAllocationOptimizationStage(options)));
+ break;
+ }
+ }
+ }
return factory;
}
+
+ private static void printConfigInfo(CompilerConfigurationFactory factory) {
+ URL location = factory.getClass().getResource(factory.getClass().getSimpleName() + ".class");
+ TTY.printf("Using Graal compiler configuration '%s' provided by %s loaded from %s%n", factory.name, factory.getClass().getName(), location);
+ }
+
+ private static <C> List<String> phaseNames(PhaseSuite<C> suite) {
+ Collection<BasePhase<? super C>> phases = suite.getPhases();
+ List<String> res = new ArrayList<>(phases.size());
+ for (BasePhase<?> phase : phases) {
+ res.add(phase.contractorName());
+ }
+ Collections.sort(res);
+ return res;
+ }
+
+ private static <C> List<String> phaseNames(LIRPhaseSuite<C> suite) {
+ List<LIRPhase<C>> phases = suite.getPhases();
+ List<String> res = new ArrayList<>(phases.size());
+ for (LIRPhase<?> phase : phases) {
+ res.add(phase.getClass().getName());
+ }
+ Collections.sort(res);
+ return res;
+ }
}
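With the graal. system-property prefix used by HotSpotGraalOptionValues, the new option would be exercised roughly as follows (configuration name and output shape illustrative, following printConfigInfo):

    java -Dgraal.ShowConfiguration=info -XX:+UseJVMCICompiler ...
    Using Graal compiler configuration 'community' provided by <factory class> loaded from <class URL>

At the verbose level, the phases of each tier and of the three register-allocation stages are also listed, sorted by name.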
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfig.java Sat Mar 24 01:08:35 2018 +0100
@@ -45,8 +45,9 @@
*/
public static final GraalHotSpotVMConfig INJECTED_VMCONFIG = null;
+ // Comparing against "1.9" gives the correct result for "1.8" and earlier as well as for "9", "10", etc.
private final boolean isJDK8 = System.getProperty("java.specification.version").compareTo("1.9") < 0;
- private final int JDKVersion = isJDK8 ? 8 : Integer.parseInt(System.getProperty("java.specification.version"));
+ private final int jdkVersion = isJDK8 ? 8 : Integer.parseInt(System.getProperty("java.specification.version"));
public final String osName = getHostOSName();
public final String osArch = getHostArchitectureName();
public final boolean windowsOs = System.getProperty("os.name", "").startsWith("Windows");
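The string comparison is correct for all three shapes a specification version can take; a quick check:

    // "1.8" < "1.9" because '8' < '9'; "9" and "10" both compare above "1.9"
    // because '9' > '1' and '0' (0x30) > '.' (0x2e).
    assert "1.8".compareTo("1.9") < 0;  // JDK 8 and earlier
    assert "9".compareTo("1.9") > 0;    // JDK 9
    assert "10".compareTo("1.9") > 0;   // JDK 10 and later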
@@ -160,6 +161,7 @@
public final boolean forceUnreachable = getFlag("ForceUnreachable", Boolean.class);
public final int codeSegmentSize = getFlag("CodeCacheSegmentSize", Integer.class);
public final boolean foldStableValues = getFlag("FoldStableValues", Boolean.class);
+ public final int maxVectorSize = getFlag("MaxVectorSize", Integer.class);
public final boolean useTLAB = getFlag("UseTLAB", Boolean.class);
public final boolean useBiasedLocking = getFlag("UseBiasedLocking", Boolean.class);
@@ -555,12 +557,10 @@
public final int logOfHRGrainBytes = getFieldValue("HeapRegion::LogOfHRGrainBytes", Integer.class, "int");
- public final byte dirtyCardValue = JDKVersion >= 11 ? getConstant("CardTable::dirty_card", Byte.class) :
- (JDKVersion > 8 ? getConstant("CardTableModRefBS::dirty_card", Byte.class) :
- getFieldValue("CompilerToVM::Data::dirty_card", Byte.class, "int"));
- public final byte g1YoungCardValue = JDKVersion >= 11 ? getConstant("G1CardTable::g1_young_gen", Byte.class) :
- (JDKVersion > 8 ? getConstant("G1SATBCardTableModRefBS::g1_young_gen", Byte.class) :
- getFieldValue("CompilerToVM::Data::g1_young_card", Byte.class, "int"));
+ public final byte dirtyCardValue = jdkVersion >= 11 ? getConstant("CardTable::dirty_card", Byte.class)
+ : (jdkVersion > 8 ? getConstant("CardTableModRefBS::dirty_card", Byte.class) : getFieldValue("CompilerToVM::Data::dirty_card", Byte.class, "int"));
+ public final byte g1YoungCardValue = jdkVersion >= 11 ? getConstant("G1CardTable::g1_young_gen", Byte.class)
+ : (jdkVersion > 8 ? getConstant("G1SATBCardTableModRefBS::g1_young_gen", Byte.class) : getFieldValue("CompilerToVM::Data::g1_young_card", Byte.class, "int"));
public final long cardtableStartAddress = getFieldValue("CompilerToVM::Data::cardtable_start_address", Long.class, "jbyte*");
public final int cardtableShift = getFieldValue("CompilerToVM::Data::cardtable_shift", Integer.class, "int");
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotBackend.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotBackend.java Sat Mar 24 01:08:35 2018 +0100
@@ -91,7 +91,7 @@
public static class Options {
// @formatter:off
@Option(help = "Use Graal arithmetic stubs instead of HotSpot stubs where possible")
- public static final OptionKey<Boolean> GraalArithmeticStubs = new OptionKey<>(true);
+ public static final OptionKey<Boolean> GraalArithmeticStubs = new OptionKey<>(false); // GR-8276
@Option(help = "Enables instruction profiling on assembler level. Valid values are a comma separated list of supported instructions." +
" Compare with subclasses of Assembler.InstructionCounter.", type = OptionType.Debug)
public static final OptionKey<String> ASMInstructionProfiling = new OptionKey<>(null);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotCompiledCodeBuilder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotCompiledCodeBuilder.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,8 @@
*/
package org.graalvm.compiler.hotspot;
+import static org.graalvm.util.CollectionsUtil.anyMatch;
+
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.ArrayList;
@@ -37,9 +39,9 @@
import org.graalvm.compiler.code.CompilationResult.CodeAnnotation;
import org.graalvm.compiler.code.CompilationResult.CodeComment;
import org.graalvm.compiler.code.CompilationResult.JumpTable;
-import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.code.DataSection;
import org.graalvm.compiler.code.SourceMapping;
+import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.graph.NodeSourcePosition;
import jdk.vm.ci.code.CodeCacheProvider;
@@ -55,7 +57,6 @@
import jdk.vm.ci.hotspot.HotSpotCompiledCode;
import jdk.vm.ci.hotspot.HotSpotCompiledCode.Comment;
import jdk.vm.ci.hotspot.HotSpotCompiledNmethod;
-import jdk.vm.ci.hotspot.HotSpotObjectConstant;
import jdk.vm.ci.hotspot.HotSpotResolvedJavaMethod;
import jdk.vm.ci.meta.Assumptions.Assumption;
import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -210,16 +211,31 @@
sites.addAll(target.getDataPatches());
sites.addAll(target.getMarks());
- /*
- * Translate the source mapping into appropriate info points. In HotSpot only one position
- * can really be represented and recording the end PC seems to give the best results and
- * corresponds with what C1 and C2 do.
- */
if (codeCache.shouldDebugNonSafepoints()) {
+ /*
+ * Translate the source mapping into appropriate info points. In HotSpot only one
+ * position can really be represented and recording the end PC seems to give the best
+ * results and corresponds with what C1 and C2 do. HotSpot only accepts these
+ * infopoints when -XX:+DebugNonSafepoints is enabled, so they are not emitted otherwise.
+ */
+ List<Site> sourcePositionSites = new ArrayList<>();
for (SourceMapping source : target.getSourceMappings()) {
- sites.add(new Infopoint(source.getEndOffset(), new DebugInfo(source.getSourcePosition()), InfopointReason.BYTECODE_POSITION));
- assert verifySourcePositionReceivers(source.getSourcePosition());
+ NodeSourcePosition sourcePosition = source.getSourcePosition();
+ assert sourcePosition.verify();
+ sourcePosition = sourcePosition.trim();
+ /*
+ * Don't add BYTECODE_POSITION info points that would potentially create conflicts.
+ * Under certain conditions the site's pc is not the pc that gets recorded by
+ * HotSpot (see {@code CodeInstaller::site_Call}). So, avoid adding any source
+ * positions that can potentially map to the same pc. To do that, make sure the
+ * source mapping does not contain the pc of any important Site.
+ */
+ if (sourcePosition != null && !anyMatch(sites, s -> source.contains(s.pcOffset))) {
+ sourcePositionSites.add(new Infopoint(source.getEndOffset(), new DebugInfo(sourcePosition), InfopointReason.BYTECODE_POSITION));
+ }
}
+ sites.addAll(sourcePositionSites);
}
SiteComparator c = new SiteComparator();
@@ -245,18 +261,4 @@
}
return sites.toArray(new Site[sites.size()]);
}
-
- /**
- * Verifies that the captured receiver type agrees with the declared type of the method.
- */
- private static boolean verifySourcePositionReceivers(NodeSourcePosition start) {
- NodeSourcePosition pos = start;
- while (pos != null) {
- if (pos.getReceiver() != null) {
- assert ((HotSpotObjectConstant) pos.getReceiver()).asObject(pos.getMethod().getDeclaringClass()) != null;
- }
- pos = pos.getCaller();
- }
- return true;
- }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompilerFactory.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompilerFactory.java Sat Mar 24 01:08:35 2018 +0100
@@ -26,8 +26,8 @@
import static org.graalvm.compiler.hotspot.HotSpotGraalOptionValues.GRAAL_OPTION_PROPERTY_PREFIX;
import java.io.PrintStream;
+import java.util.Collections;
import java.util.Map;
-import java.util.Collections;
import org.graalvm.compiler.debug.MethodFilter;
import org.graalvm.compiler.options.Option;
@@ -36,6 +36,7 @@
import org.graalvm.compiler.options.OptionValues;
import org.graalvm.compiler.options.OptionsParser;
import org.graalvm.compiler.phases.tiers.CompilerConfiguration;
+import org.graalvm.compiler.serviceprovider.JDK9Method;
import jdk.vm.ci.common.InitTimer;
import jdk.vm.ci.hotspot.HotSpotJVMCICompilerFactory;
@@ -48,6 +49,22 @@
private static MethodFilter[] graalCompileOnlyFilter;
private static boolean compileGraalWithC1Only;
+ /**
+ * Module containing {@link HotSpotJVMCICompilerFactory}.
+ */
+ private Object jvmciModule;
+
+ /**
+ * Module containing {@link HotSpotGraalCompilerFactory}.
+ */
+ private Object graalModule;
+
+ /**
+ * Module containing the {@linkplain CompilerConfigurationFactory#selectFactory selected}
+ * configuration.
+ */
+ private Object compilerConfigurationModule;
+
private final HotSpotGraalJVMCIServiceLocator locator;
HotSpotGraalCompilerFactory(HotSpotGraalJVMCIServiceLocator locator) {
@@ -70,6 +87,10 @@
assert options == null : "cannot select " + getClass() + " service more than once";
options = HotSpotGraalOptionValues.HOTSPOT_OPTIONS;
initializeGraalCompilePolicyFields(options);
+ if (!JDK9Method.Java8OrEarlier) {
+ jvmciModule = JDK9Method.getModule(HotSpotJVMCICompilerFactory.class);
+ graalModule = JDK9Method.getModule(HotSpotGraalCompilerFactory.class);
+ }
/*
* Exercise this code path early to encourage loading now. This doesn't solve problem of
* deadlock during class loading but seems to eliminate it in practice.
@@ -102,7 +123,9 @@
@Option(help = "In tiered mode compile Graal and JVMCI using optimized first tier code.", type = OptionType.Expert)
public static final OptionKey<Boolean> CompileGraalWithC1Only = new OptionKey<>(true);
- @Option(help = "A method filter selecting what should be compiled by Graal. All other requests will be reduced to CompilationLevel.Simple.", type = OptionType.Expert)
+ @Option(help = "A filter applied to a method the VM has selected for compilation by Graal. " +
+ "A method not matching the filter is redirected to a lower tier compiler. " +
+ "The filter format is the same as for the MethodFilter option.", type = OptionType.Expert)
public static final OptionKey<String> GraalCompileOnly = new OptionKey<>(null);
// @formatter:on
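Since the filter format matches the MethodFilter option, typical values look like the following (patterns illustrative):

    -Dgraal.GraalCompileOnly=java.lang.String.hashCode    (a single method)
    -Dgraal.GraalCompileOnly=org.example.*                (a whole package)

A method the VM selected for Graal that does not match the filter is handed to a lower tier via the adjustCompilationLevel hook below.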
@@ -110,7 +133,11 @@
@Override
public HotSpotGraalCompiler createCompiler(JVMCIRuntime runtime) {
- HotSpotGraalCompiler compiler = createCompiler(runtime, options, CompilerConfigurationFactory.selectFactory(null, options));
+ CompilerConfigurationFactory factory = CompilerConfigurationFactory.selectFactory(null, options);
+ if (!JDK9Method.Java8OrEarlier) {
+ compilerConfigurationModule = JDK9Method.getModule(factory.getClass());
+ }
+ HotSpotGraalCompiler compiler = createCompiler(runtime, options, factory);
// Only the HotSpotGraalRuntime associated with the compiler created via
// jdk.vm.ci.runtime.JVMCIRuntime.getCompiler() is registered for receiving
// VM events.
@@ -160,15 +187,54 @@
assert HotSpotGraalCompilerFactory.class.getName().equals("org.graalvm.compiler.hotspot.HotSpotGraalCompilerFactory");
}
+ static final ClassLoader JVMCI_LOADER = HotSpotGraalCompilerFactory.class.getClassLoader();
+
/*
* This method is static so it can be exercised during initialization.
*/
- private static CompilationLevel adjustCompilationLevelInternal(Class<?> declaringClass, String name, String signature, CompilationLevel level) {
+ private CompilationLevel adjustCompilationLevelInternal(Class<?> declaringClass, String name, String signature, CompilationLevel level) {
if (compileGraalWithC1Only) {
if (level.ordinal() > CompilationLevel.Simple.ordinal()) {
- String declaringClassName = declaringClass.getName();
- if (declaringClassName.startsWith("jdk.vm.ci") || declaringClassName.startsWith("org.graalvm") || declaringClassName.startsWith("com.oracle.graal")) {
- return CompilationLevel.Simple;
+ if (JDK9Method.Java8OrEarlier) {
+ if (JVMCI_LOADER != null) {
+ // When running with +UseJVMCIClassLoader all classes in
+ // the JVMCI loader should be compiled with C1.
+ try {
+ if (declaringClass.getClassLoader() == JVMCI_LOADER) {
+ return CompilationLevel.Simple;
+ }
+ } catch (SecurityException e) {
+ // This is definitely not a JVMCI or Graal class
+ }
+ } else {
+ // JVMCI and Graal are on the bootclasspath so match based on the package.
+ String declaringClassName = declaringClass.getName();
+ if (declaringClassName.startsWith("jdk.vm.ci")) {
+ return CompilationLevel.Simple;
+ }
+ if (declaringClassName.startsWith("org.graalvm.") &&
+ (declaringClassName.startsWith("org.graalvm.compiler.") ||
+ declaringClassName.startsWith("org.graalvm.collections.") ||
+ declaringClassName.startsWith("org.graalvm.compiler.word.") ||
+ declaringClassName.startsWith("org.graalvm.graphio."))) {
+ return CompilationLevel.Simple;
+ }
+ if (declaringClassName.startsWith("com.oracle.graal") &&
+ (declaringClassName.startsWith("com.oracle.graal.enterprise") ||
+ declaringClassName.startsWith("com.oracle.graal.vector") ||
+ declaringClassName.startsWith("com.oracle.graal.asm"))) {
+ return CompilationLevel.Simple;
+ }
+ }
+ } else {
+ try {
+ Object module = JDK9Method.getModule(declaringClass);
+ if (jvmciModule == module || graalModule == module || compilerConfigurationModule == module) {
+ return CompilationLevel.Simple;
+ }
+ } catch (Throwable e) {
+ throw new InternalError(e);
+ }
}
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/NodeCostDumpUtil.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/NodeCostDumpUtil.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,6 @@
import java.io.File;
import java.io.IOException;
-import java.lang.reflect.Field;
import java.net.URI;
import java.net.URL;
import java.net.URLClassLoader;
@@ -114,14 +113,9 @@
}
System.err.printf("Loaded %s node classes...\n", nodeClasses.size());
List<NodeClass<?>> nc = new ArrayList<>();
- for (Class<?> nodeClass : nodeClasses) {
- Field f;
+ for (Class<?> c : nodeClasses) {
try {
- f = nodeClass.getField("TYPE");
- f.setAccessible(true);
- Object val = f.get(null);
- NodeClass<?> nodeType = (NodeClass<?>) val;
- nc.add(nodeType);
+ nc.add(NodeClass.get(c));
} catch (Throwable t) {
// Silently ignore problems here
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/lir/VerifyMaxRegisterSizePhase.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.lir;
+
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+
+import java.util.EnumSet;
+
+import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
+import org.graalvm.compiler.lir.LIR;
+import org.graalvm.compiler.lir.LIRInstruction;
+import org.graalvm.compiler.lir.LIRInstruction.OperandFlag;
+import org.graalvm.compiler.lir.LIRInstruction.OperandMode;
+import org.graalvm.compiler.lir.gen.LIRGenerationResult;
+import org.graalvm.compiler.lir.phases.PostAllocationOptimizationPhase;
+
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.meta.Value;
+
+/**
+ * Checks that no registers exceed the MaxVectorSize flag from the VM config.
+ */
+public final class VerifyMaxRegisterSizePhase extends PostAllocationOptimizationPhase {
+
+ private final int maxVectorSize;
+
+ public VerifyMaxRegisterSizePhase(int maxVectorSize) {
+ this.maxVectorSize = maxVectorSize;
+ }
+
+ @Override
+ protected void run(TargetDescription target, LIRGenerationResult lirGenRes, PostAllocationOptimizationContext context) {
+ LIR lir = lirGenRes.getLIR();
+ for (AbstractBlockBase<?> block : lir.getControlFlowGraph().getBlocks()) {
+ verifyBlock(lir, block);
+ }
+ }
+
+ protected void verifyBlock(LIR lir, AbstractBlockBase<?> block) {
+ for (LIRInstruction inst : lir.getLIRforBlock(block)) {
+ verifyInstruction(inst);
+ }
+ }
+
+ protected void verifyInstruction(LIRInstruction inst) {
+ inst.visitEachInput(this::verifyOperands);
+ inst.visitEachOutput(this::verifyOperands);
+ inst.visitEachAlive(this::verifyOperands);
+ inst.visitEachTemp(this::verifyOperands);
+ }
+
+ @SuppressWarnings("unused")
+ protected void verifyOperands(LIRInstruction instruction, Value value, OperandMode mode, EnumSet<OperandFlag> flags) {
+ if (isRegister(value)) {
+ assert value.getPlatformKind().getSizeInBytes() <= maxVectorSize : "value " + value + " exceeds MaxVectorSize " + maxVectorSize;
+ }
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotClassInitializationPlugin.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotClassInitializationPlugin.java Sat Mar 24 01:08:35 2018 +0100
@@ -25,6 +25,7 @@
import org.graalvm.compiler.core.common.type.ObjectStamp;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.core.common.type.StampFactory;
+import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.hotspot.nodes.aot.InitializeKlassNode;
import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantNode;
import org.graalvm.compiler.nodes.ConstantNode;
@@ -37,6 +38,11 @@
import jdk.vm.ci.hotspot.HotSpotResolvedObjectType;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
+import jdk.vm.ci.meta.ConstantPool;
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
public final class HotSpotClassInitializationPlugin implements ClassInitializationPlugin {
@Override
@@ -73,4 +79,47 @@
result.setStateBefore(frameState);
return result;
}
+
+ private static final Class<? extends ConstantPool> hscp;
+ private static final MethodHandle loadReferencedTypeIIZMH;
+
+ static {
+ MethodHandle m = null;
+ Class<? extends ConstantPool> c = null;
+ try {
+ c = Class.forName("jdk.vm.ci.hotspot.HotSpotConstantPool").asSubclass(ConstantPool.class);
+ m = MethodHandles.lookup().findVirtual(c, "loadReferencedType", MethodType.methodType(void.class, int.class, int.class, boolean.class));
+ } catch (Exception e) {
+ // Older JVMCI without HotSpotConstantPool.loadReferencedType(int, int, boolean):
+ // leave the handle null so that supportsLazyInitialization returns false and
+ // loadReferencedType falls back to the eager ConstantPool path.
+ }
+ loadReferencedTypeIIZMH = m;
+ hscp = c;
+ }
+
+ private static boolean isHotSpotConstantPool(ConstantPool cp) {
+ // jdk.vm.ci.hotspot.HotSpotConstantPool is final, so we can
+ // directly compare Classes.
+ return cp.getClass() == hscp;
+ }
+
+ @Override
+ public boolean supportsLazyInitialization(ConstantPool cp) {
+ if (loadReferencedTypeIIZMH != null && isHotSpotConstantPool(cp)) {
+ return true;
+ }
+ return false;
+ }
+
+ @Override
+ public void loadReferencedType(GraphBuilderContext builder, ConstantPool cp, int cpi, int opcode) {
+ if (loadReferencedTypeIIZMH != null && isHotSpotConstantPool(cp)) {
+ try {
+ loadReferencedTypeIIZMH.invoke(cp, cpi, opcode, false);
+ } catch (Throwable t) {
+ throw GraalError.shouldNotReachHere(t);
+ }
+ } else {
+ cp.loadReferencedType(cpi, opcode);
+ }
+ }
+
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java Sat Mar 24 01:08:35 2018 +0100
@@ -234,7 +234,7 @@
b.addPush(JavaKind.Object, object);
} else {
FixedGuardNode fixedGuard = b.add(new FixedGuardNode(condition, DeoptimizationReason.ClassCastException, DeoptimizationAction.InvalidateReprofile, false));
- b.addPush(JavaKind.Object, new DynamicPiNode(object, fixedGuard, javaClass));
+ b.addPush(JavaKind.Object, DynamicPiNode.create(b.getAssumptions(), b.getConstantReflection(), object, fixedGuard, javaClass));
}
return true;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotInvocationPlugins.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotInvocationPlugins.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
import static org.graalvm.compiler.serviceprovider.JDK9Method.Java8OrEarlier;
+import java.lang.invoke.MethodHandle;
import java.lang.reflect.Type;
import java.util.Set;
@@ -129,7 +130,7 @@
ClassLoader cl = javaClass.getClassLoader();
return cl == null || cl == getClass().getClassLoader() || cl == extLoader;
} else {
- Object module = JDK9Method.getModule.invoke(javaClass);
+ Object module = JDK9Method.getModule(javaClass);
return trustedModules.contains(module);
}
}
@@ -148,34 +149,43 @@
}
}
+ /**
+ * Gets the set of modules whose methods can be intrinsified. This set contains the module
+ * owning the class of {@code compilerConfiguration} plus all modules it requires.
+ */
private static EconomicSet<Object> initTrustedModules(CompilerConfiguration compilerConfiguration) throws GraalError {
try {
EconomicSet<Object> res = EconomicSet.create();
- Object compilerConfigurationModule = JDK9Method.getModule.invoke(compilerConfiguration.getClass());
+ Object compilerConfigurationModule = JDK9Method.getModule(compilerConfiguration.getClass());
res.add(compilerConfigurationModule);
Class<?> moduleClass = compilerConfigurationModule.getClass();
- Object layer = new JDK9Method(moduleClass, "getLayer").invoke(compilerConfigurationModule);
+ Object layer = JDK9Method.lookupMethodHandle(moduleClass, "getLayer").invoke(compilerConfigurationModule);
Class<? extends Object> layerClass = layer.getClass();
- JDK9Method getName = new JDK9Method(moduleClass, "getName");
- Set<Object> modules = new JDK9Method(layerClass, "modules").invoke(layer);
- Object descriptor = new JDK9Method(moduleClass, "getDescriptor").invoke(compilerConfigurationModule);
+ MethodHandle getName = JDK9Method.lookupMethodHandle(moduleClass, "getName");
+ Set<Object> modules = (Set<Object>) JDK9Method.lookupMethodHandle(layerClass, "modules").invoke(layer);
+ Object descriptor = JDK9Method.lookupMethodHandle(moduleClass, "getDescriptor").invoke(compilerConfigurationModule);
Class<?> moduleDescriptorClass = descriptor.getClass();
- Set<Object> requires = new JDK9Method(moduleDescriptorClass, "requires").invoke(descriptor);
- JDK9Method requireNameGetter = null;
+ Set<Object> requires = (Set<Object>) JDK9Method.lookupMethodHandle(moduleDescriptorClass, "requires").invoke(descriptor);
+ boolean isAutomatic = (Boolean) JDK9Method.lookupMethodHandle(moduleDescriptorClass, "isAutomatic").invoke(descriptor);
+ if (isAutomatic) {
+ throw new IllegalArgumentException(String.format("The module '%s' defining the Graal compiler configuration class '%s' must not be an automatic module",
+ getName.invoke(compilerConfigurationModule), compilerConfiguration.getClass().getName()));
+ }
+ MethodHandle requireNameGetter = null;
for (Object require : requires) {
if (requireNameGetter == null) {
- requireNameGetter = new JDK9Method(require.getClass(), "name");
+ requireNameGetter = JDK9Method.lookupMethodHandle(require.getClass(), "name");
}
- String name = requireNameGetter.invoke(require);
+ String name = (String) requireNameGetter.invoke(require);
for (Object module : modules) {
- String moduleName = getName.invoke(module);
+ String moduleName = (String) getName.invoke(module);
if (moduleName.equals(name)) {
res.add(module);
}
}
}
return res;
- } catch (Exception e) {
+ } catch (Throwable e) {
throw new GraalError(e);
}
}
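On JDK 9 and later, the method handles above resolve to the real module-system API, so what initTrustedModules computes can be written directly against it. A sketch that ignores the JDK 8 path and the automatic-module check, and assumes the configuration class lives in a named module with a layer:

    import java.lang.module.ModuleDescriptor;
    import java.util.HashSet;
    import java.util.Set;

    static Set<Module> trustedModules(Object compilerConfiguration) {
        Module cfgModule = compilerConfiguration.getClass().getModule();
        Set<Module> trusted = new HashSet<>();
        trusted.add(cfgModule);
        // The trusted set is the configuration's module plus every module in
        // its layer that the module's descriptor requires.
        for (ModuleDescriptor.Requires r : cfgModule.getDescriptor().requires()) {
            for (Module m : cfgModule.getLayer().modules()) {
                if (m.getName().equals(r.name())) {
                    trusted.add(m);
                }
            }
        }
        return trusted;
    }

The JDK9Method indirection exists only so the same source also compiles and runs on JDK 8, where java.lang.Module does not exist.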
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotSuitesProvider.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotSuitesProvider.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -28,11 +28,13 @@
import static org.graalvm.compiler.core.phases.HighTier.Options.Inline;
import java.util.ListIterator;
+import org.graalvm.compiler.debug.Assertions;
import org.graalvm.compiler.hotspot.GraalHotSpotVMConfig;
import org.graalvm.compiler.hotspot.HotSpotBackend;
import org.graalvm.compiler.hotspot.HotSpotGraalRuntimeProvider;
import org.graalvm.compiler.hotspot.HotSpotInstructionProfiling;
+import org.graalvm.compiler.hotspot.lir.VerifyMaxRegisterSizePhase;
import org.graalvm.compiler.hotspot.phases.AheadOfTimeVerificationPhase;
import org.graalvm.compiler.hotspot.phases.LoadJavaMirrorWithKlassPhase;
import org.graalvm.compiler.hotspot.phases.WriteBarrierAdditionPhase;
@@ -139,6 +141,10 @@
StructuredGraph targetGraph = new StructuredGraph.Builder(graph.getOptions(), graph.getDebug(), AllowAssumptions.YES).method(graph.method()).build();
SimplifyingGraphDecoder graphDecoder = new SimplifyingGraphDecoder(runtime.getTarget().arch, targetGraph, context.getMetaAccess(), context.getConstantReflection(),
context.getConstantFieldProvider(), context.getStampProvider(), !ImmutableCode.getValue(graph.getOptions()));
+
+ if (graph.trackNodeSourcePosition()) {
+ targetGraph.setTrackNodeSourcePosition();
+ }
graphDecoder.decode(encodedGraph);
}
@@ -171,6 +177,9 @@
if (profileInstructions != null) {
suites.getPostAllocationOptimizationStage().appendPhase(new HotSpotInstructionProfiling(profileInstructions));
}
+ if (Assertions.detailedAssertionsEnabled(options)) {
+ suites.getPostAllocationOptimizationStage().appendPhase(new VerifyMaxRegisterSizePhase(config.maxVectorSize));
+ }
return suites;
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/phases/aot/AOTInliningPolicy.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/phases/aot/AOTInliningPolicy.java Sat Mar 24 01:08:35 2018 +0100
@@ -76,17 +76,17 @@
OptionValues options = info.graph().getOptions();
if (InlineEverything.getValue(options)) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "inline everything");
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "inline everything");
return true;
}
if (isIntrinsic(replacements, info)) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "intrinsic");
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "intrinsic");
return true;
}
if (info.shouldInline()) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "forced inlining");
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "forced inlining");
return true;
}
@@ -94,18 +94,18 @@
int nodes = info.determineNodeCount();
if (nodes < TrivialInliningSize.getValue(options) * inliningBonus) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "trivial (relevance=%f, probability=%f, bonus=%f, nodes=%d)", relevance, probability, inliningBonus, nodes);
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "trivial (relevance=%f, probability=%f, bonus=%f, nodes=%d)", relevance, probability, inliningBonus, nodes);
return true;
}
double maximumNodes = computeMaximumSize(relevance, (int) (maxInliningSize(inliningDepth, options) * inliningBonus));
if (nodes <= maximumNodes) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d <= %f)", relevance, probability, inliningBonus,
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d <= %f)", relevance, probability, inliningBonus,
nodes, maximumNodes);
return true;
}
- InliningUtil.logNotInlinedMethod(info, inliningDepth, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d > %f)", relevance, probability, inliningBonus, nodes, maximumNodes);
+ InliningUtil.traceNotInlinedMethod(info, inliningDepth, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d > %f)", relevance, probability, inliningBonus, nodes, maximumNodes);
return false;
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/AssertionSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/AssertionSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -86,7 +86,7 @@
args.add("condition", assertionNode.condition());
args.addConst("message", "failed runtime assertion in snippet/stub: " + assertionNode.message() + " (" + graph.method() + ")");
- template(assertionNode.getDebug(), args).instantiate(providers.getMetaAccess(), assertionNode, DEFAULT_REPLACER, args);
+ template(assertionNode, args).instantiate(providers.getMetaAccess(), assertionNode, DEFAULT_REPLACER, args);
}
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/HashCodeSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/HashCodeSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -87,7 +87,7 @@
StructuredGraph graph = node.graph();
Arguments args = new Arguments(identityHashCodeSnippet, graph.getGuardsStage(), tool.getLoweringStage());
args.add("thisObj", node.object);
- SnippetTemplate template = template(node.getDebug(), args);
+ SnippetTemplate template = template(node, args);
template.instantiate(providers.getMetaAccess(), node, SnippetTemplate.DEFAULT_REPLACER, args);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/LoadExceptionObjectSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/LoadExceptionObjectSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -101,7 +101,7 @@
} else {
Arguments args = new Arguments(loadException, loadExceptionObject.graph().getGuardsStage(), tool.getLoweringStage());
args.addConst("threadRegister", registers.getThreadRegister());
- template(loadExceptionObject.getDebug(), args).instantiate(providers.getMetaAccess(), loadExceptionObject, DEFAULT_REPLACER, args);
+ template(loadExceptionObject, args).instantiate(providers.getMetaAccess(), loadExceptionObject, DEFAULT_REPLACER, args);
}
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/MonitorSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/MonitorSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -759,7 +759,7 @@
args.addConst("counters", counters);
}
- template(graph.getDebug(), args).instantiate(providers.getMetaAccess(), monitorenterNode, DEFAULT_REPLACER, args);
+ template(monitorenterNode, args).instantiate(providers.getMetaAccess(), monitorenterNode, DEFAULT_REPLACER, args);
}
public void lower(MonitorExitNode monitorexitNode, HotSpotRegistersProvider registers, LoweringTool tool) {
@@ -778,7 +778,7 @@
args.addConst("options", graph.getOptions());
args.addConst("counters", counters);
- template(graph.getDebug(), args).instantiate(providers.getMetaAccess(), monitorexitNode, DEFAULT_REPLACER, args);
+ template(monitorexitNode, args).instantiate(providers.getMetaAccess(), monitorexitNode, DEFAULT_REPLACER, args);
}
public static boolean isTracingEnabledForType(ValueNode object) {
@@ -828,7 +828,7 @@
invoke.setStateAfter(graph.start().stateAfter());
graph.addAfterFixed(graph.start(), invoke);
- StructuredGraph inlineeGraph = providers.getReplacements().getSnippet(initCounter.getMethod(), null);
+ StructuredGraph inlineeGraph = providers.getReplacements().getSnippet(initCounter.getMethod(), null, invoke.graph().trackNodeSourcePosition(), invoke.getNodeSourcePosition());
InliningUtil.inline(invoke, inlineeGraph, false, null);
List<ReturnNode> rets = graph.getNodes(ReturnNode.TYPE).snapshot();
@@ -846,7 +846,7 @@
Arguments args = new Arguments(checkCounter, graph.getGuardsStage(), tool.getLoweringStage());
args.addConst("errMsg", msg);
- inlineeGraph = template(graph.getDebug(), args).copySpecializedGraph(graph.getDebug());
+ inlineeGraph = template(invoke, args).copySpecializedGraph(graph.getDebug());
InliningUtil.inline(invoke, inlineeGraph, false, null);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/NewObjectSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/NewObjectSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -626,7 +626,7 @@
args.addConst("options", localOptions);
args.addConst("counters", counters);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(newInstanceNode, args);
graph.getDebug().log("Lowering allocateInstance in %s: node=%s, template=%s, arguments=%s", graph, newInstanceNode, template, args);
template.instantiate(providers.getMetaAccess(), newInstanceNode, DEFAULT_REPLACER, args);
}
@@ -669,7 +669,7 @@
args.addConst("typeContext", ProfileAllocations.getValue(localOptions) ? arrayType.toJavaName(false) : "");
args.addConst("options", localOptions);
args.addConst("counters", counters);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(newArrayNode, args);
graph.getDebug().log("Lowering allocateArray in %s: node=%s, template=%s, arguments=%s", graph, newArrayNode, template, args);
template.instantiate(providers.getMetaAccess(), newArrayNode, DEFAULT_REPLACER, args);
}
@@ -686,7 +686,7 @@
args.addConst("options", localOptions);
args.addConst("counters", counters);
- SnippetTemplate template = template(newInstanceNode.getDebug(), args);
+ SnippetTemplate template = template(newInstanceNode, args);
template.instantiate(providers.getMetaAccess(), newInstanceNode, DEFAULT_REPLACER, args);
}
@@ -715,7 +715,7 @@
args.add("prototypeMarkWord", lookupArrayClass(tool, JavaKind.Object).prototypeMarkWord());
args.addConst("options", localOptions);
args.addConst("counters", counters);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(newArrayNode, args);
template.instantiate(providers.getMetaAccess(), newArrayNode, DEFAULT_REPLACER, args);
}
@@ -739,7 +739,7 @@
args.add("hub", hub);
args.addConst("rank", rank);
args.addVarargs("dimensions", int.class, StampFactory.forKind(JavaKind.Int), dims);
- template(newmultiarrayNode.getDebug(), args).instantiate(providers.getMetaAccess(), newmultiarrayNode, DEFAULT_REPLACER, args);
+ template(newmultiarrayNode, args).instantiate(providers.getMetaAccess(), newmultiarrayNode, DEFAULT_REPLACER, args);
}
private static int instanceSize(HotSpotResolvedObjectType type) {
@@ -753,7 +753,7 @@
Arguments args = new Arguments(verifyHeap, verifyHeapNode.graph().getGuardsStage(), tool.getLoweringStage());
args.addConst("threadRegister", registers.getThreadRegister());
- SnippetTemplate template = template(verifyHeapNode.getDebug(), args);
+ SnippetTemplate template = template(verifyHeapNode, args);
template.instantiate(providers.getMetaAccess(), verifyHeapNode, DEFAULT_REPLACER, args);
} else {
GraphUtil.removeFixedWithUnusedInputs(verifyHeapNode);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/ObjectCloneNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/ObjectCloneNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -86,7 +86,7 @@
StructuredGraph snippetGraph = null;
DebugContext debug = getDebug();
try (DebugContext.Scope s = debug.scope("ArrayCloneSnippet", snippetMethod)) {
- snippetGraph = replacements.getSnippet(snippetMethod, null);
+ snippetGraph = replacements.getSnippet(snippetMethod, null, graph().trackNodeSourcePosition(), this.getNodeSourcePosition());
} catch (Throwable e) {
throw debug.handle(e);
}
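
ObjectCloneNode here and the counter code in MonitorSnippets above make the matching change on the snippet-lookup side: getSnippet now receives the requesting graph's tracking flag and the position of the node being replaced, so the snippet graph is built with positions that chain back to the original call site instead of being patched in afterwards. The same call, annotated for orientation (arguments exactly as in the diff; comments are explanatory only):

    snippetGraph = replacements.getSnippet(
                    snippetMethod,
                    null,                               // second argument unchanged from the original call
                    graph().trackNodeSourcePosition(),  // build with tracking iff the caller tracks
                    this.getNodeSourcePosition());      // outer position for nodes inlined from the snippet
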
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/StringToBytesSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/StringToBytesSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -83,7 +83,7 @@
public void lower(StringToBytesNode stringToBytesNode, LoweringTool tool) {
Arguments args = new Arguments(create, stringToBytesNode.graph().getGuardsStage(), tool.getLoweringStage());
args.addConst("compilationTimeString", stringToBytesNode.getValue());
- SnippetTemplate template = template(stringToBytesNode.getDebug(), args);
+ SnippetTemplate template = template(stringToBytesNode, args);
template.instantiate(providers.getMetaAccess(), stringToBytesNode, DEFAULT_REPLACER, args);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/UnsafeLoadSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/UnsafeLoadSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -65,7 +65,7 @@
Arguments args = new Arguments(unsafeLoad, load.graph().getGuardsStage(), tool.getLoweringStage());
args.add("object", load.object());
args.add("offset", load.offset());
- template(load.getDebug(), args).instantiate(providers.getMetaAccess(), load, DEFAULT_REPLACER, args);
+ template(load, args).instantiate(providers.getMetaAccess(), load, DEFAULT_REPLACER, args);
}
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/WriteBarrierSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/WriteBarrierSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -434,7 +434,7 @@
args.add("object", address.getBase());
}
args.addConst("counters", counters);
- template(writeBarrier.getDebug(), args).instantiate(providers.getMetaAccess(), writeBarrier, DEFAULT_REPLACER, args);
+ template(writeBarrier, args).instantiate(providers.getMetaAccess(), writeBarrier, DEFAULT_REPLACER, args);
}
public void lower(SerialArrayRangeWriteBarrier arrayRangeWriteBarrier, LoweringTool tool) {
@@ -442,7 +442,7 @@
args.add("address", arrayRangeWriteBarrier.getAddress());
args.add("length", arrayRangeWriteBarrier.getLength());
args.addConst("elementStride", arrayRangeWriteBarrier.getElementStride());
- template(arrayRangeWriteBarrier.getDebug(), args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args);
+ template(arrayRangeWriteBarrier, args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args);
}
public void lower(G1PreWriteBarrier writeBarrierPre, HotSpotRegistersProvider registers, LoweringTool tool) {
@@ -467,7 +467,7 @@
args.addConst("threadRegister", registers.getThreadRegister());
args.addConst("trace", traceBarrier(writeBarrierPre.graph()));
args.addConst("counters", counters);
- template(writeBarrierPre.getDebug(), args).instantiate(providers.getMetaAccess(), writeBarrierPre, DEFAULT_REPLACER, args);
+ template(writeBarrierPre, args).instantiate(providers.getMetaAccess(), writeBarrierPre, DEFAULT_REPLACER, args);
}
public void lower(G1ReferentFieldReadBarrier readBarrier, HotSpotRegistersProvider registers, LoweringTool tool) {
@@ -492,7 +492,7 @@
args.addConst("threadRegister", registers.getThreadRegister());
args.addConst("trace", traceBarrier(readBarrier.graph()));
args.addConst("counters", counters);
- template(readBarrier.getDebug(), args).instantiate(providers.getMetaAccess(), readBarrier, DEFAULT_REPLACER, args);
+ template(readBarrier, args).instantiate(providers.getMetaAccess(), readBarrier, DEFAULT_REPLACER, args);
}
public void lower(G1PostWriteBarrier writeBarrierPost, HotSpotRegistersProvider registers, LoweringTool tool) {
@@ -522,7 +522,7 @@
args.addConst("threadRegister", registers.getThreadRegister());
args.addConst("trace", traceBarrier(writeBarrierPost.graph()));
args.addConst("counters", counters);
- template(graph.getDebug(), args).instantiate(providers.getMetaAccess(), writeBarrierPost, DEFAULT_REPLACER, args);
+ template(writeBarrierPost, args).instantiate(providers.getMetaAccess(), writeBarrierPost, DEFAULT_REPLACER, args);
}
public void lower(G1ArrayRangePreWriteBarrier arrayRangeWriteBarrier, HotSpotRegistersProvider registers, LoweringTool tool) {
@@ -531,7 +531,7 @@
args.add("length", arrayRangeWriteBarrier.getLength());
args.addConst("elementStride", arrayRangeWriteBarrier.getElementStride());
args.addConst("threadRegister", registers.getThreadRegister());
- template(arrayRangeWriteBarrier.getDebug(), args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args);
+ template(arrayRangeWriteBarrier, args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args);
}
public void lower(G1ArrayRangePostWriteBarrier arrayRangeWriteBarrier, HotSpotRegistersProvider registers, LoweringTool tool) {
@@ -540,7 +540,7 @@
args.add("length", arrayRangeWriteBarrier.getLength());
args.addConst("elementStride", arrayRangeWriteBarrier.getElementStride());
args.addConst("threadRegister", registers.getThreadRegister());
- template(arrayRangeWriteBarrier.getDebug(), args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args);
+ template(arrayRangeWriteBarrier, args).instantiate(providers.getMetaAccess(), arrayRangeWriteBarrier, DEFAULT_REPLACER, args);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/aot/ResolveConstantSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/aot/ResolveConstantSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -36,10 +36,10 @@
import org.graalvm.compiler.hotspot.nodes.aot.InitializeKlassStubCall;
import org.graalvm.compiler.hotspot.nodes.aot.LoadConstantIndirectlyNode;
import org.graalvm.compiler.hotspot.nodes.aot.LoadMethodCountersIndirectlyNode;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantNode;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantStubCall;
+import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicConstantNode;
import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicStubCall;
-import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantNode;
-import org.graalvm.compiler.hotspot.nodes.aot.ResolveDynamicConstantNode;
-import org.graalvm.compiler.hotspot.nodes.aot.ResolveConstantStubCall;
import org.graalvm.compiler.hotspot.nodes.aot.ResolveMethodAndLoadCountersNode;
import org.graalvm.compiler.hotspot.nodes.aot.ResolveMethodAndLoadCountersStubCall;
import org.graalvm.compiler.hotspot.nodes.type.MethodPointerStamp;
@@ -141,7 +141,7 @@
Arguments args = new Arguments(snippet, graph.getGuardsStage(), tool.getLoweringStage());
args.add("constant", value);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(resolveConstantNode, args);
template.instantiate(providers.getMetaAccess(), resolveConstantNode, DEFAULT_REPLACER, args);
assert resolveConstantNode.hasNoUsages();
@@ -180,7 +180,7 @@
Arguments args = new Arguments(snippet, graph.getGuardsStage(), tool.getLoweringStage());
args.add("constant", value);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(resolveConstantNode, args);
template.instantiate(providers.getMetaAccess(), resolveConstantNode, DEFAULT_REPLACER, args);
assert resolveConstantNode.hasNoUsages();
@@ -200,7 +200,7 @@
Arguments args = new Arguments(initializeKlass, graph.getGuardsStage(), tool.getLoweringStage());
args.add("constant", value);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(initializeKlassNode, args);
template.instantiate(providers.getMetaAccess(), initializeKlassNode, DEFAULT_REPLACER, args);
assert initializeKlassNode.hasNoUsages();
if (!initializeKlassNode.isDeleted()) {
@@ -218,7 +218,7 @@
Arguments args = new Arguments(resolveMethodAndLoadCounters, graph.getGuardsStage(), tool.getLoweringStage());
args.add("method", method);
args.add("klassHint", resolveMethodAndLoadCountersNode.getHub());
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(resolveMethodAndLoadCountersNode, args);
template.instantiate(providers.getMetaAccess(), resolveMethodAndLoadCountersNode, DEFAULT_REPLACER, args);
assert resolveMethodAndLoadCountersNode.hasNoUsages();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/arraycopy/ArrayCopySnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/arraycopy/ArrayCopySnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -524,7 +524,7 @@
*/
private void instantiate(Arguments args, BasicArrayCopyNode arraycopy) {
StructuredGraph graph = arraycopy.graph();
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(arraycopy, args);
UnmodifiableEconomicMap<Node, Node> replacements = template.instantiate(providers.getMetaAccess(), arraycopy, SnippetTemplate.DEFAULT_REPLACER, args, false);
for (Node originalNode : replacements.getKeys()) {
if (originalNode instanceof Invoke) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/profiling/ProbabilisticProfileSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/profiling/ProbabilisticProfileSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -150,7 +150,7 @@
args.add("bci", bci);
args.add("targetBci", targetBci);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(profileNode, args);
template.instantiate(providers.getMetaAccess(), profileNode, DEFAULT_REPLACER, args);
} else if (profileNode instanceof ProfileInvokeNode) {
ProfileInvokeNode profileInvokeNode = (ProfileInvokeNode) profileNode;
@@ -163,7 +163,7 @@
args.add("stepLog", stepLog);
args.addConst("freqLog", profileInvokeNode.getNotificationFreqLog());
args.addConst("probLog", profileInvokeNode.getProbabilityLog());
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(profileNode, args);
template.instantiate(providers.getMetaAccess(), profileNode, DEFAULT_REPLACER, args);
} else {
throw new GraalError("Unsupported profile node type: " + profileNode);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/profiling/ProfileSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/profiling/ProfileSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -132,7 +132,7 @@
args.add("bci", bci);
args.add("targetBci", targetBci);
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(profileNode, args);
template.instantiate(providers.getMetaAccess(), profileNode, DEFAULT_REPLACER, args);
} else if (profileNode instanceof ProfileInvokeNode) {
ProfileInvokeNode profileInvokeNode = (ProfileInvokeNode) profileNode;
@@ -142,7 +142,7 @@
args.add("step", step);
args.add("stepLog", stepLog);
args.addConst("freqLog", profileInvokeNode.getNotificationFreqLog());
- SnippetTemplate template = template(graph.getDebug(), args);
+ SnippetTemplate template = template(profileNode, args);
template.instantiate(providers.getMetaAccess(), profileNode, DEFAULT_REPLACER, args);
} else {
throw new GraalError("Unsupported profile node type: " + profileNode);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/stubs/ForeignCallStub.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/stubs/ForeignCallStub.java Sat Mar 24 01:08:35 2018 +0100
@@ -34,8 +34,11 @@
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.core.common.type.StampFactory;
import org.graalvm.compiler.core.common.type.StampPair;
+import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.debug.JavaMethodContext;
+import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.compiler.hotspot.HotSpotForeignCallLinkage;
import org.graalvm.compiler.hotspot.HotSpotForeignCallLinkage.Transition;
import org.graalvm.compiler.hotspot.HotSpotForeignCallLinkageImpl;
@@ -224,33 +227,37 @@
* %r15 on AMD64) and is only prepended if {@link #prependThread} is true.
*/
@Override
+ @SuppressWarnings("try")
protected StructuredGraph getGraph(DebugContext debug, CompilationIdentifier compilationId) {
WordTypes wordTypes = providers.getWordTypes();
Class<?>[] args = linkage.getDescriptor().getArgumentTypes();
boolean isObjectResult = !LIRKind.isValue(linkage.getOutgoingCallingConvention().getReturn());
StructuredGraph graph = new StructuredGraph.Builder(options, debug).name(toString()).compilationId(compilationId).build();
graph.disableUnsafeAccessTracking();
-
- GraphKit kit = new GraphKit(graph, providers, wordTypes, providers.getGraphBuilderPlugins());
- ParameterNode[] params = createParameters(kit, args);
+ graph.setTrackNodeSourcePosition();
+ try {
+ ResolvedJavaMethod thisMethod = providers.getMetaAccess().lookupJavaMethod(ForeignCallStub.class.getDeclaredMethod("getGraph", DebugContext.class, CompilationIdentifier.class));
+ try (DebugCloseable context = graph.withNodeSourcePosition(NodeSourcePosition.substitution(thisMethod))) {
+ GraphKit kit = new GraphKit(graph, providers, wordTypes, providers.getGraphBuilderPlugins());
+ ParameterNode[] params = createParameters(kit, args);
+ ReadRegisterNode thread = kit.append(new ReadRegisterNode(providers.getRegisters().getThreadRegister(), wordTypes.getWordKind(), true, false));
+ ValueNode result = createTargetCall(kit, params, thread);
+ kit.createInvoke(StubUtil.class, "handlePendingException", thread, ConstantNode.forBoolean(isObjectResult, graph));
+ if (isObjectResult) {
+ InvokeNode object = kit.createInvoke(HotSpotReplacementsUtil.class, "getAndClearObjectResult", thread);
+ result = kit.createInvoke(StubUtil.class, "verifyObject", object);
+ }
+ kit.append(new ReturnNode(linkage.getDescriptor().getResultType() == void.class ? null : result));
+ debug.dump(DebugContext.VERBOSE_LEVEL, graph, "Initial stub graph");
- ReadRegisterNode thread = kit.append(new ReadRegisterNode(providers.getRegisters().getThreadRegister(), wordTypes.getWordKind(), true, false));
- ValueNode result = createTargetCall(kit, params, thread);
- kit.createInvoke(StubUtil.class, "handlePendingException", thread, ConstantNode.forBoolean(isObjectResult, graph));
- if (isObjectResult) {
- InvokeNode object = kit.createInvoke(HotSpotReplacementsUtil.class, "getAndClearObjectResult", thread);
- result = kit.createInvoke(StubUtil.class, "verifyObject", object);
+ kit.inlineInvokes();
+ new RemoveValueProxyPhase().apply(graph);
+
+ debug.dump(DebugContext.VERBOSE_LEVEL, graph, "Stub graph before compilation");
+ }
+ } catch (Exception e) {
+ throw GraalError.shouldNotReachHere(e);
}
- kit.append(new ReturnNode(linkage.getDescriptor().getResultType() == void.class ? null : result));
-
- debug.dump(DebugContext.VERBOSE_LEVEL, graph, "Initial stub graph");
-
- kit.inlineInvokes();
-
- new RemoveValueProxyPhase().apply(graph);
-
- debug.dump(DebugContext.VERBOSE_LEVEL, graph, "Stub graph before compilation");
-
return graph;
}
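
The restructured getGraph wraps the whole stub construction in a source-position scope: setTrackNodeSourcePosition turns tracking on for the new graph, and every node the GraphKit creates while the withNodeSourcePosition scope is open is tagged as coming from a substitution of getGraph itself. Reduced to its skeleton, with the stub-building steps elided:

    // Skeleton of the pattern from the diff; the reflective lookup can throw,
    // hence the outer catch that turns any failure into a GraalError.
    graph.setTrackNodeSourcePosition();
    try {
        ResolvedJavaMethod thisMethod = providers.getMetaAccess().lookupJavaMethod(
                        ForeignCallStub.class.getDeclaredMethod("getGraph", DebugContext.class, CompilationIdentifier.class));
        try (DebugCloseable context = graph.withNodeSourcePosition(NodeSourcePosition.substitution(thisMethod))) {
            // ... create parameters, the target call, exception handling and the
            // return; every node added here inherits the substitution position ...
        }
    } catch (Exception e) {
        throw GraalError.shouldNotReachHere(e);
    }
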
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java Sat Mar 24 01:08:35 2018 +0100
@@ -279,6 +279,7 @@
import org.graalvm.compiler.bytecode.Bytes;
import org.graalvm.compiler.bytecode.ResolvedJavaMethodBytecode;
import org.graalvm.compiler.bytecode.ResolvedJavaMethodBytecodeProvider;
+import org.graalvm.compiler.core.common.GraalOptions;
import org.graalvm.compiler.core.common.PermanentBailoutException;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.calc.Condition;
@@ -678,6 +679,8 @@
private boolean finalBarrierRequired;
private ValueNode originalReceiver;
+ private final boolean eagerInitializing;
+ private final boolean uninitializedIsError;
protected BytecodeParser(GraphBuilderPhase.Instance graphBuilderInstance, StructuredGraph graph, BytecodeParser parent, ResolvedJavaMethod method,
int entryBCI, IntrinsicContext intrinsicContext) {
@@ -701,6 +704,14 @@
this.entryBCI = entryBCI;
this.parent = parent;
+ ClassInitializationPlugin classInitializationPlugin = graphBuilderConfig.getPlugins().getClassInitializationPlugin();
+ if (classInitializationPlugin != null && graphBuilderConfig.eagerResolving()) {
+ uninitializedIsError = eagerInitializing = !classInitializationPlugin.supportsLazyInitialization(constantPool);
+ } else {
+ eagerInitializing = graphBuilderConfig.eagerResolving();
+ uninitializedIsError = graphBuilderConfig.unresolvedIsError();
+ }
+
assert code.getCode() != null : "method must contain bytecodes: " + method;
if (TraceBytecodeParserLevel.getValue(options) != 0) {
@@ -713,6 +724,11 @@
lnt = code.getLineNumberTable();
previousLineNumber = -1;
}
+
+ assert !GraalOptions.TrackNodeSourcePosition.getValue(options) || graph.trackNodeSourcePosition();
+ if (graphBuilderConfig.trackNodeSourcePosition() || (parent != null && parent.graph.trackNodeSourcePosition())) {
+ graph.setTrackNodeSourcePosition();
+ }
}
protected GraphBuilderPhase.Instance getGraphBuilderInstance() {
@@ -807,26 +823,28 @@
}
}
- if (method.isSynchronized()) {
- finishPrepare(lastInstr, BytecodeFrame.BEFORE_BCI);
-
- // add a monitor enter to the start block
- methodSynchronizedObject = synchronizedObject(frameState, method);
- frameState.clearNonLiveLocals(startBlock, liveness, true);
- assert bci() == 0;
- genMonitorEnter(methodSynchronizedObject, bci());
+ try (DebugCloseable context = openNodeContext()) {
+ if (method.isSynchronized()) {
+ finishPrepare(lastInstr, BytecodeFrame.BEFORE_BCI);
+
+ // add a monitor enter to the start block
+ methodSynchronizedObject = synchronizedObject(frameState, method);
+ frameState.clearNonLiveLocals(startBlock, liveness, true);
+ assert bci() == 0;
+ genMonitorEnter(methodSynchronizedObject, bci());
+ }
+
+ ProfilingPlugin profilingPlugin = this.graphBuilderConfig.getPlugins().getProfilingPlugin();
+ if (profilingPlugin != null && profilingPlugin.shouldProfile(this, method)) {
+ FrameState stateBefore = frameState.create(bci(), getNonIntrinsicAncestor(), false, null, null);
+ profilingPlugin.profileInvoke(this, method, stateBefore);
+ }
+
+ finishPrepare(lastInstr, 0);
+
+ genInfoPointNode(InfopointReason.METHOD_START, null);
}
- ProfilingPlugin profilingPlugin = this.graphBuilderConfig.getPlugins().getProfilingPlugin();
- if (profilingPlugin != null && profilingPlugin.shouldProfile(this, method)) {
- FrameState stateBefore = frameState.create(bci(), getNonIntrinsicAncestor(), false, null, null);
- profilingPlugin.profileInvoke(this, method, stateBefore);
- }
-
- finishPrepare(lastInstr, 0);
-
- genInfoPointNode(InfopointReason.METHOD_START, null);
-
currentBlock = blockMap.getStartBlock();
setEntryState(startBlock, frameState);
if (startBlock.isLoopHeader) {
@@ -1338,6 +1356,8 @@
protected void genInvokeStatic(int cpi, int opcode) {
JavaMethod target = lookupMethod(cpi, opcode);
+ assert !uninitializedIsError ||
+ (target instanceof ResolvedJavaMethod && ((ResolvedJavaMethod) target).getDeclaringClass().isInitialized()) : target;
genInvokeStatic(target);
}
@@ -2017,6 +2037,7 @@
}
}
+ @SuppressWarnings("try")
protected boolean tryInvocationPlugin(InvokeKind invokeKind, ValueNode[] args, ResolvedJavaMethod targetMethod, JavaKind resultType, JavaType returnType) {
InvocationPlugin plugin = graphBuilderConfig.getPlugins().getInvocationPlugins().lookupInvocation(targetMethod);
if (plugin != null) {
@@ -2041,11 +2062,13 @@
}
InvocationPluginAssertions assertions = Assertions.assertionsEnabled() ? new InvocationPluginAssertions(plugin, args, targetMethod, resultType) : null;
- if (plugin.execute(this, targetMethod, pluginReceiver, args)) {
- afterInvocationPluginExecution(true, assertions, intrinsicGuard, invokeKind, args, targetMethod, resultType, returnType);
- return true;
- } else {
- afterInvocationPluginExecution(false, assertions, intrinsicGuard, invokeKind, args, targetMethod, resultType, returnType);
+ try (DebugCloseable context = openNodeContext(targetMethod)) {
+ if (plugin.execute(this, targetMethod, pluginReceiver, args)) {
+ afterInvocationPluginExecution(true, assertions, intrinsicGuard, invokeKind, args, targetMethod, resultType, returnType);
+ return true;
+ } else {
+ afterInvocationPluginExecution(false, assertions, intrinsicGuard, invokeKind, args, targetMethod, resultType, returnType);
+ }
}
}
return false;
@@ -2112,6 +2135,7 @@
* Tries to inline {@code targetMethod} if it is an instance field accessor. This avoids the
* overhead of creating and using a nested {@link BytecodeParser} object.
*/
+ @SuppressWarnings("try")
private boolean tryFastInlineAccessor(ValueNode[] args, ResolvedJavaMethod targetMethod) {
byte[] bytecode = targetMethod.getCode();
if (bytecode != null && bytecode.length == ACCESSOR_BYTECODE_LENGTH &&
@@ -2124,10 +2148,12 @@
if (field instanceof ResolvedJavaField) {
ValueNode receiver = invocationPluginReceiver.init(targetMethod, args).get();
ResolvedJavaField resolvedField = (ResolvedJavaField) field;
- genGetField(resolvedField, receiver);
- notifyBeforeInline(targetMethod);
- printInlining(targetMethod, targetMethod, true, "inline accessor method (bytecode parsing)");
- notifyAfterInline(targetMethod);
+ try (DebugCloseable context = openNodeContext(targetMethod, 1)) {
+ genGetField(resolvedField, receiver);
+ notifyBeforeInline(targetMethod);
+ printInlining(targetMethod, targetMethod, true, "inline accessor method (bytecode parsing)");
+ notifyAfterInline(targetMethod);
+ }
return true;
}
}
@@ -2562,6 +2588,7 @@
@Override
public <T extends ValueNode> T append(T v) {
+ assert !graph.trackNodeSourcePosition() || graph.currentNodeSourcePosition() != null || currentBlock == blockMap.getUnwindBlock() || currentBlock instanceof ExceptionDispatchBlock;
if (v.graph() != null) {
return v;
}
@@ -2670,93 +2697,97 @@
return createTarget(block, state, false, false);
}
+ @SuppressWarnings("try")
private FixedNode createTarget(BciBlock block, FrameStateBuilder state, boolean canReuseInstruction, boolean canReuseState) {
assert block != null && state != null;
assert !block.isExceptionEntry || state.stackSize() == 1;
- if (getFirstInstruction(block) == null) {
- /*
- * This is the first time we see this block as a branch target. Create and return a
- * placeholder that later can be replaced with a MergeNode when we see this block again.
- */
- FixedNode targetNode;
- if (canReuseInstruction && (block.getPredecessorCount() == 1 || !controlFlowSplit) && !block.isLoopHeader && (currentBlock.loops & ~block.loops) == 0) {
- setFirstInstruction(block, lastInstr);
- lastInstr = null;
- } else {
- setFirstInstruction(block, graph.add(new BeginNode()));
+ try (DebugCloseable context = openNodeContext(state, block.startBci)) {
+ if (getFirstInstruction(block) == null) {
+ /*
+ * This is the first time we see this block as a branch target. Create and return a
+ * placeholder that later can be replaced with a MergeNode when we see this block
+ * again.
+ */
+ FixedNode targetNode;
+ if (canReuseInstruction && (block.getPredecessorCount() == 1 || !controlFlowSplit) && !block.isLoopHeader && (currentBlock.loops & ~block.loops) == 0) {
+ setFirstInstruction(block, lastInstr);
+ lastInstr = null;
+ } else {
+ setFirstInstruction(block, graph.add(new BeginNode()));
+ }
+ targetNode = getFirstInstruction(block);
+ Target target = checkLoopExit(targetNode, block, state);
+ FixedNode result = target.fixed;
+ FrameStateBuilder currentEntryState = target.state == state ? (canReuseState ? state : state.copy()) : target.state;
+ setEntryState(block, currentEntryState);
+ currentEntryState.clearNonLiveLocals(block, liveness, true);
+
+ debug.log("createTarget %s: first visit, result: %s", block, targetNode);
+ return result;
+ }
+
+ // We already saw this block before, so we have to merge states.
+ if (!getEntryState(block).isCompatibleWith(state)) {
+ throw bailout("stacks do not match; bytecodes would not verify");
}
- targetNode = getFirstInstruction(block);
- Target target = checkLoopExit(targetNode, block, state);
+
+ if (getFirstInstruction(block) instanceof LoopBeginNode) {
+ assert (block.isLoopHeader && currentBlock.getId() >= block.getId()) : "must be backward branch";
+ /*
+ * Backward loop edge. We need to create a special LoopEndNode and merge with the
+ * loop begin node created before.
+ */
+ LoopBeginNode loopBegin = (LoopBeginNode) getFirstInstruction(block);
+ LoopEndNode loopEnd = graph.add(new LoopEndNode(loopBegin));
+ Target target = checkLoopExit(loopEnd, block, state);
+ FixedNode result = target.fixed;
+ getEntryState(block).merge(loopBegin, target.state);
+
+ debug.log("createTarget %s: merging backward branch to loop header %s, result: %s", block, loopBegin, result);
+ return result;
+ }
+ assert currentBlock == null || currentBlock.getId() < block.getId() : "must not be backward branch";
+ assert getFirstInstruction(block).next() == null : "bytecodes already parsed for block";
+
+ if (getFirstInstruction(block) instanceof AbstractBeginNode && !(getFirstInstruction(block) instanceof AbstractMergeNode)) {
+ /*
+ * This is the second time we see this block. Create the actual MergeNode and the
+ * End Node for the already existing edge.
+ */
+ AbstractBeginNode beginNode = (AbstractBeginNode) getFirstInstruction(block);
+
+ // The EndNode for the already existing edge.
+ EndNode end = graph.add(new EndNode());
+ // The MergeNode that replaces the placeholder.
+ AbstractMergeNode mergeNode = graph.add(new MergeNode());
+ FixedNode next = beginNode.next();
+
+ if (beginNode.predecessor() instanceof ControlSplitNode) {
+ beginNode.setNext(end);
+ } else {
+ beginNode.replaceAtPredecessor(end);
+ beginNode.safeDelete();
+ }
+
+ mergeNode.addForwardEnd(end);
+ mergeNode.setNext(next);
+
+ setFirstInstruction(block, mergeNode);
+ }
+
+ AbstractMergeNode mergeNode = (AbstractMergeNode) getFirstInstruction(block);
+
+ // The EndNode for the newly merged edge.
+ EndNode newEnd = graph.add(new EndNode());
+ Target target = checkLoopExit(newEnd, block, state);
FixedNode result = target.fixed;
- FrameStateBuilder currentEntryState = target.state == state ? (canReuseState ? state : state.copy()) : target.state;
- setEntryState(block, currentEntryState);
- currentEntryState.clearNonLiveLocals(block, liveness, true);
-
- debug.log("createTarget %s: first visit, result: %s", block, targetNode);
- return result;
- }
-
- // We already saw this block before, so we have to merge states.
- if (!getEntryState(block).isCompatibleWith(state)) {
- throw bailout("stacks do not match; bytecodes would not verify");
- }
-
- if (getFirstInstruction(block) instanceof LoopBeginNode) {
- assert (block.isLoopHeader && currentBlock.getId() >= block.getId()) : "must be backward branch";
- /*
- * Backward loop edge. We need to create a special LoopEndNode and merge with the loop
- * begin node created before.
- */
- LoopBeginNode loopBegin = (LoopBeginNode) getFirstInstruction(block);
- LoopEndNode loopEnd = graph.add(new LoopEndNode(loopBegin));
- Target target = checkLoopExit(loopEnd, block, state);
- FixedNode result = target.fixed;
- getEntryState(block).merge(loopBegin, target.state);
-
- debug.log("createTarget %s: merging backward branch to loop header %s, result: %s", block, loopBegin, result);
+ getEntryState(block).merge(mergeNode, target.state);
+ mergeNode.addForwardEnd(newEnd);
+
+ debug.log("createTarget %s: merging state, result: %s", block, result);
return result;
}
- assert currentBlock == null || currentBlock.getId() < block.getId() : "must not be backward branch";
- assert getFirstInstruction(block).next() == null : "bytecodes already parsed for block";
-
- if (getFirstInstruction(block) instanceof AbstractBeginNode && !(getFirstInstruction(block) instanceof AbstractMergeNode)) {
- /*
- * This is the second time we see this block. Create the actual MergeNode and the End
- * Node for the already existing edge.
- */
- AbstractBeginNode beginNode = (AbstractBeginNode) getFirstInstruction(block);
-
- // The EndNode for the already existing edge.
- EndNode end = graph.add(new EndNode());
- // The MergeNode that replaces the placeholder.
- AbstractMergeNode mergeNode = graph.add(new MergeNode());
- FixedNode next = beginNode.next();
-
- if (beginNode.predecessor() instanceof ControlSplitNode) {
- beginNode.setNext(end);
- } else {
- beginNode.replaceAtPredecessor(end);
- beginNode.safeDelete();
- }
-
- mergeNode.addForwardEnd(end);
- mergeNode.setNext(next);
-
- setFirstInstruction(block, mergeNode);
- }
-
- AbstractMergeNode mergeNode = (AbstractMergeNode) getFirstInstruction(block);
-
- // The EndNode for the newly merged edge.
- EndNode newEnd = graph.add(new EndNode());
- Target target = checkLoopExit(newEnd, block, state);
- FixedNode result = target.fixed;
- getEntryState(block).merge(mergeNode, target.state);
- mergeNode.addForwardEnd(newEnd);
-
- debug.log("createTarget %s: merging state, result: %s", block, result);
- return result;
}
/**
@@ -2965,28 +2996,28 @@
}
while (bci < endBCI) {
- if (graphBuilderConfig.insertFullInfopoints() && !parsingIntrinsic()) {
- currentLineNumber = lnt != null ? lnt.getLineNumber(bci) : -1;
- if (currentLineNumber != previousLineNumber) {
- genInfoPointNode(InfopointReason.BYTECODE_POSITION, null);
- previousLineNumber = currentLineNumber;
+ try (DebugCloseable context = openNodeContext()) {
+ if (graphBuilderConfig.insertFullInfopoints() && !parsingIntrinsic()) {
+ currentLineNumber = lnt != null ? lnt.getLineNumber(bci) : -1;
+ if (currentLineNumber != previousLineNumber) {
+ genInfoPointNode(InfopointReason.BYTECODE_POSITION, null);
+ previousLineNumber = currentLineNumber;
+ }
}
- }
-
- // read the opcode
- int opcode = stream.currentBC();
- assert traceState();
- assert traceInstruction(bci, opcode, bci == block.startBci);
- if (parent == null && bci == entryBCI) {
- if (block.getJsrScope() != JsrScope.EMPTY_SCOPE) {
- throw new JsrNotSupportedBailout("OSR into a JSR scope is not supported");
+
+ // read the opcode
+ int opcode = stream.currentBC();
+ assert traceState();
+ assert traceInstruction(bci, opcode, bci == block.startBci);
+ if (parent == null && bci == entryBCI) {
+ if (block.getJsrScope() != JsrScope.EMPTY_SCOPE) {
+ throw new JsrNotSupportedBailout("OSR into a JSR scope is not supported");
+ }
+ EntryMarkerNode x = append(new EntryMarkerNode());
+ frameState.insertProxies(value -> graph.unique(new EntryProxyNode(value, x)));
+ x.setStateAfter(createFrameState(bci, x));
}
- EntryMarkerNode x = append(new EntryMarkerNode());
- frameState.insertProxies(value -> graph.unique(new EntryProxyNode(value, x)));
- x.setStateAfter(createFrameState(bci, x));
- }
-
- try (DebugCloseable context = openNodeContext()) {
+
processBytecode(bci, opcode);
} catch (BailoutException e) {
// Don't wrap bailouts as parser errors
@@ -3017,13 +3048,28 @@
}
}
- private DebugCloseable openNodeContext() {
- if ((graphBuilderConfig.trackNodeSourcePosition() || debug.isDumpEnabledForMethod()) && !parsingIntrinsic()) {
- return graph.withNodeSourcePosition(createBytecodePosition());
+ private DebugCloseable openNodeContext(FrameStateBuilder state, int startBci) {
+ if (graph.trackNodeSourcePosition()) {
+ return graph.withNodeSourcePosition(state.createBytecodePosition(startBci));
}
return null;
}
+ private DebugCloseable openNodeContext(ResolvedJavaMethod targetMethod) {
+ return openNodeContext(targetMethod, -1);
+ }
+
+ private DebugCloseable openNodeContext(ResolvedJavaMethod targetMethod, int bci) {
+ if (graph.trackNodeSourcePosition()) {
+ return graph.withNodeSourcePosition(new NodeSourcePosition(createBytecodePosition(), targetMethod, bci));
+ }
+ return null;
+ }
+
+ private DebugCloseable openNodeContext() {
+ return openNodeContext(frameState, bci());
+ }
+
/* Also a hook for subclasses. */
protected boolean forceLoopPhis() {
return graph.isOSR();
@@ -3133,7 +3179,7 @@
genIf(condition, trueSuccessor, falseSuccessor, probability);
}
- private double getProfileProbability(boolean negate) {
+ protected double getProfileProbability(boolean negate) {
double probability;
if (profilingInfo == null) {
probability = 0.5;
@@ -3433,7 +3479,8 @@
}
protected NodeSourcePosition createBytecodePosition() {
- return frameState.createBytecodePosition(bci());
+ NodeSourcePosition bytecodePosition = frameState.createBytecodePosition(bci());
+ return bytecodePosition;
}
public void setCurrentFrameState(FrameStateBuilder frameState) {
@@ -3454,6 +3501,7 @@
frameState.push(kind, value);
}
+ @SuppressWarnings("try")
public void loadLocalObject(int index) {
ValueNode value = frameState.loadLocal(index, JavaKind.Object);
@@ -3461,7 +3509,9 @@
int nextBC = stream.readUByte(nextBCI);
if (nextBCI <= currentBlock.endBci && nextBC == Bytecodes.GETFIELD) {
stream.next();
- genGetField(stream.readCPI(), Bytecodes.GETFIELD, value);
+ try (DebugCloseable ignored = openNodeContext()) {
+ genGetField(stream.readCPI(), Bytecodes.GETFIELD, value);
+ }
} else {
frameState.push(JavaKind.Object, value);
}
@@ -3689,6 +3739,17 @@
genIf(x, cond, y);
}
+ private static void initialize(ResolvedJavaType resolvedType) {
+ /*
+ * Since we're potentially triggering class initialization here, we need synchronization to
+ * mitigate the potential for class initialization related deadlock being caused by the
+ * compiler (e.g., https://github.com/graalvm/graal-core/pull/232/files#r90788550).
+ */
+ synchronized (BytecodeParser.class) {
+ resolvedType.initialize();
+ }
+ }
+
protected JavaType lookupType(int cpi, int bytecode) {
maybeEagerlyResolve(cpi, bytecode);
JavaType result = constantPool.lookupType(cpi, bytecode);
@@ -3699,32 +3760,26 @@
private JavaMethod lookupMethod(int cpi, int opcode) {
maybeEagerlyResolve(cpi, opcode);
JavaMethod result = constantPool.lookupMethod(cpi, opcode);
- /*
- * In general, one cannot assume that the declaring class being initialized is useful, since
- * the actual concrete receiver may be a different class (except for static calls). Also,
- * interfaces are initialized only under special circumstances, so that this assertion would
- * often fail for interface calls.
- */
- assert !graphBuilderConfig.unresolvedIsError() ||
- (result instanceof ResolvedJavaMethod && (opcode != INVOKESTATIC || ((ResolvedJavaMethod) result).getDeclaringClass().isInitialized())) : result;
+ assert !graphBuilderConfig.unresolvedIsError() || result instanceof ResolvedJavaMethod : result;
return result;
}
protected JavaField lookupField(int cpi, int opcode) {
maybeEagerlyResolve(cpi, opcode);
JavaField result = constantPool.lookupField(cpi, method, opcode);
-
- if (graphBuilderConfig.eagerResolving()) {
- assert !graphBuilderConfig.unresolvedIsError() || result instanceof ResolvedJavaField : "Not resolved: " + result;
+ assert !graphBuilderConfig.unresolvedIsError() || result instanceof ResolvedJavaField : "Not resolved: " + result;
+ if (parsingIntrinsic() || eagerInitializing) {
if (result instanceof ResolvedJavaField) {
ResolvedJavaType declaringClass = ((ResolvedJavaField) result).getDeclaringClass();
if (!declaringClass.isInitialized()) {
- assert declaringClass.isInterface() : "Declaring class not initialized but not an interface? " + declaringClass;
- declaringClass.initialize();
+ // Even with eager initialization, superinterfaces are not always initialized.
+ // See StaticInterfaceFieldTest
+ assert !eagerInitializing || declaringClass.isInterface() : "Declaring class not initialized but not an interface? " + declaringClass;
+ initialize(declaringClass);
}
}
}
- assert !graphBuilderConfig.unresolvedIsError() || (result instanceof ResolvedJavaField && ((ResolvedJavaField) result).getDeclaringClass().isInitialized()) : result;
+ assert !uninitializedIsError || (result instanceof ResolvedJavaField && ((ResolvedJavaField) result).getDeclaringClass().isInitialized()) : result;
return result;
}
@@ -3745,7 +3800,12 @@
* the compiler (e.g., https://github.com/graalvm/graal-core/pull/232/files#r90788550).
*/
synchronized (BytecodeParser.class) {
- constantPool.loadReferencedType(cpi, bytecode);
+ ClassInitializationPlugin classInitializationPlugin = graphBuilderConfig.getPlugins().getClassInitializationPlugin();
+ if (classInitializationPlugin != null) {
+ classInitializationPlugin.loadReferencedType(this, constantPool, cpi, bytecode);
+ } else {
+ constantPool.loadReferencedType(cpi, bytecode);
+ }
}
}
}
@@ -3872,11 +3932,16 @@
}
void genNewInstance(JavaType type) {
- if (!(type instanceof ResolvedJavaType) || !((ResolvedJavaType) type).isInitialized()) {
+ if (!(type instanceof ResolvedJavaType)) {
handleUnresolvedNewInstance(type);
return;
}
ResolvedJavaType resolvedType = (ResolvedJavaType) type;
+ ClassInitializationPlugin classInitializationPlugin = graphBuilderConfig.getPlugins().getClassInitializationPlugin();
+ if (!resolvedType.isInitialized() && classInitializationPlugin == null) {
+ handleUnresolvedNewInstance(type);
+ return;
+ }
ResolvedJavaType[] skippedExceptionTypes = this.graphBuilderConfig.getSkippedExceptionTypes();
if (skippedExceptionTypes != null) {
@@ -3888,7 +3953,6 @@
}
}
- ClassInitializationPlugin classInitializationPlugin = graphBuilderConfig.getPlugins().getClassInitializationPlugin();
if (classInitializationPlugin != null && classInitializationPlugin.shouldApply(this, resolvedType)) {
FrameState stateBefore = frameState.create(bci(), getNonIntrinsicAncestor(), false, null, null);
classInitializationPlugin.apply(this, resolvedType, stateBefore);
@@ -4078,7 +4142,7 @@
}
}
- private boolean needsExplicitException() {
+ protected boolean needsExplicitException() {
BytecodeExceptionMode exceptionMode = graphBuilderConfig.getBytecodeExceptionMode();
if (exceptionMode == BytecodeExceptionMode.CheckAll || StressExplicitExceptionCode.getValue(options)) {
return true;
@@ -4163,7 +4227,7 @@
private ResolvedJavaField resolveStaticFieldAccess(JavaField field, ValueNode value) {
if (field instanceof ResolvedJavaField) {
ResolvedJavaField resolvedField = (ResolvedJavaField) field;
- if (resolvedField.getDeclaringClass().isInitialized()) {
+ if (resolvedField.getDeclaringClass().isInitialized() || graphBuilderConfig.getPlugins().getClassInitializationPlugin() != null) {
return resolvedField;
}
/*
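
Taken together, the BytecodeParser changes route node creation through openNodeContext so that, whenever the graph tracks positions, each appended node carries the bci it was parsed from; the new assertion in append enforces this invariant. The idiom relies on try-with-resources skipping close() for a null resource, so nothing is allocated when tracking is off. A condensed usage sketch, with names taken from the diff:

    // openNodeContext() returns null when the graph does not track positions;
    // try-with-resources tolerates a null resource, so the fast path is free.
    try (DebugCloseable context = openNodeContext()) {        // position = current method @ bci()
        processBytecode(bci, opcode);                         // nodes created here are tagged
    }

    // For code inlined from a plugin or accessor, the position is extended
    // with the callee so the chain records both caller and callee:
    try (DebugCloseable context = openNodeContext(targetMethod, 1)) {
        genGetField(resolvedField, receiver);
    }
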
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParserOptions.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParserOptions.java Sat Mar 24 01:08:35 2018 +0100
@@ -58,7 +58,7 @@
public static final OptionKey<Boolean> TraceParserPlugins = new OptionKey<>(false);
@Option(help = "Maximum depth when inlining during bytecode parsing.", type = OptionType.Debug)
- public static final OptionKey<Integer> InlineDuringParsingMaxDepth = new OptionKey<>(3);
+ public static final OptionKey<Integer> InlineDuringParsingMaxDepth = new OptionKey<>(10);
@Option(help = "When creating info points hide the methods of the substitutions.", type = OptionType.Debug)
public static final OptionKey<Boolean> HideSubstitutionStates = new OptionKey<>(false);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/FrameStateBuilder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/FrameStateBuilder.java Sat Mar 24 01:08:35 2018 +0100
@@ -32,7 +32,6 @@
import static org.graalvm.compiler.bytecode.Bytecodes.POP2;
import static org.graalvm.compiler.bytecode.Bytecodes.SWAP;
import static org.graalvm.compiler.debug.GraalError.shouldNotReachHere;
-import static org.graalvm.compiler.java.BytecodeParserOptions.HideSubstitutionStates;
import static org.graalvm.compiler.nodes.FrameState.TWO_SLOT_MARKER;
import java.util.ArrayList;
@@ -74,7 +73,6 @@
import jdk.vm.ci.code.BytecodeFrame;
import jdk.vm.ci.meta.Assumptions;
-import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -112,8 +110,6 @@
*/
private List<StateSplit> sideEffects;
- private JavaConstant constantReceiver;
-
/**
* Creates a new frame state builder for the given method and the given target graph.
*
@@ -164,7 +160,6 @@
locals[javaIndex] = arguments[index];
javaIndex = 1;
index = 1;
- constantReceiver = locals[0].asJavaConstant();
}
Signature sig = getMethod().getSignature();
int max = sig.getParameterCount(false);
@@ -310,7 +305,7 @@
public FrameState create(int bci, StateSplit forStateSplit) {
if (parser != null && parser.parsingIntrinsic()) {
- NodeSourcePosition sourcePosition = createBytecodePosition(bci, false);
+ NodeSourcePosition sourcePosition = parser.getGraph().trackNodeSourcePosition() ? createBytecodePosition(bci) : null;
return parser.intrinsicContext.createFrameState(parser.getGraph(), this, forStateSplit, sourcePosition);
}
@@ -354,25 +349,14 @@
}
public NodeSourcePosition createBytecodePosition(int bci) {
- return createBytecodePosition(bci, HideSubstitutionStates.getValue(parser.graph.getOptions()));
+ BytecodeParser parent = parser.getParent();
+ NodeSourcePosition position = create(bci, parent);
+ return position;
}
- private NodeSourcePosition createBytecodePosition(int bci, boolean hideSubstitutionStates) {
- BytecodeParser parent = parser.getParent();
- if (hideSubstitutionStates) {
- if (parser.parsingIntrinsic()) {
- // Attribute to the method being replaced
- return new NodeSourcePosition(constantReceiver, parent.getFrameStateBuilder().createBytecodePosition(parent.bci()), parser.intrinsicContext.getOriginalMethod(), -1);
- }
- // Skip intrinsic frames
- parent = parser.getNonIntrinsicAncestor();
- }
- return create(constantReceiver, bci, parent, hideSubstitutionStates);
- }
-
- private NodeSourcePosition create(JavaConstant receiver, int bci, BytecodeParser parent, boolean hideSubstitutionStates) {
+ private NodeSourcePosition create(int bci, BytecodeParser parent) {
if (outerSourcePosition == null && parent != null) {
- outerSourcePosition = parent.getFrameStateBuilder().createBytecodePosition(parent.bci(), hideSubstitutionStates);
+ outerSourcePosition = parent.getFrameStateBuilder().createBytecodePosition(parent.bci());
}
if (bci == BytecodeFrame.AFTER_EXCEPTION_BCI && parent != null) {
return FrameState.toSourcePosition(outerFrameState);
@@ -380,7 +364,7 @@
if (bci == BytecodeFrame.INVALID_FRAMESTATE_BCI) {
throw shouldNotReachHere();
}
- return new NodeSourcePosition(receiver, outerSourcePosition, code.getMethod(), bci);
+ return new NodeSourcePosition(outerSourcePosition, code.getMethod(), bci);
}
public FrameStateBuilder copy() {
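
With the receiver and substitution-hiding logic removed, createBytecodePosition reduces to a plain recursion: each builder memoizes its parent's position chain in outerSourcePosition and links its own method and bci in front of it. A small self-contained model of the resulting chain (hypothetical types, not Graal classes):

    // Runnable model of the position chain for a() -> b() -> c().
    public class PositionChainDemo {
        record Pos(Pos outer, String method, int bci) {
            @Override
            public String toString() {
                return method + "@" + bci + (outer == null ? "" : " -> " + outer);
            }
        }

        public static void main(String[] args) {
            Pos a = new Pos(null, "a", 12); // a() invokes b() at bci 12
            Pos b = new Pos(a, "b", 4);     // b() invokes c() at bci 4
            Pos c = new Pos(b, "c", 7);     // node created while parsing c at bci 7
            System.out.println(c);          // prints: c@7 -> b@4 -> a@12
        }
    }
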
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/JTTTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/JTTTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,25 +22,14 @@
*/
package org.graalvm.compiler.jtt;
-import static java.lang.reflect.Modifier.isStatic;
-
import java.util.Collections;
import java.util.Set;
import org.graalvm.compiler.core.test.GraalCompilerTest;
-import org.graalvm.compiler.nodes.ConstantNode;
-import org.graalvm.compiler.nodes.ParameterNode;
-import org.graalvm.compiler.nodes.StructuredGraph;
-import org.graalvm.compiler.nodes.StructuredGraph.Builder;
import org.graalvm.compiler.options.OptionValues;
-import org.graalvm.compiler.phases.PhaseSuite;
-import org.graalvm.compiler.phases.tiers.HighTierContext;
import org.junit.Assert;
-import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.meta.DeoptimizationReason;
-import jdk.vm.ci.meta.JavaConstant;
-import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.ResolvedJavaMethod;
/**
@@ -66,45 +55,35 @@
}
@Override
- protected StructuredGraph parse(Builder builder, PhaseSuite<HighTierContext> graphBuilderSuite) {
- StructuredGraph graph = super.parse(builder, graphBuilderSuite);
- if (argsToBind != null) {
- ResolvedJavaMethod m = graph.method();
- Object receiver = isStatic(m.getModifiers()) ? null : this;
- Object[] args = argsWithReceiver(receiver, argsToBind);
- JavaType[] parameterTypes = m.toParameterTypes();
- assert parameterTypes.length == args.length;
- for (ParameterNode param : graph.getNodes(ParameterNode.TYPE)) {
- JavaConstant c = getSnippetReflection().forBoxed(parameterTypes[param.index()].getJavaKind(), args[param.index()]);
- ConstantNode replacement = ConstantNode.forConstant(c, getMetaAccess(), graph);
- param.replaceAtUsages(replacement);
- }
- }
- return graph;
+ protected Object[] getArgumentToBind() {
+ return argsToBind;
}
- @Override
- protected InstalledCode getCode(ResolvedJavaMethod method, StructuredGraph graph, boolean forceCompile, boolean installAsDefault, OptionValues options) {
- return super.getCode(method, graph, argsToBind != null, installAsDefault, options);
- }
-
- Double delta;
+ /**
+ * If non-null, then this is a test for a method returning a {@code double} value that must be
+ * within {@code ulpDelta}s of the expected value.
+ */
+ protected Double ulpDelta;
@Override
protected void assertDeepEquals(Object expected, Object actual) {
- if (delta != null) {
- Assert.assertEquals(((Number) expected).doubleValue(), ((Number) actual).doubleValue(), delta);
+ if (ulpDelta != null) {
+ double expectedDouble = (double) expected;
+ double actualDouble = (Double) actual;
+ double ulp = Math.ulp(expectedDouble);
+ double delta = ulpDelta * ulp;
+ try {
+ Assert.assertEquals(expectedDouble, actualDouble, delta);
+ } catch (AssertionError e) {
+ double diff = Math.abs(expectedDouble - actualDouble);
+ double diffUlps = diff / ulp;
+ throw new AssertionError(e.getMessage() + " // " + diffUlps + " ulps");
+ }
} else {
super.assertDeepEquals(expected, actual);
}
}
- @SuppressWarnings("hiding")
- protected void runTestWithDelta(double delta, String name, Object... args) {
- this.delta = Double.valueOf(delta);
- runTest(name, args);
- }
-
protected void runTest(String name, Object... args) {
runTest(getInitialOptions(), name, args);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/hotpath/HP_series.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/hotpath/HP_series.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,11 +24,9 @@
package org.graalvm.compiler.jtt.hotpath;
-import org.junit.Ignore;
+import org.graalvm.compiler.jtt.JTTTest;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
-
/*
*/
public class HP_series extends JTTTest {
@@ -101,17 +99,16 @@
return (0.0);
}
- /*
- * This test is sensible to the implementation of Math.pow, cos and sin. Since for these
- * functions, the specs says "The computed result must be within 1 ulp of the exact result",
- * different implementation may return different results. The 11 ulp delta allowed for test(100)
- * tries to account for that but is not guaranteed to work forever.
+ /**
+ * This test is sensitive to the implementation of {@link Math#pow}, {@link Math#cos} and
+ * {@link Math#sin(double)}. Since for these functions the specs say "The computed result must
+ * be within 1 ulp of the exact result", different implementations may return different results.
+ * The 11 ulp delta allowed for test(100) tries to account for that but is not guaranteed to
+ * work forever.
*/
- @Ignore("failure-prone because of the variabiliy of pow/cos/sin")
@Test
public void run0() throws Throwable {
- double expected = 0.6248571921291398d;
- runTestWithDelta(11 * Math.ulp(expected), "test", 100);
+ ulpDelta = 11.0D;
+ runTest("test", 100);
}
-
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_abs.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_abs.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,13 +22,14 @@
*/
package org.graalvm.compiler.jtt.lang;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
*/
-public class Math_abs extends JTTTest {
+public class Math_abs extends UnaryMath {
@SuppressWarnings("serial")
public static class NaN extends Throwable {
@@ -78,4 +79,10 @@
runTest("test", java.lang.Double.NaN);
}
+ @Test
+ public void run7() {
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ testManyValues(options, method);
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_cos.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_cos.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,13 +22,14 @@
*/
package org.graalvm.compiler.jtt.lang;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
*/
-public class Math_cos extends JTTTest {
+public class Math_cos extends UnaryMath {
@SuppressWarnings("serial")
public static class NaN extends Throwable {
@@ -58,4 +59,10 @@
runTest("test", java.lang.Double.POSITIVE_INFINITY);
}
+ @Test
+ public void run3() {
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ testManyValues(options, method);
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_exp.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_exp.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,14 +22,12 @@
*/
package org.graalvm.compiler.jtt.lang;
-import org.junit.Ignore;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
-
/*
*/
-public class Math_exp extends JTTTest {
+public class Math_exp extends UnaryMath {
public static double test(double arg) {
return Math.exp(arg);
@@ -65,9 +63,19 @@
runTest("test", 0.0D);
}
- @Ignore("java.lang.AssertionError: expected:<2.718281828459045> but was:<2.7182818284590455>")
@Test
public void run6() {
runTest("test", 1.0D);
}
+
+ @Test
+ public void run7() {
+ runTest("test", -1024D);
+ }
+
+ @Test
+ public void run8() {
+ OptionValues options = getInitialOptions();
+ testManyValues(options, getResolvedJavaMethod("test"));
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_log.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_log.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,13 +22,14 @@
*/
package org.graalvm.compiler.jtt.lang;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
*/
-public class Math_log extends JTTTest {
+public class Math_log extends UnaryMath {
@SuppressWarnings("serial")
public static class NaN extends Throwable {
@@ -78,4 +79,10 @@
runTest("test", -0.0d);
}
+ @Test
+ public void run7() {
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ testManyValues(options, method);
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_pow.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_pow.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,9 +22,11 @@
*/
package org.graalvm.compiler.jtt.lang;
+import org.graalvm.compiler.jtt.JTTTest;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
*/
@@ -88,4 +90,44 @@
public void run10() throws Throwable {
runTest("test", 0.999998, 1500000.0);
}
+
+ private static final long STEP = Long.MAX_VALUE / 1_000_000;
+
+ @Test
+ public void run11() {
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ Object receiver = null;
+ long testIteration = 0;
+ for (long l = Long.MIN_VALUE;; l += STEP) {
+ double x = Double.longBitsToDouble(l);
+ double y = x;
+ testOne(options, method, receiver, testIteration, l, x, y);
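+ // Mirror l into the opposite half of the long range so that y gets a second, unrelated bit pattern.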
+ y = l < 0 ? Double.longBitsToDouble(Long.MAX_VALUE + l) : Double.longBitsToDouble(Long.MAX_VALUE - l);
+ testOne(options, method, receiver, testIteration, l, x, y);
+ if (Long.MAX_VALUE - STEP < l) {
+ break;
+ }
+ testIteration++;
+ }
+ }
+
+ @Test
+ public void run12() {
+ long l = 4355599093822972882L;
+ double x = Double.longBitsToDouble(l);
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ Object receiver = null;
+ testOne(options, method, receiver, 1, l, x, x);
+ }
+
+ private void testOne(OptionValues options, ResolvedJavaMethod method, Object receiver, long testIteration, long l, double x, double y) throws AssertionError {
+ Result expect = executeExpected(method, receiver, x, y);
+ try {
+ testAgainstExpected(options, method, expect, EMPTY, receiver, x, y);
+ } catch (AssertionError e) {
+ throw new AssertionError(String.format("%d: While testing %g [long: %d, hex: %x]", testIteration, x, l, l), e);
+ }
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_sin.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_sin.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,13 +22,14 @@
*/
package org.graalvm.compiler.jtt.lang;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
*/
-public class Math_sin extends JTTTest {
+public class Math_sin extends UnaryMath {
@SuppressWarnings("serial")
public static class NaN extends Throwable {
@@ -83,4 +84,10 @@
runTest("test", 0.0d);
}
+ @Test
+ public void run5() {
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ testManyValues(options, method);
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_sqrt.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_sqrt.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,13 +22,14 @@
*/
package org.graalvm.compiler.jtt.lang;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
*/
-public class Math_sqrt extends JTTTest {
+public class Math_sqrt extends UnaryMath {
@SuppressWarnings("serial")
public static class NaN extends Throwable {
@@ -78,4 +79,10 @@
runTest("test", -0.0d);
}
+ @Test
+ public void run7() {
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ testManyValues(options, method);
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_tan.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/Math_tan.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,13 +22,14 @@
*/
package org.graalvm.compiler.jtt.lang;
+import org.graalvm.compiler.options.OptionValues;
import org.junit.Test;
-import org.graalvm.compiler.jtt.JTTTest;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
/*
*/
-public class Math_tan extends JTTTest {
+public class Math_tan extends UnaryMath {
@SuppressWarnings("serial")
public static class NaN extends Throwable {
@@ -68,4 +69,10 @@
runTest("test", 0.0d);
}
+ @Test
+ public void run5() {
+ OptionValues options = getInitialOptions();
+ ResolvedJavaMethod method = getResolvedJavaMethod("test");
+ testManyValues(options, method);
+ }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.jtt/src/org/graalvm/compiler/jtt/lang/UnaryMath.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.jtt.lang;
+
+import org.graalvm.compiler.jtt.JTTTest;
+import org.graalvm.compiler.options.OptionValues;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+public abstract class UnaryMath extends JTTTest {
+
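+ // STEP walks the full long bit-pattern space in roughly two million samples, covering doubles of every magnitude as well as NaN patterns.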
+ private static final long STEP = Long.MAX_VALUE / 1_000_000;
+
+ /**
+ * Tests a unary {@link Math} method on a wide range of values.
+ */
+ void testManyValues(OptionValues options, ResolvedJavaMethod method) throws AssertionError {
+ if (!Java8OrEarlier) {
+ /*
+ * GR-8276: Allow for variance on JVMCI > 8 until a JVMCI version that includes
+ * https://github.com/graalvm/graal-jvmci-8/commit/
+ * c86fb66f86b8d52a08dd2495d34879d3730f9987 or Graal has stubs that are monotonic with
+ * other HotSpot implementations of these Math routines.
+ */
+ ulpDelta = 2D;
+ } else {
+ /*
+ * Forces the assertion message to show the ulps by which a computed result is wrong.
+ */
+ ulpDelta = 0D;
+ }
+ Object receiver = null;
+ long testIteration = 0;
+ for (long l = Long.MIN_VALUE;; l += STEP) {
+ double d = Double.longBitsToDouble(l);
+ Result expect = executeExpected(method, receiver, d);
+ try {
+ testAgainstExpected(options, method, expect, EMPTY, receiver, d);
+ testIteration++;
+ } catch (AssertionError e) {
+ throw new AssertionError(String.format("%d: While testing %g [long: %d, hex: %x]", testIteration, d, l, l), e);
+ }
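+ // Stop before l += STEP would overflow past Long.MAX_VALUE.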
+ if (Long.MAX_VALUE - STEP < l) {
+ break;
+ }
+ }
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64Call.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64Call.java Sat Mar 24 01:08:35 2018 +0100
@@ -227,13 +227,15 @@
masm.ensureUniquePC();
}
- public static void directJmp(CompilationResultBuilder crb, AArch64MacroAssembler masm, InvokeTarget target) {
- int before = masm.position();
- // Address is fixed up later by c++ code.
- masm.jmp();
- int after = masm.position();
- crb.recordDirectCall(before, after, target, null);
- masm.ensureUniquePC();
+ public static void directJmp(CompilationResultBuilder crb, AArch64MacroAssembler masm, InvokeTarget callTarget) {
+ try (AArch64MacroAssembler.ScratchRegister scratch = masm.getScratchRegister()) {
+ int before = masm.position();
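+ // Load a placeholder address that is fixed up later by C++ code, then jump through the scratch register.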
+ masm.movNativeAddress(scratch.getRegister(), 0L);
+ masm.jmp(scratch.getRegister());
+ int after = masm.position();
+ crb.recordDirectCall(before, after, callTarget, null);
+ masm.ensureUniquePC();
+ }
}
public static void indirectJmp(CompilationResultBuilder crb, AArch64MacroAssembler masm, Register dst, InvokeTarget target) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayCompareToOp.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,595 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64;
+
+import static jdk.vm.ci.amd64.AMD64.k7;
+import static jdk.vm.ci.amd64.AMD64.rax;
+import static jdk.vm.ci.amd64.AMD64.rcx;
+import static jdk.vm.ci.amd64.AMD64.rdx;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.ILLEGAL;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
+
+import java.lang.reflect.Array;
+import java.lang.reflect.Field;
+import java.util.EnumSet;
+
+import org.graalvm.compiler.asm.Label;
+import org.graalvm.compiler.asm.amd64.AMD64Address;
+import org.graalvm.compiler.asm.amd64.AMD64Address.Scale;
+import org.graalvm.compiler.asm.amd64.AMD64Assembler.AvxVectorLen;
+import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
+import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
+import org.graalvm.compiler.core.common.LIRKind;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.Opcode;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+import org.graalvm.compiler.lir.gen.LIRGeneratorTool;
+
+import jdk.vm.ci.amd64.AMD64;
+import jdk.vm.ci.amd64.AMD64.CPUFeature;
+import jdk.vm.ci.amd64.AMD64Kind;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.Value;
+import sun.misc.Unsafe;
+
+/**
+ * Emits code which compares two arrays lexicographically. If the CPU supports any vector
+ * instructions, specialized code is emitted to leverage these instructions.
+ */
+@Opcode("ARRAY_COMPARE_TO")
+public final class AMD64ArrayCompareToOp extends AMD64LIRInstruction {
+ public static final LIRInstructionClass<AMD64ArrayCompareToOp> TYPE = LIRInstructionClass.create(AMD64ArrayCompareToOp.class);
+
+ private final JavaKind kind1;
+ private final JavaKind kind2;
+ private final int array1BaseOffset;
+ private final int array2BaseOffset;
+
+ @Def({REG}) protected Value resultValue;
+ @Alive({REG}) protected Value array1Value;
+ @Alive({REG}) protected Value array2Value;
+ @Alive({REG}) protected Value length1Value;
+ @Alive({REG}) protected Value length2Value;
+ @Temp({REG}) protected Value temp1;
+ @Temp({REG}) protected Value temp2;
+
+ @Temp({REG, ILLEGAL}) protected Value vectorTemp1;
+
+ public AMD64ArrayCompareToOp(LIRGeneratorTool tool, JavaKind kind1, JavaKind kind2, Value result, Value array1, Value array2, Value length1, Value length2) {
+ super(TYPE);
+ this.kind1 = kind1;
+ this.kind2 = kind2;
+
+ // Both offsets should be the same, but better safe than sorry.
+ Class<?> array1Class = Array.newInstance(kind1.toJavaClass(), 0).getClass();
+ Class<?> array2Class = Array.newInstance(kind2.toJavaClass(), 0).getClass();
+ this.array1BaseOffset = UNSAFE.arrayBaseOffset(array1Class);
+ this.array2BaseOffset = UNSAFE.arrayBaseOffset(array2Class);
+
+ this.resultValue = result;
+ this.array1Value = array1;
+ this.array2Value = array2;
+ this.length1Value = length1;
+ this.length2Value = length2;
+
+ // Allocate some temporaries.
+ this.temp1 = tool.newVariable(LIRKind.unknownReference(tool.target().arch.getWordKind()));
+ this.temp2 = tool.newVariable(LIRKind.unknownReference(tool.target().arch.getWordKind()));
+
+ // We only need the vector temporaries if we generate SSE code.
+ if (supportsSSE42(tool.target())) {
+ this.vectorTemp1 = tool.newVariable(LIRKind.value(AMD64Kind.DOUBLE));
+ } else {
+ this.vectorTemp1 = Value.ILLEGAL;
+ }
+ }
+
+ private static boolean supportsSSE42(TargetDescription target) {
+ AMD64 arch = (AMD64) target.arch;
+ return arch.getFeatures().contains(CPUFeature.SSE4_2);
+ }
+
+ private static boolean supportsAVX2(TargetDescription target) {
+ AMD64 arch = (AMD64) target.arch;
+ return arch.getFeatures().contains(CPUFeature.AVX2);
+ }
+
+ private static boolean supportsAVX512VLBW(TargetDescription target) {
+ AMD64 arch = (AMD64) target.arch;
+ EnumSet<CPUFeature> features = arch.getFeatures();
+ return features.contains(CPUFeature.AVX512BW) && features.contains(CPUFeature.AVX512VL);
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+ Register result = asRegister(resultValue);
+ Register str1 = asRegister(temp1);
+ Register str2 = asRegister(temp2);
+
+ // Load array base addresses.
+ masm.leaq(str1, new AMD64Address(asRegister(array1Value), array1BaseOffset));
+ masm.leaq(str2, new AMD64Address(asRegister(array2Value), array2BaseOffset));
+ Register cnt1 = asRegister(length1Value);
+ Register cnt2 = asRegister(length2Value);
+
+ // Checkstyle: stop
+ Label LENGTH_DIFF_LABEL = new Label();
+ Label POP_LABEL = new Label();
+ Label DONE_LABEL = new Label();
+ Label WHILE_HEAD_LABEL = new Label();
+ Label COMPARE_WIDE_VECTORS_LOOP_FAILED = new Label(); // used only _LP64 && AVX3
+ int stride, stride2;
+ int adr_stride = -1;
+ int adr_stride1 = -1;
+ int adr_stride2 = -1;
+ // Checkstyle: resume
+ int stride2x2 = 0x40;
+ AMD64Address.Scale scale = null;
+ AMD64Address.Scale scale1 = null;
+ AMD64Address.Scale scale2 = null;
+
+ // if (ae != StrIntrinsicNode::LL) {
+ if (kind1 != JavaKind.Byte || kind2 != JavaKind.Byte) {
+ stride2x2 = 0x20;
+ }
+
+ // if (ae == StrIntrinsicNode::LU || ae == StrIntrinsicNode::UL) {
+ if (kind1 != kind2) {
+ masm.shrl(cnt2, 1);
+ }
+ // Compute the minimum of the string lengths and the
+ // difference of the string lengths (stack).
+ // Do the conditional move stuff
+ masm.movl(result, cnt1);
+ masm.subl(cnt1, cnt2);
+ masm.push(cnt1);
+ masm.cmovl(ConditionFlag.LessEqual, cnt2, result); // cnt2 = min(cnt1, cnt2)
+
+ // Is the minimum length zero?
+ masm.testl(cnt2, cnt2);
+ masm.jcc(ConditionFlag.Zero, LENGTH_DIFF_LABEL);
+ // if (ae == StrIntrinsicNode::LL) {
+ if (kind1 == JavaKind.Byte && kind2 == JavaKind.Byte) {
+ // Load first bytes
+ masm.movzbl(result, new AMD64Address(str1, 0)); // result = str1[0]
+ masm.movzbl(cnt1, new AMD64Address(str2, 0)); // cnt1 = str2[0]
+ // } else if (ae == StrIntrinsicNode::UU) {
+ } else if (kind1 == JavaKind.Char && kind2 == JavaKind.Char) {
+ // Load first characters
+ masm.movzwl(result, new AMD64Address(str1, 0));
+ masm.movzwl(cnt1, new AMD64Address(str2, 0));
+ } else {
+ masm.movzbl(result, new AMD64Address(str1, 0));
+ masm.movzwl(cnt1, new AMD64Address(str2, 0));
+ }
+ masm.subl(result, cnt1);
+ masm.jcc(ConditionFlag.NotZero, POP_LABEL);
+
+ // if (ae == StrIntrinsicNode::UU) {
+ if (kind1 == JavaKind.Char && kind2 == JavaKind.Char) {
+ // Divide length by 2 to get number of chars
+ masm.shrl(cnt2, 1);
+ }
+ masm.cmpl(cnt2, 1);
+ masm.jcc(ConditionFlag.Equal, LENGTH_DIFF_LABEL);
+
+ // Check if the strings start at the same location and setup scale and stride
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.cmpptr(str1, str2);
+ masm.jcc(ConditionFlag.Equal, LENGTH_DIFF_LABEL);
+ // if (ae == StrIntrinsicNode::LL) {
+ if (kind1 == JavaKind.Byte && kind2 == JavaKind.Byte) {
+ scale = AMD64Address.Scale.Times1;
+ stride = 16;
+ } else {
+ scale = AMD64Address.Scale.Times2;
+ stride = 8;
+ }
+ } else {
+ scale1 = AMD64Address.Scale.Times1;
+ scale2 = AMD64Address.Scale.Times2;
+ // scale not used
+ stride = 8;
+ }
+
+ // if (UseAVX >= 2 && UseSSE42Intrinsics) {
+ if (supportsAVX2(crb.target) && supportsSSE42(crb.target)) {
+ Register vec1 = asRegister(vectorTemp1, AMD64Kind.DOUBLE);
+
+ // Checkstyle: stop
+ Label COMPARE_WIDE_VECTORS = new Label();
+ Label VECTOR_NOT_EQUAL = new Label();
+ Label COMPARE_WIDE_TAIL = new Label();
+ Label COMPARE_SMALL_STR = new Label();
+ Label COMPARE_WIDE_VECTORS_LOOP = new Label();
+ Label COMPARE_16_CHARS = new Label();
+ Label COMPARE_INDEX_CHAR = new Label();
+ Label COMPARE_WIDE_VECTORS_LOOP_AVX2 = new Label();
+ Label COMPARE_TAIL_LONG = new Label();
+ Label COMPARE_WIDE_VECTORS_LOOP_AVX3 = new Label(); // used only _LP64 && AVX3
+ // Checkstyle: resume
+
+ int pcmpmask = 0x19;
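+ // imm8 0x19: unsigned words, "equal each" aggregation, negated result, least-significant match index; clearing bit 0 below selects unsigned bytes for the byte/byte case.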
+ // if (ae == StrIntrinsicNode::LL) {
+ if (kind1 == JavaKind.Byte && kind2 == JavaKind.Byte) {
+ pcmpmask &= ~0x01;
+ }
+
+ // Setup to compare 16-chars (32-bytes) vectors,
+ // start from first character again because it has aligned address.
+ // if (ae == StrIntrinsicNode::LL) {
+ if (kind1 == JavaKind.Byte && kind2 == JavaKind.Byte) {
+ stride2 = 32;
+ } else {
+ stride2 = 16;
+ }
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ adr_stride = stride << scale.log2;
+ } else {
+ adr_stride1 = 8; // stride << scale1;
+ adr_stride2 = 16; // stride << scale2;
+ }
+
+ assert result.equals(rax) && cnt2.equals(rdx) && cnt1.equals(rcx) : "pcmpestri";
+ // rax and rdx are used by pcmpestri as elements counters
+ masm.movl(result, cnt2);
+ masm.andl(cnt2, ~(stride2 - 1)); // cnt2 holds the vector count
+ masm.jcc(ConditionFlag.Zero, COMPARE_TAIL_LONG);
+
+ // fast path : compare first 2 8-char vectors.
+ masm.bind(COMPARE_16_CHARS);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.movdqu(vec1, new AMD64Address(str1, 0));
+ } else {
+ masm.pmovzxbw(vec1, new AMD64Address(str1, 0));
+ }
+ masm.pcmpestri(vec1, new AMD64Address(str2, 0), pcmpmask);
+ masm.jccb(ConditionFlag.Below, COMPARE_INDEX_CHAR);
+
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.movdqu(vec1, new AMD64Address(str1, adr_stride));
+ masm.pcmpestri(vec1, new AMD64Address(str2, adr_stride), pcmpmask);
+ } else {
+ masm.pmovzxbw(vec1, new AMD64Address(str1, adr_stride1));
+ masm.pcmpestri(vec1, new AMD64Address(str2, adr_stride2), pcmpmask);
+ }
+ masm.jccb(ConditionFlag.AboveEqual, COMPARE_WIDE_VECTORS);
+ masm.addl(cnt1, stride);
+
+ // Compare the characters at index in cnt1
+ masm.bind(COMPARE_INDEX_CHAR); // cnt1 has the offset of the mismatching character
+ loadNextElements(masm, result, cnt2, str1, str2, scale, scale1, scale2, cnt1);
+ masm.subl(result, cnt2);
+ masm.jmp(POP_LABEL);
+
+ // Setup the registers to start vector comparison loop
+ masm.bind(COMPARE_WIDE_VECTORS);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.leaq(str1, new AMD64Address(str1, result, scale));
+ masm.leaq(str2, new AMD64Address(str2, result, scale));
+ } else {
+ masm.leaq(str1, new AMD64Address(str1, result, scale1));
+ masm.leaq(str2, new AMD64Address(str2, result, scale2));
+ }
+ masm.subl(result, stride2);
+ masm.subl(cnt2, stride2);
+ masm.jcc(ConditionFlag.Zero, COMPARE_WIDE_TAIL);
+ masm.negq(result);
+
+ // In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest)
+ masm.bind(COMPARE_WIDE_VECTORS_LOOP);
+
+ // if (VM_Version::supports_avx512vlbw()) { // trying 64 bytes fast loop
+ if (supportsAVX512VLBW(crb.target)) {
+ masm.cmpl(cnt2, stride2x2);
+ masm.jccb(ConditionFlag.Below, COMPARE_WIDE_VECTORS_LOOP_AVX2);
+ masm.testl(cnt2, stride2x2 - 1); // cnt2 holds the vector count
+ // means we cannot subtract by 0x40
+ masm.jccb(ConditionFlag.NotZero, COMPARE_WIDE_VECTORS_LOOP_AVX2);
+
+ masm.bind(COMPARE_WIDE_VECTORS_LOOP_AVX3); // the hottest loop
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.evmovdquq(vec1, new AMD64Address(str1, result, scale), AvxVectorLen.AVX_512bit);
+ // k7 == 11..11, if operands equal, otherwise k7 has some 0
+ masm.evpcmpeqb(k7, vec1, new AMD64Address(str2, result, scale), AvxVectorLen.AVX_512bit);
+ } else {
+ masm.vpmovzxbw(vec1, new AMD64Address(str1, result, scale1), AvxVectorLen.AVX_512bit);
+ // k7 == 11..11, if operands equal, otherwise k7 has some 0
+ masm.evpcmpeqb(k7, vec1, new AMD64Address(str2, result, scale2), AvxVectorLen.AVX_512bit);
+ }
+ masm.kortestql(k7, k7);
+ masm.jcc(ConditionFlag.AboveEqual, COMPARE_WIDE_VECTORS_LOOP_FAILED); // miscompare
+ masm.addq(result, stride2x2); // update since we already compared at this addr
+ masm.subl(cnt2, stride2x2); // and sub the size too
+ masm.jccb(ConditionFlag.NotZero, COMPARE_WIDE_VECTORS_LOOP_AVX3);
+
+ masm.vpxor(vec1, vec1, vec1);
+ masm.jmpb(COMPARE_WIDE_TAIL);
+ }
+
+ masm.bind(COMPARE_WIDE_VECTORS_LOOP_AVX2);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.vmovdqu(vec1, new AMD64Address(str1, result, scale));
+ masm.vpxor(vec1, vec1, new AMD64Address(str2, result, scale));
+ } else {
+ masm.vpmovzxbw(vec1, new AMD64Address(str1, result, scale1), AvxVectorLen.AVX_256bit);
+ masm.vpxor(vec1, vec1, new AMD64Address(str2, result, scale2));
+ }
+ masm.vptest(vec1, vec1);
+ masm.jcc(ConditionFlag.NotZero, VECTOR_NOT_EQUAL);
+ masm.addq(result, stride2);
+ masm.subl(cnt2, stride2);
+ masm.jcc(ConditionFlag.NotZero, COMPARE_WIDE_VECTORS_LOOP);
+ // clean upper bits of YMM registers
+ masm.vpxor(vec1, vec1, vec1);
+
+ // compare wide vectors tail
+ masm.bind(COMPARE_WIDE_TAIL);
+ masm.testq(result, result);
+ masm.jcc(ConditionFlag.Zero, LENGTH_DIFF_LABEL);
+
+ masm.movl(result, stride2);
+ masm.movl(cnt2, result);
+ masm.negq(result);
+ masm.jmp(COMPARE_WIDE_VECTORS_LOOP_AVX2);
+
+ // Identifies the mismatching (higher or lower) 16 bytes in the 32-byte vectors.
+ masm.bind(VECTOR_NOT_EQUAL);
+ // clean upper bits of YMM registers
+ masm.vpxor(vec1, vec1, vec1);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.leaq(str1, new AMD64Address(str1, result, scale));
+ masm.leaq(str2, new AMD64Address(str2, result, scale));
+ } else {
+ masm.leaq(str1, new AMD64Address(str1, result, scale1));
+ masm.leaq(str2, new AMD64Address(str2, result, scale2));
+ }
+ masm.jmp(COMPARE_16_CHARS);
+
+ // Compare tail chars, length between 1 and 15 chars
+ masm.bind(COMPARE_TAIL_LONG);
+ masm.movl(cnt2, result);
+ masm.cmpl(cnt2, stride);
+ masm.jcc(ConditionFlag.Less, COMPARE_SMALL_STR);
+
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.movdqu(vec1, new AMD64Address(str1, 0));
+ } else {
+ masm.pmovzxbw(vec1, new AMD64Address(str1, 0));
+ }
+ masm.pcmpestri(vec1, new AMD64Address(str2, 0), pcmpmask);
+ masm.jcc(ConditionFlag.Below, COMPARE_INDEX_CHAR);
+ masm.subq(cnt2, stride);
+ masm.jcc(ConditionFlag.Zero, LENGTH_DIFF_LABEL);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.leaq(str1, new AMD64Address(str1, result, scale));
+ masm.leaq(str2, new AMD64Address(str2, result, scale));
+ } else {
+ masm.leaq(str1, new AMD64Address(str1, result, scale1));
+ masm.leaq(str2, new AMD64Address(str2, result, scale2));
+ }
+ masm.negq(cnt2);
+ masm.jmpb(WHILE_HEAD_LABEL);
+
+ masm.bind(COMPARE_SMALL_STR);
+ } else if (supportsSSE42(crb.target)) {
+ Register vec1 = asRegister(vectorTemp1, AMD64Kind.DOUBLE);
+
+ // Checkstyle: stop
+ Label COMPARE_WIDE_VECTORS = new Label();
+ Label VECTOR_NOT_EQUAL = new Label();
+ Label COMPARE_TAIL = new Label();
+ // Checkstyle: resume
+ int pcmpmask = 0x19;
+ // Setup to compare 8-char (16-byte) vectors,
+ // start from first character again because it has aligned address.
+ masm.movl(result, cnt2);
+ masm.andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
+ // if (ae == StrIntrinsicNode::LL) {
+ if (kind1 == JavaKind.Byte && kind2 == JavaKind.Byte) {
+ pcmpmask &= ~0x01;
+ }
+ masm.jcc(ConditionFlag.Zero, COMPARE_TAIL);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.leaq(str1, new AMD64Address(str1, result, scale));
+ masm.leaq(str2, new AMD64Address(str2, result, scale));
+ } else {
+ masm.leaq(str1, new AMD64Address(str1, result, scale1));
+ masm.leaq(str2, new AMD64Address(str2, result, scale2));
+ }
+ masm.negq(result);
+
+ // pcmpestri
+ // inputs:
+ // vec1- substring
+ // rax - negative string length (elements count)
+ // mem - scanned string
+ // rdx - string length (elements count)
+ // pcmpmask - cmp mode: 11000 (string compare with negated result)
+ // + 00 (unsigned bytes) or + 01 (unsigned shorts)
+ // outputs:
+ // rcx - first mismatched element index
+ assert result.equals(rax) && cnt2.equals(rdx) && cnt1.equals(rcx) : "pcmpestri";
+
+ masm.bind(COMPARE_WIDE_VECTORS);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.movdqu(vec1, new AMD64Address(str1, result, scale));
+ masm.pcmpestri(vec1, new AMD64Address(str2, result, scale), pcmpmask);
+ } else {
+ masm.pmovzxbw(vec1, new AMD64Address(str1, result, scale1));
+ masm.pcmpestri(vec1, new AMD64Address(str2, result, scale2), pcmpmask);
+ }
+ // After pcmpestri cnt1(rcx) contains mismatched element index
+
+ masm.jccb(ConditionFlag.Below, VECTOR_NOT_EQUAL); // CF==1
+ masm.addq(result, stride);
+ masm.subq(cnt2, stride);
+ masm.jccb(ConditionFlag.NotZero, COMPARE_WIDE_VECTORS);
+
+ // compare wide vectors tail
+ masm.testq(result, result);
+ masm.jcc(ConditionFlag.Zero, LENGTH_DIFF_LABEL);
+
+ masm.movl(cnt2, stride);
+ masm.movl(result, stride);
+ masm.negq(result);
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.movdqu(vec1, new AMD64Address(str1, result, scale));
+ masm.pcmpestri(vec1, new AMD64Address(str2, result, scale), pcmpmask);
+ } else {
+ masm.pmovzxbw(vec1, new AMD64Address(str1, result, scale1));
+ masm.pcmpestri(vec1, new AMD64Address(str2, result, scale2), pcmpmask);
+ }
+ masm.jccb(ConditionFlag.AboveEqual, LENGTH_DIFF_LABEL);
+
+ // Mismatched characters in the vectors
+ masm.bind(VECTOR_NOT_EQUAL);
+ masm.addq(cnt1, result);
+ loadNextElements(masm, result, cnt2, str1, str2, scale, scale1, scale2, cnt1);
+ masm.subl(result, cnt2);
+ masm.jmpb(POP_LABEL);
+
+ masm.bind(COMPARE_TAIL); // limit is zero
+ masm.movl(cnt2, result);
+ // Fallthru to tail compare
+ }
+
+ // Shift str2 and str1 to the end of the arrays, negate min
+ // if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
+ if (kind1 == kind2) {
+ masm.leaq(str1, new AMD64Address(str1, cnt2, scale));
+ masm.leaq(str2, new AMD64Address(str2, cnt2, scale));
+ } else {
+ masm.leaq(str1, new AMD64Address(str1, cnt2, scale1));
+ masm.leaq(str2, new AMD64Address(str2, cnt2, scale2));
+ }
+ masm.decrementl(cnt2); // first character was compared already
+ masm.negq(cnt2);
+
+ // Compare the rest of the elements
+ masm.bind(WHILE_HEAD_LABEL);
+ loadNextElements(masm, result, cnt1, str1, str2, scale, scale1, scale2, cnt2);
+ masm.subl(result, cnt1);
+ masm.jccb(ConditionFlag.NotZero, POP_LABEL);
+ masm.incrementq(cnt2, 1);
+ masm.jccb(ConditionFlag.NotZero, WHILE_HEAD_LABEL);
+
+ // Strings are equal up to min length. Return the length difference.
+ masm.bind(LENGTH_DIFF_LABEL);
+ masm.pop(result);
+ // if (ae == StrIntrinsicNode::UU) {
+ if (kind1 == JavaKind.Char && kind2 == JavaKind.Char) {
+ // Divide diff by 2 to get number of chars
+ masm.sarl(result, 1);
+ }
+ masm.jmpb(DONE_LABEL);
+
+ // if (VM_Version::supports_avx512vlbw()) {
+ if (supportsAVX512VLBW(crb.target)) {
+ masm.bind(COMPARE_WIDE_VECTORS_LOOP_FAILED);
+
+ masm.kmovql(cnt1, k7);
+ masm.notq(cnt1);
+ masm.bsfq(cnt2, cnt1);
+ // if (ae != StrIntrinsicNode::LL) {
+ if (kind1 != JavaKind.Byte || kind2 != JavaKind.Byte) {
+ // Divide diff by 2 to get number of chars
+ masm.sarl(cnt2, 1);
+ }
+ masm.addq(result, cnt2);
+ // if (ae == StrIntrinsicNode::LL) {
+ if (kind1 == JavaKind.Byte && kind2 == JavaKind.Byte) {
+ masm.movzbl(cnt1, new AMD64Address(str2, result, Scale.Times1));
+ masm.movzbl(result, new AMD64Address(str1, result, Scale.Times1));
+ } else if (kind1 == JavaKind.Char && kind2 == JavaKind.Char) {
+ masm.movzwl(cnt1, new AMD64Address(str2, result, scale));
+ masm.movzwl(result, new AMD64Address(str1, result, scale));
+ } else {
+ masm.movzwl(cnt1, new AMD64Address(str2, result, scale2));
+ masm.movzbl(result, new AMD64Address(str1, result, scale1));
+ }
+ masm.subl(result, cnt1);
+ masm.jmpb(POP_LABEL);
+ }
+
+ // Discard the stored length difference
+ masm.bind(POP_LABEL);
+ masm.pop(cnt1);
+
+ // That's it
+ masm.bind(DONE_LABEL);
+ // if (ae == StrIntrinsicNode::UL) {
+ if (kind1 == JavaKind.Char && kind2 == JavaKind.Byte) {
+ masm.negl(result);
+ }
+ }
+
+ private void loadNextElements(AMD64MacroAssembler masm, Register elem1, Register elem2, Register str1, Register str2,
+ AMD64Address.Scale scale, AMD64Address.Scale scale1,
+ AMD64Address.Scale scale2, Register index) {
+ // if (ae == StrIntrinsicNode::LL) {
+ if (kind1 == JavaKind.Byte && kind2 == JavaKind.Byte) {
+ masm.movzbl(elem1, new AMD64Address(str1, index, scale, 0));
+ masm.movzbl(elem2, new AMD64Address(str2, index, scale, 0));
+ // } else if (ae == StrIntrinsicNode::UU) {
+ } else if (kind1 == JavaKind.Char && kind2 == JavaKind.Char) {
+ masm.movzwl(elem1, new AMD64Address(str1, index, scale, 0));
+ masm.movzwl(elem2, new AMD64Address(str2, index, scale, 0));
+ } else {
+ masm.movzbl(elem1, new AMD64Address(str1, index, scale1, 0));
+ masm.movzwl(elem2, new AMD64Address(str2, index, scale2, 0));
+ }
+ }
+
+ private static final Unsafe UNSAFE = initUnsafe();
+
+ private static Unsafe initUnsafe() {
+ try {
+ return Unsafe.getUnsafe();
+ } catch (SecurityException se) {
+ try {
+ Field theUnsafe = Unsafe.class.getDeclaredField("theUnsafe");
+ theUnsafe.setAccessible(true);
+ return (Unsafe) theUnsafe.get(Unsafe.class);
+ } catch (Exception e) {
+ throw new RuntimeException("exception while trying to get Unsafe", e);
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64LFenceOp.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64;
+
+import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.Opcode;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+
+@Opcode("LFENCE")
+public final class AMD64LFenceOp extends AMD64LIRInstruction {
+ public static final LIRInstructionClass<AMD64LFenceOp> TYPE = LIRInstructionClass.create(AMD64LFenceOp.class);
+
+ public AMD64LFenceOp() {
+ super(TYPE);
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler asm) {
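+ // lfence does not execute until all prior instructions have completed locally, which makes it usable as a speculation barrier.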
+ asm.lfence();
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Move.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64Move.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,11 @@
*/
package org.graalvm.compiler.lir.amd64;
+import static java.lang.Double.doubleToRawLongBits;
+import static java.lang.Float.floatToRawIntBits;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+import static jdk.vm.ci.code.ValueUtil.isStackSlot;
import static org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag.Equal;
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
@@ -33,21 +38,16 @@
import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
import static org.graalvm.compiler.lir.LIRValueUtil.asJavaConstant;
import static org.graalvm.compiler.lir.LIRValueUtil.isJavaConstant;
-import static java.lang.Double.doubleToRawLongBits;
-import static java.lang.Float.floatToRawIntBits;
-import static jdk.vm.ci.code.ValueUtil.asRegister;
-import static jdk.vm.ci.code.ValueUtil.isRegister;
-import static jdk.vm.ci.code.ValueUtil.isStackSlot;
import org.graalvm.compiler.asm.Label;
-import org.graalvm.compiler.core.common.CompressEncoding;
-import org.graalvm.compiler.core.common.LIRKind;
-import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MIOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.AMD64MOp;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.OperandSize;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
+import org.graalvm.compiler.core.common.CompressEncoding;
+import org.graalvm.compiler.core.common.LIRKind;
+import org.graalvm.compiler.core.common.NumUtil;
import org.graalvm.compiler.core.common.spi.LIRKindTool;
import org.graalvm.compiler.core.common.type.DataPointerConstant;
import org.graalvm.compiler.debug.GraalError;
@@ -59,6 +59,7 @@
import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
import org.graalvm.compiler.lir.VirtualStackSlot;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+import org.graalvm.compiler.options.OptionValues;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.amd64.AMD64Kind;
@@ -763,7 +764,7 @@
@Def({REG, HINT}) private AllocatableValue result;
@Use({REG, CONST}) private Value input;
- @Alive({REG, ILLEGAL}) private AllocatableValue baseRegister;
+ @Alive({REG, ILLEGAL, UNINITIALIZED}) private AllocatableValue baseRegister;
protected PointerCompressionOp(LIRInstructionClass<? extends PointerCompressionOp> type, AllocatableValue result, Value input,
AllocatableValue baseRegister, CompressEncoding encoding, boolean nonNull, LIRKindTool lirKindTool) {
@@ -777,8 +778,8 @@
this.lirKindTool = lirKindTool;
}
- protected boolean hasBase(CompilationResultBuilder crb) {
- return GeneratePIC.getValue(crb.getOptions()) || encoding.hasBase();
+ public static boolean hasBase(OptionValues options, CompressEncoding encoding) {
+ return GeneratePIC.getValue(options) || encoding.hasBase();
}
public final Value getInput() {
@@ -820,7 +821,7 @@
move(lirKindTool.getObjectKind(), crb, masm);
Register resReg = asRegister(getResult());
- if (hasBase(crb)) {
+ if (hasBase(crb.getOptions(), encoding)) {
Register baseReg = getBaseRegister();
if (!nonNull) {
masm.testq(resReg, resReg);
@@ -852,15 +853,15 @@
@Override
public void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
move(lirKindTool.getNarrowOopKind(), crb, masm);
+ emitUncompressCode(masm, asRegister(getResult()), getShift(), hasBase(crb.getOptions(), encoding) ? getBaseRegister() : null, nonNull);
+ }
- Register resReg = asRegister(getResult());
- int shift = getShift();
+ public static void emitUncompressCode(AMD64MacroAssembler masm, Register resReg, int shift, Register baseReg, boolean nonNull) {
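+ // A null baseReg means the encoding has no base register, so only the shift is applied.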
if (shift != 0) {
masm.shlq(resReg, shift);
}
- if (hasBase(crb)) {
- Register baseReg = getBaseRegister();
+ if (baseReg != null) {
if (nonNull) {
masm.addq(resReg, baseReg);
return;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/LIRInstructionClass.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/LIRInstructionClass.java Sat Mar 24 01:08:35 2018 +0100
@@ -96,7 +96,11 @@
try {
Field field = clazz.getDeclaredField("TYPE");
field.setAccessible(true);
- return (LIRInstructionClass<T>) field.get(null);
+ LIRInstructionClass<T> result = (LIRInstructionClass<T>) field.get(null);
+ if (result == null) {
+ throw GraalError.shouldNotReachHere("TYPE field not initialized for class " + clazz.getTypeName());
+ }
+ return result;
} catch (IllegalArgumentException | IllegalAccessException | NoSuchFieldException | SecurityException e) {
throw new RuntimeException(e);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/GlobalLivenessInfo.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/GlobalLivenessInfo.java Sat Mar 24 01:08:35 2018 +0100
@@ -53,7 +53,11 @@
public final int[] emptySet;
public Builder(LIR lir) {
- info = new GlobalLivenessInfo(lir);
+ this(lir.numVariables(), lir.getControlFlowGraph().getBlocks().length);
+ }
+
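+ // Allows building liveness info when the variable and block counts are known without a LIR instance.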
+ public Builder(int numVariables, int numBlocks) {
+ info = new GlobalLivenessInfo(numVariables, numBlocks);
emptySet = new int[0];
}
@@ -97,10 +101,8 @@
private final Value[][] blockToLocIn;
private final Value[][] blockToLocOut;
- private GlobalLivenessInfo(LIR lir) {
- int numVariables = lir.numVariables();
+ private GlobalLivenessInfo(int numVariables, int numBlocks) {
variables = new Variable[numVariables];
- int numBlocks = lir.getControlFlowGraph().getBlocks().length;
blockToVarIn = new int[numBlocks][];
blockToVarOut = new int[numBlocks][];
blockToLocIn = new Value[numBlocks][];
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/TraceAssertions.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.alloc.trace;
+
+import org.graalvm.compiler.core.common.alloc.Trace;
+import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
+
+/**
+ * A collection of assertions that are assumed to hold in various places of the Trace Register
+ * Allocation framework.
+ *
+ * The main goal is to document pieces of code that rely on specific properties of traces. In
+ * case an assumption is no longer valid, this makes it easy (assuming the assertions are used
+ * correctly) to find places that need changes.
+ */
+final class TraceAssertions {
+
+ /**
+ * Asserts that variable indices are properly sorted.
+ */
+ public static boolean liveSetsAreSorted(GlobalLivenessInfo livenessInfo, AbstractBlockBase<?> block) {
+ return isSorted(livenessInfo.getBlockIn(block)) && isSorted(livenessInfo.getBlockOut(block));
+ }
+
+ private static boolean isSorted(int[] live) {
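+ // Checks for strictly increasing indices; duplicate entries also fail.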
+ if (live.length == 0) {
+ return true;
+ }
+ int current = live[0];
+ for (int i = 1; i < live.length; i++) {
+ int last = current;
+ current = live[i];
+ if (current <= last) {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ /**
+ * Asserts that a trace head has only a single predecessor.
+ *
+ * This is not true for every trace-building algorithm (for example
+ * {@link TraceBuilderPhase.TraceBuilder#SingleBlock}).
+ */
+ public static boolean singleHeadPredecessor(Trace trace) {
+ return trace.getBlocks()[0].getPredecessorCount() == 1;
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/TraceGlobalMoveResolutionPhase.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/TraceGlobalMoveResolutionPhase.java Sat Mar 24 01:08:35 2018 +0100
@@ -84,32 +84,58 @@
DebugContext debug = lir.getDebug();
try (Indent indent = debug.logAndIndent("Trace global move resolution")) {
for (Trace trace : resultTraces.getTraces()) {
- for (AbstractBlockBase<?> fromBlock : trace.getBlocks()) {
- for (AbstractBlockBase<?> toBlock : fromBlock.getSuccessors()) {
- if (resultTraces.getTraceForBlock(fromBlock) != resultTraces.getTraceForBlock(toBlock)) {
- try (Indent indent0 = debug.logAndIndent("Handle trace edge from %s (Trace%d) to %s (Trace%d)", fromBlock, resultTraces.getTraceForBlock(fromBlock).getId(), toBlock,
- resultTraces.getTraceForBlock(toBlock).getId())) {
+ resolveTrace(resultTraces, livenessInfo, lir, moveResolver, trace);
+ }
+ }
+ }
- final ArrayList<LIRInstruction> instructions;
- final int insertIdx;
- if (fromBlock.getSuccessorCount() == 1) {
- instructions = lir.getLIRforBlock(fromBlock);
- insertIdx = instructions.size() - 1;
- } else {
- assert toBlock.getPredecessorCount() == 1;
- instructions = lir.getLIRforBlock(toBlock);
- insertIdx = 1;
- }
-
- moveResolver.setInsertPosition(instructions, insertIdx);
- resolveEdge(lir, livenessInfo, moveResolver, fromBlock, toBlock);
- moveResolver.resolveAndAppendMoves();
- }
- }
+ private static void resolveTrace(TraceBuilderResult resultTraces, GlobalLivenessInfo livenessInfo, LIR lir, TraceGlobalMoveResolver moveResolver, Trace trace) {
+ AbstractBlockBase<?>[] traceBlocks = trace.getBlocks();
+ int traceLength = traceBlocks.length;
+ // all but the last block
+ AbstractBlockBase<?> nextBlock = traceBlocks[0];
+ for (int i = 1; i < traceLength; i++) {
+ AbstractBlockBase<?> fromBlock = nextBlock;
+ nextBlock = traceBlocks[i];
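+ // The edge to the next block within the trace needs no resolution; only edges leaving the trace do.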
+ if (fromBlock.getSuccessorCount() > 1) {
+ for (AbstractBlockBase<?> toBlock : fromBlock.getSuccessors()) {
+ if (toBlock != nextBlock) {
+ interTraceEdge(resultTraces, livenessInfo, lir, moveResolver, fromBlock, toBlock);
}
}
}
}
+ // last block
+ assert nextBlock == traceBlocks[traceLength - 1];
+ for (AbstractBlockBase<?> toBlock : nextBlock.getSuccessors()) {
+ if (resultTraces.getTraceForBlock(nextBlock) != resultTraces.getTraceForBlock(toBlock)) {
+ interTraceEdge(resultTraces, livenessInfo, lir, moveResolver, nextBlock, toBlock);
+ }
+ }
+ }
+
+ @SuppressWarnings("try")
+ private static void interTraceEdge(TraceBuilderResult resultTraces, GlobalLivenessInfo livenessInfo, LIR lir, TraceGlobalMoveResolver moveResolver, AbstractBlockBase<?> fromBlock,
+ AbstractBlockBase<?> toBlock) {
+ DebugContext debug = lir.getDebug();
+ try (Indent indent0 = debug.logAndIndent("Handle trace edge from %s (Trace%d) to %s (Trace%d)", fromBlock, resultTraces.getTraceForBlock(fromBlock).getId(), toBlock,
+ resultTraces.getTraceForBlock(toBlock).getId())) {
+
+ final ArrayList<LIRInstruction> instructions;
+ final int insertIdx;
+ if (fromBlock.getSuccessorCount() == 1) {
+ instructions = lir.getLIRforBlock(fromBlock);
+ insertIdx = instructions.size() - 1;
+ } else {
+ assert toBlock.getPredecessorCount() == 1;
+ instructions = lir.getLIRforBlock(toBlock);
+ insertIdx = 1;
+ }
+
+ moveResolver.setInsertPosition(instructions, insertIdx);
+ resolveEdge(lir, livenessInfo, moveResolver, fromBlock, toBlock);
+ moveResolver.resolveAndAppendMoves();
+ }
}
private static void resolveEdge(LIR lir, GlobalLivenessInfo livenessInfo, TraceGlobalMoveResolver moveResolver, AbstractBlockBase<?> fromBlock, AbstractBlockBase<?> toBlock) {
@@ -129,6 +155,10 @@
// GLI
Value[] locFrom = livenessInfo.getOutLocation(fromBlock);
Value[] locTo = livenessInfo.getInLocation(toBlock);
+ if (locFrom == locTo) {
+ // a strategy might reuse the locations array if locations are the same
+ return;
+ }
assert locFrom.length == locTo.length;
for (int i = 0; i < locFrom.length; i++) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/TraceUtil.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/TraceUtil.java Sat Mar 24 01:08:35 2018 +0100
@@ -36,19 +36,6 @@
public class TraceUtil {
- public static AbstractBlockBase<?> getBestTraceInterPredecessor(TraceBuilderResult traceResult, AbstractBlockBase<?> block) {
- AbstractBlockBase<?> bestPred = null;
- int bestTraceId = traceResult.getTraceForBlock(block).getId();
- for (AbstractBlockBase<?> pred : block.getPredecessors()) {
- int predTraceId = traceResult.getTraceForBlock(pred).getId();
- if (predTraceId < bestTraceId) {
- bestPred = pred;
- bestTraceId = predTraceId;
- }
- }
- return bestPred;
- }
-
public static boolean isShadowedRegisterValue(Value value) {
assert value != null;
return value instanceof ShadowedRegisterValue;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/TrivialTraceAllocator.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/TrivialTraceAllocator.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,12 +26,13 @@
import static org.graalvm.compiler.lir.LIRValueUtil.isVariable;
import static org.graalvm.compiler.lir.alloc.trace.TraceUtil.isTrivialTrace;
+import java.util.Arrays;
import java.util.EnumSet;
import org.graalvm.compiler.core.common.alloc.Trace;
-import org.graalvm.compiler.core.common.alloc.TraceBuilderResult;
import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
import org.graalvm.compiler.lir.LIR;
+import org.graalvm.compiler.lir.LIRInstruction;
import org.graalvm.compiler.lir.LIRInstruction.OperandFlag;
import org.graalvm.compiler.lir.LIRInstruction.OperandMode;
import org.graalvm.compiler.lir.StandardOp.JumpOp;
@@ -47,68 +48,67 @@
* Allocates a trivial trace i.e. a trace consisting of a single block with no instructions other
* than the {@link LabelOp} and the {@link JumpOp}.
*/
-final class TrivialTraceAllocator extends TraceAllocationPhase<TraceAllocationPhase.TraceAllocationContext> {
+public final class TrivialTraceAllocator extends TraceAllocationPhase<TraceAllocationPhase.TraceAllocationContext> {
@Override
protected void run(TargetDescription target, LIRGenerationResult lirGenRes, Trace trace, TraceAllocationContext context) {
LIR lir = lirGenRes.getLIR();
- TraceBuilderResult resultTraces = context.resultTraces;
assert isTrivialTrace(lir, trace) : "Not a trivial trace! " + trace;
AbstractBlockBase<?> block = trace.getBlocks()[0];
-
- AbstractBlockBase<?> pred = TraceUtil.getBestTraceInterPredecessor(resultTraces, block);
+ assert TraceAssertions.singleHeadPredecessor(trace) : "Trace head with more than one predecessor?! " + trace;
+ AbstractBlockBase<?> pred = block.getPredecessors()[0];
- Value[] variableMap = new Value[lir.numVariables()];
GlobalLivenessInfo livenessInfo = context.livenessInfo;
- collectMapping(block, pred, livenessInfo, variableMap);
- assignLocations(lir, block, livenessInfo, variableMap);
+ allocate(block, pred, livenessInfo, SSAUtil.phiOutOrNull(lir, block));
}
- /**
- * Collects the mapping from variable to location. Additionally the
- * {@link GlobalLivenessInfo#setInLocations incoming location array} is set.
- */
- private static void collectMapping(AbstractBlockBase<?> block, AbstractBlockBase<?> pred, GlobalLivenessInfo livenessInfo, Value[] variableMap) {
+ public static void allocate(AbstractBlockBase<?> block, AbstractBlockBase<?> pred, GlobalLivenessInfo livenessInfo, LIRInstruction jump) {
+ // exploit that the live sets are sorted
+ assert TraceAssertions.liveSetsAreSorted(livenessInfo, block);
+ assert TraceAssertions.liveSetsAreSorted(livenessInfo, pred);
+
+ // setup incoming variables/locations
final int[] blockIn = livenessInfo.getBlockIn(block);
final Value[] predLocOut = livenessInfo.getOutLocation(pred);
- final Value[] locationIn = new Value[blockIn.length];
- for (int i = 0; i < blockIn.length; i++) {
- int varNum = blockIn[i];
- if (varNum >= 0) {
- Value location = predLocOut[i];
- variableMap[varNum] = location;
- locationIn[i] = location;
- } else {
- locationIn[i] = Value.ILLEGAL;
+ int inLength = blockIn.length;
+
+ // setup outgoing variables/locations
+ final int[] blockOut = livenessInfo.getBlockOut(block);
+ int outLength = blockOut.length;
+ final Value[] locationOut = new Value[outLength];
+
+ assert outLength <= inLength : "Trivial Trace! There cannot be more outgoing values than incoming.";
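+ // Both live sets are sorted, so one linear pass maps each outgoing variable to its incoming location.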
+ for (int outIdx = 0, inIdx = 0; outIdx < outLength; inIdx++) {
+ if (blockOut[outIdx] == blockIn[inIdx]) {
+ // set the outgoing location to the incoming value
+ locationOut[outIdx++] = predLocOut[inIdx];
}
}
- livenessInfo.setInLocations(block, locationIn);
+
+ /*
+ * Since we do not change any of the locations, we can simply reuse the outgoing locations
+ * of the predecessor.
+ */
+ livenessInfo.setInLocations(block, predLocOut);
+ livenessInfo.setOutLocations(block, locationOut);
+ if (jump != null) {
+ handlePhiOut(jump, blockIn, predLocOut);
+ }
}
- /**
- * Assigns the outgoing locations according to the {@link #collectMapping variable mapping}.
- */
- private static void assignLocations(LIR lir, AbstractBlockBase<?> block, GlobalLivenessInfo livenessInfo, Value[] variableMap) {
- final int[] blockOut = livenessInfo.getBlockOut(block);
- final Value[] locationOut = new Value[blockOut.length];
- for (int i = 0; i < blockOut.length; i++) {
- int varNum = blockOut[i];
- locationOut[i] = variableMap[varNum];
- }
- livenessInfo.setOutLocations(block, locationOut);
-
+ private static void handlePhiOut(LIRInstruction jump, int[] varIn, Value[] locIn) {
// handle outgoing phi values
ValueProcedure outputConsumer = new ValueProcedure() {
@Override
public Value doValue(Value value, OperandMode mode, EnumSet<OperandFlag> flags) {
if (isVariable(value)) {
- return variableMap[asVariable(value).index];
+ // since incoming variables are sorted, we can do a binary search
+ return locIn[Arrays.binarySearch(varIn, asVariable(value).index)];
}
return value;
}
};
- JumpOp jump = SSAUtil.phiOut(lir, block);
// Jumps have only alive values (outgoing phi values)
jump.forEachAlive(outputConsumer);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/bu/BottomUpAllocator.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/bu/BottomUpAllocator.java Sat Mar 24 01:08:35 2018 +0100
@@ -501,11 +501,12 @@
try (Indent indent = debug.logAndIndent("handle block %s", block)) {
currentInstructions = getLIR().getLIRforBlock(block);
- for (currentInstructionIndex = currentInstructions.size() - 1; currentInstructionIndex >= 0; currentInstructionIndex--) {
+ final int lastInstIdx = currentInstructions.size() - 1;
+ for (currentInstructionIndex = lastInstIdx; currentInstructionIndex >= 0; currentInstructionIndex--) {
LIRInstruction inst = currentInstructions.get(currentInstructionIndex);
if (inst != null) {
inst.setId(currentOpId);
- allocateInstruction(inst, block);
+ allocateInstruction(inst, block, currentInstructionIndex == 0, currentInstructionIndex == lastInstIdx);
}
}
allocatedBlocks.set(block.getId());
@@ -514,7 +515,7 @@
}
@SuppressWarnings("try")
- private void allocateInstruction(LIRInstruction op, AbstractBlockBase<?> block) {
+ private void allocateInstruction(LIRInstruction op, AbstractBlockBase<?> block, boolean isLabel, boolean isBlockEnd) {
assert op != null && op.id() == currentOpId;
try (Indent indent = debug.logAndIndent("handle inst: %d: %s", op.id(), op)) {
try (Indent indent1 = debug.logAndIndent("output pos")) {
@@ -537,7 +538,8 @@
// should have
op.forEachTemp(allocStackOrRegisterProcedure);
op.forEachOutput(allocStackOrRegisterProcedure);
- if (op instanceof LabelOp) {
+ if (isLabel) {
+ assert op instanceof LabelOp;
processIncoming(block, op);
}
}
@@ -551,7 +553,8 @@
op.forEachInput(allocRegisterProcedure);
op.forEachAlive(allocStackOrRegisterProcedure);
- if (op instanceof BlockEndOp) {
+ if (isBlockEnd) {
+ assert op instanceof BlockEndOp;
processOutgoing(block, op);
}
op.forEachState(allocStackOrRegisterProcedure);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceLinearScanLifetimeAnalysisPhase.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceLinearScanLifetimeAnalysisPhase.java Sat Mar 24 01:08:35 2018 +0100
@@ -112,10 +112,6 @@
buildIntervals();
}
- private boolean isAllocated(AbstractBlockBase<?> currentBlock, AbstractBlockBase<?> other) {
- return traceBuilderResult.getTraceForBlock(other).getId() < traceBuilderResult.getTraceForBlock(currentBlock).getId();
- }
-
/**
* Count instructions in all blocks. The numbering follows the
* {@linkplain TraceLinearScan#sortedBlocks() register allocation order}.
@@ -621,26 +617,29 @@
assert allocator.instructionForId(opId) == op : "must match";
}
+ /**
+ * Add register hints for incoming values, i.e., values that are not defined in the trace.
+ *
+ * Due to the dominance property of SSA form, every value that is live at some point in the
+ * trace but not defined in it is already live at the beginning of the trace.
+ */
@SuppressWarnings("try")
private void addInterTraceHints() {
try (DebugContext.Scope s = debug.scope("InterTraceHints", allocator)) {
- GlobalLivenessInfo livenessInfo = allocator.getGlobalLivenessInfo();
- // set hints for phi/incoming intervals
- for (AbstractBlockBase<?> block : sortedBlocks()) {
- LabelOp label = (LabelOp) getLIR().getLIRforBlock(block).get(0);
- for (AbstractBlockBase<?> pred : block.getPredecessors()) {
- addInterTraceHints(livenessInfo, pred, block, label);
- }
+ AbstractBlockBase<?> traceHeadBlock = sortedBlocks()[0];
+ if (traceHeadBlock.getPredecessorCount() == 0) {
+ return;
}
- } catch (Throwable e) {
- throw debug.handle(e);
- }
- }
+ assert traceHeadBlock.getPredecessorCount() == 1 : "Trace head with more than one predecessor?! " + traceHeadBlock;
+
+ AbstractBlockBase<?> pred = traceHeadBlock.getPredecessors()[0];
+ assert traceBuilderResult.getTraceForBlock(pred).getId() < traceBuilderResult.getTraceForBlock(traceHeadBlock).getId() : "Not yet allocated? " + pred;
- private void addInterTraceHints(GlobalLivenessInfo livenessInfo, AbstractBlockBase<?> from, AbstractBlockBase<?> to, LabelOp label) {
- if (isAllocated(to, from)) {
- int[] liveVars = livenessInfo.getBlockIn(to);
- Value[] outLocation = livenessInfo.getOutLocation(from);
+ GlobalLivenessInfo livenessInfo = allocator.getGlobalLivenessInfo();
+ LabelOp label = (LabelOp) getLIR().getLIRforBlock(traceHeadBlock).get(0);
+
+ int[] liveVars = livenessInfo.getBlockIn(traceHeadBlock);
+ Value[] outLocation = livenessInfo.getOutLocation(pred);
for (int i = 0; i < liveVars.length; i++) {
int varNum = liveVars[i];
@@ -652,18 +651,20 @@
}
}
}
+ } catch (Throwable e) {
+ throw debug.handle(e);
}
}
private void addInterTraceHint(LabelOp label, int varNum, Value fromValue) {
- assert isRegister(fromValue) || isVariable(fromValue) || isStackSlotValue(fromValue) || isShadowedRegisterValue(fromValue) : "Wrong fromValue: " + fromValue;
+ assert isRegister(fromValue) || isStackSlotValue(fromValue) || isShadowedRegisterValue(fromValue) : "Wrong fromValue: " + fromValue;
TraceInterval to = allocator.intervalFor(varNum);
if (to == null) {
// variable not live -> do nothing
return;
}
- if (isVariableOrRegister(fromValue)) {
- IntervalHint from = getIntervalHint((AllocatableValue) fromValue);
+ if (isRegister(fromValue)) {
+ IntervalHint from = allocator.getOrCreateFixedInterval(asRegisterValue(fromValue));
setHint(label, to, from, debug);
} else if (isStackSlotValue(fromValue)) {
setSpillSlot(label, to, (AllocatableValue) fromValue, debug);
@@ -672,8 +673,6 @@
IntervalHint from = getIntervalHint(shadowedRegisterValue.getRegister());
setHint(label, to, from, debug);
setSpillSlot(label, to, shadowedRegisterValue.getStackSlot(), debug);
- } else {
- throw GraalError.shouldNotReachHere();
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/gen/LIRGeneratorTool.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/gen/LIRGeneratorTool.java Sat Mar 24 01:08:35 2018 +0100
@@ -252,11 +252,16 @@
Variable emitByteSwap(Value operand);
+ @SuppressWarnings("unused")
+ default Variable emitArrayCompareTo(JavaKind kind1, JavaKind kind2, Value array1, Value array2, Value length1, Value length2) {
+ throw GraalError.unimplemented("String.compareTo substitution is not implemented on this architecture");
+ }
+
Variable emitArrayEquals(JavaKind kind, Value array1, Value array2, Value length);
@SuppressWarnings("unused")
default Variable emitStringIndexOf(Value sourcePointer, Value sourceCount, Value targetPointer, Value targetCount, int constantTargetCount) {
- throw GraalError.unimplemented();
+ throw GraalError.unimplemented("String.indexOf substitution is not implemented on this architecture");
}
void emitBlackhole(Value operand);
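
Both default methods follow the same shape: an optional backend hook whose default implementation fails with a descriptive message, so only architectures that support the substitution need to override it. Sketched with a hypothetical popcount hook:

    public class OptionalHook {
        interface Backend {
            default long emitPopCount(long value) {
                throw new UnsupportedOperationException(
                                "population-count substitution is not implemented on this architecture");
            }
        }

        static final class X64Backend implements Backend {
            @Override
            public long emitPopCount(long value) {
                return Long.bitCount(value); // stand-in for real instruction selection
            }
        }
    }
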
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/phases/LIRPhaseSuite.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/phases/LIRPhaseSuite.java Sat Mar 24 01:08:35 2018 +0100
@@ -40,6 +40,13 @@
}
/**
+ * Gets an unmodifiable view of the phases in this suite.
+ */
+ public List<LIRPhase<C>> getPhases() {
+ return Collections.unmodifiableList(phases);
+ }
+
+ /**
* Add a new phase at the beginning of this suite.
*/
public final void prependPhase(LIRPhase<C> phase) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/ssa/SSAUtil.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/ssa/SSAUtil.java Sat Mar 24 01:08:35 2018 +0100
@@ -112,6 +112,13 @@
return (JumpOp) op;
}
+ public static JumpOp phiOutOrNull(LIR lir, AbstractBlockBase<?> block) {
+ if (block.getSuccessorCount() != 1) {
+ return null;
+ }
+ return phiOut(lir, block);
+ }
+
public static int phiOutIndex(LIR lir, AbstractBlockBase<?> block) {
assert block.getSuccessorCount() == 1;
ArrayList<LIRInstruction> instructions = lir.getLIRforBlock(block);
@@ -181,4 +188,19 @@
return -1;
}
+ public static int numPhiOut(LIR lir, AbstractBlockBase<?> block) {
+ if (block.getSuccessorCount() != 1) {
+ // cannot be a phi_out block
+ return 0;
+ }
+ return numPhiIn(lir, block.getSuccessors()[0]);
+ }
+
+ private static int numPhiIn(LIR lir, AbstractBlockBase<?> block) {
+ if (!isMerge(block)) {
+ return 0;
+ }
+ return phiIn(lir, block).getPhiSize();
+ }
+
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/BasicInductionVariable.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/BasicInductionVariable.java Sat Mar 24 01:08:35 2018 +0100
@@ -28,6 +28,7 @@
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
+import org.graalvm.compiler.core.common.util.UnsignedLong;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.NodeView;
@@ -159,7 +160,7 @@
@Override
public ValueNode exitValueNode() {
Stamp stamp = phi.stamp(NodeView.DEFAULT);
- ValueNode maxTripCount = loop.counted().maxTripCountNode(false);
+ ValueNode maxTripCount = loop.counted().maxTripCountNode();
if (!maxTripCount.stamp(NodeView.DEFAULT).isCompatible(stamp)) {
maxTripCount = IntegerConvertNode.convert(maxTripCount, stamp, graph(), NodeView.DEFAULT);
}
@@ -173,7 +174,11 @@
@Override
public long constantExtremum() {
- return constantStride() * (loop.counted().constantMaxTripCount() - 1) + constantInit();
+ UnsignedLong tripCount = loop.counted().constantMaxTripCount();
+ if (tripCount.isLessThan(1)) {
+ return constantInit();
+ }
+ return tripCount.minus(1).wrappingTimes(constantStride()).wrappingPlus(constantInit()).asLong();
}
@Override
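
The rewritten constantExtremum returns the initial value for zero-trip loops and otherwise uses wrapping arithmetic. That is sound because a 64-bit induction variable itself wraps modulo 2^64: init + stride * (tripCount - 1) computed with plain Java * and + (the assumed semantics of wrappingTimes and wrappingPlus) is exactly the value the variable finally holds, even where checked arithmetic would trap:

    public class WrappingExtremum {
        public static void main(String[] args) {
            long tripCount = 1L << 62; // a huge unsigned trip count
            long stride = 3, init = 0;
            // Math.multiplyExact(tripCount - 1, stride) would throw
            // ArithmeticException, yet the induction variable wraps in the
            // same 64-bit arithmetic, so the wrapped product is its final value:
            long extremum = (tripCount - 1) * stride + init;
            System.out.println(extremum);
        }
    }
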
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/CountedLoopInfo.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/CountedLoopInfo.java Sat Mar 24 01:08:35 2018 +0100
@@ -23,11 +23,12 @@
package org.graalvm.compiler.loop;
import static org.graalvm.compiler.loop.MathUtil.add;
-import static org.graalvm.compiler.loop.MathUtil.divBefore;
import static org.graalvm.compiler.loop.MathUtil.sub;
+import static org.graalvm.compiler.loop.MathUtil.unsignedDivBefore;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
+import org.graalvm.compiler.core.common.util.UnsignedLong;
import org.graalvm.compiler.loop.InductionVariable.Direction;
import org.graalvm.compiler.nodes.AbstractBeginNode;
import org.graalvm.compiler.nodes.ConstantNode;
@@ -39,6 +40,7 @@
import org.graalvm.compiler.nodes.calc.CompareNode;
import org.graalvm.compiler.nodes.calc.ConditionalNode;
import org.graalvm.compiler.nodes.calc.IntegerLessThanNode;
+import org.graalvm.compiler.nodes.calc.NegateNode;
import org.graalvm.compiler.nodes.extended.GuardingNode;
import jdk.vm.ci.code.CodeUtil;
@@ -64,38 +66,58 @@
this.ifNode = ifNode;
}
+ /**
+ * Returns a node that computes the maximum trip count of this loop. That is, the trip count
+ * of this loop assuming it is not exited by any exit other than the
+ * {@linkplain #getLimitTest() count check}.
+ *
+ * This count is exact if {@link #isExactTripCount()} returns true.
+ *
+ * THIS VALUE SHOULD BE TREATED AS UNSIGNED.
+ */
public ValueNode maxTripCountNode() {
return maxTripCountNode(false);
}
+ /**
+ * Returns a node that computes the maximum trip count of this loop. That is, the trip count
+ * of this loop assuming it is not exited by any exit other than the
+ * {@linkplain #getLimitTest() count check}.
+ *
+ * This count is exact if {@link #isExactTripCount()} returns true.
+ *
+ * THIS VALUE SHOULD BE TREATED AS UNSIGNED.
+ *
+ * @param assumePositive if true, the check that the loop is entered at all is omitted.
+ */
public ValueNode maxTripCountNode(boolean assumePositive) {
StructuredGraph graph = iv.valueNode().graph();
Stamp stamp = iv.valueNode().stamp(NodeView.DEFAULT);
- ValueNode range = sub(graph, end, iv.initNode());
ValueNode max;
ValueNode min;
- ValueNode oneDirection;
+ ValueNode range;
+ ValueNode absStride;
if (iv.direction() == Direction.Up) {
- oneDirection = ConstantNode.forIntegerStamp(stamp, 1, graph);
+ absStride = iv.strideNode();
+ range = sub(graph, end, iv.initNode());
max = end;
min = iv.initNode();
} else {
assert iv.direction() == Direction.Down;
- oneDirection = ConstantNode.forIntegerStamp(stamp, -1, graph);
+ absStride = graph.maybeAddOrUnique(NegateNode.create(iv.strideNode(), NodeView.DEFAULT));
+ range = sub(graph, iv.initNode(), end);
max = iv.initNode();
min = end;
}
+
+ ConstantNode one = ConstantNode.forIntegerStamp(stamp, 1, graph);
if (oneOff) {
- range = add(graph, range, oneDirection);
+ range = add(graph, range, one);
}
// round-away-from-zero division: (range + stride -/+ 1) / stride
- ValueNode denominator = range;
- if (!oneDirection.stamp(NodeView.DEFAULT).equals(iv.strideNode().stamp(NodeView.DEFAULT))) {
- ValueNode subedRanged = sub(graph, range, oneDirection);
- denominator = add(graph, subedRanged, iv.strideNode());
- }
- ValueNode div = divBefore(graph, loop.entryPoint(), denominator, iv.strideNode());
+ ValueNode denominator = add(graph, range, sub(graph, absStride, one));
+ ValueNode div = unsignedDivBefore(graph, loop.entryPoint(), denominator, absStride);
if (assumePositive) {
return div;
@@ -105,49 +127,44 @@
}
/**
- * @return true if the loop has constant bounds and the trip count is representable as a
- * positive integer.
+ * @return true if the loop has constant bounds.
*/
public boolean isConstantMaxTripCount() {
- /*
- * It's possible that the iteration range is too large to treat this as constant because it
- * will overflow.
- */
- return (hasConstantBounds() && rawConstantMaxTripCount() >= 0);
- }
-
- /**
- * @return true if the bounds on the iteration space are all constants.
- */
- public boolean hasConstantBounds() {
return end instanceof ConstantNode && iv.isConstantInit() && iv.isConstantStride();
}
- public long constantMaxTripCount() {
+ public UnsignedLong constantMaxTripCount() {
assert isConstantMaxTripCount();
- return rawConstantMaxTripCount();
+ return new UnsignedLong(rawConstantMaxTripCount());
}
/**
- * Compute the raw value of the trip count for this loop. Since we don't have unsigned values
- * this may be outside representable positive values.
+ * Compute the raw value of the trip count for this loop. THIS IS AN UNSIGNED VALUE.
*/
- protected long rawConstantMaxTripCount() {
+ private long rawConstantMaxTripCount() {
assert iv.direction() != null;
- long off = oneOff ? iv.direction() == Direction.Up ? 1 : -1 : 0;
- long endValue = ((ConstantNode) end).asJavaConstant().asLong();
- try {
- // If no overflow occurs then negative values represent a trip count of 0
- long max = Math.subtractExact(Math.addExact(endValue, off), iv.constantInit()) / iv.constantStride();
- return Math.max(0, max);
- } catch (ArithmeticException e) {
- /*
- * The computation overflowed to return a negative value. It's possible some
- * optimization could handle this value as an unsigned and produce the right answer but
- * we hide this value by default.
- */
- return -1;
+ long endValue = end.asJavaConstant().asLong();
+ long initValue = iv.constantInit();
+ long range;
+ long absStride;
+ if (iv.direction() == Direction.Up) {
+ if (endValue < initValue) {
+ return 0;
+ }
+ range = endValue - iv.constantInit();
+ absStride = iv.constantStride();
+ } else {
+ if (initValue < endValue) {
+ return 0;
+ }
+ range = iv.constantInit() - endValue;
+ absStride = -iv.constantStride();
}
+ if (oneOff) {
+ range += 1;
+ }
+ long denominator = range + absStride - 1;
+ return Long.divideUnsigned(denominator, absStride);
}
public boolean isExactTripCount() {
@@ -164,7 +181,7 @@
return isConstantMaxTripCount();
}
- public long constantExactTripCount() {
+ public UnsignedLong constantExactTripCount() {
assert isExactTripCount();
return constantMaxTripCount();
}
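
The new rawConstantMaxTripCount is a round-up unsigned division: the non-negative range is padded with absStride - 1 and divided via Long.divideUnsigned, so ranges above 2^63 stay correct. A standalone check of the same formula for an up-counting i < end loop (illustrative only):

    public class TripCount {
        // ceil(range / stride), computed unsigned, for:
        //   for (long i = init; i < end; i += stride)   with stride > 0
        static long maxTripCount(long init, long end, long stride) {
            if (end < init) {
                return 0; // loop is never entered
            }
            long range = end - init; // may exceed Long.MAX_VALUE read as unsigned
            return Long.divideUnsigned(range + stride - 1, stride);
        }

        public static void main(String[] args) {
            System.out.println(maxTripCount(0, 10, 3)); // 4 (i = 0, 3, 6, 9)
            // range wraps to -1, i.e. 2^64 - 1 unsigned; the division still works:
            System.out.println(Long.toUnsignedString(
                            maxTripCount(Long.MIN_VALUE, Long.MAX_VALUE, 1)));
        }
    }
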
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/DefaultLoopPolicies.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/DefaultLoopPolicies.java Sat Mar 24 01:08:35 2018 +0100
@@ -28,6 +28,7 @@
import java.util.List;
+import org.graalvm.compiler.core.common.util.UnsignedLong;
import org.graalvm.compiler.debug.CounterKey;
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.graph.Node;
@@ -89,11 +90,22 @@
}
OptionValues options = loop.entryPoint().getOptions();
CountedLoopInfo counted = loop.counted();
- long maxTrips = counted.constantMaxTripCount();
+ UnsignedLong maxTrips = counted.constantMaxTripCount();
+ if (maxTrips.equals(0)) {
+ return loop.canDuplicateLoop();
+ }
int maxNodes = (counted.isExactTripCount() && counted.isConstantExactTripCount()) ? Options.ExactFullUnrollMaxNodes.getValue(options) : Options.FullUnrollMaxNodes.getValue(options);
maxNodes = Math.min(maxNodes, Math.max(0, MaximumDesiredSize.getValue(options) - loop.loopBegin().graph().getNodeCount()));
int size = Math.max(1, loop.size() - 1 - loop.loopBegin().phis().count());
- if (maxTrips <= Options.FullUnrollMaxIterations.getValue(options) && size * (maxTrips - 1) <= maxNodes) {
+ /* @formatter:off
+ * The check below should not throw ArithmeticException because:
+ * - maxTrips is guaranteed to be >= 1 by the check above
+ * - maxTrips * size cannot overflow because:
+ *   - maxTrips <= FullUnrollMaxIterations <= Integer.MAX_VALUE
+ *   - 1 <= size <= Integer.MAX_VALUE
+ * @formatter:on
+ */
+ if (maxTrips.isLessOrEqualTo(Options.FullUnrollMaxIterations.getValue(options)) && maxTrips.minus(1).times(size).isLessOrEqualTo(maxNodes)) {
// check whether we're allowed to unroll this loop
return loop.canDuplicateLoop();
} else {
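
Moving the policy to UnsignedLong matters because a signed comparison would misread a huge trip count as negative and small. Assuming UnsignedLong compares via Long.compareUnsigned, the difference is easy to demonstrate:

    public class UnsignedCompare {
        public static void main(String[] args) {
            long maxTrips = -1L; // bit pattern of 2^64 - 1: a huge unsigned trip count
            int limit = 600;     // stand-in for Options.FullUnrollMaxIterations
            System.out.println(maxTrips <= limit);                          // true: signed compare would unroll
            System.out.println(Long.compareUnsigned(maxTrips, limit) <= 0); // false: unsigned compare refuses
        }
    }
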
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/LoopFragment.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/LoopFragment.java Sat Mar 24 01:08:35 2018 +0100
@@ -35,6 +35,7 @@
import org.graalvm.compiler.graph.NodeBitMap;
import org.graalvm.compiler.graph.iterators.NodeIterable;
import org.graalvm.compiler.nodes.AbstractBeginNode;
+import org.graalvm.compiler.nodes.AbstractMergeNode;
import org.graalvm.compiler.nodes.EndNode;
import org.graalvm.compiler.nodes.FixedNode;
import org.graalvm.compiler.nodes.FrameState;
@@ -208,6 +209,12 @@
NodeWithState withState = (NodeWithState) n;
withState.states().forEach(state -> state.applyToVirtual(node -> nodes.mark(node)));
}
+ if (n instanceof AbstractMergeNode) {
+ // if a merge is in the loop, all of its phis are also in the loop
+ for (PhiNode phi : ((AbstractMergeNode) n).phis()) {
+ nodes.mark(phi);
+ }
+ }
nodes.mark(n);
}
}
@@ -246,6 +253,17 @@
if (n instanceof MonitorEnterNode) {
markFloating(worklist, ((MonitorEnterNode) n).getMonitorId(), nodes, nonLoopNodes);
}
+ if (n instanceof AbstractMergeNode) {
+ /*
+ * Since we already marked all phi nodes as being in the loop to break cycles,
+ * we also have to iterate over their usages here.
+ */
+ for (PhiNode phi : ((AbstractMergeNode) n).phis()) {
+ for (Node usage : phi.usages()) {
+ markFloating(worklist, usage, nodes, nonLoopNodes);
+ }
+ }
+ }
for (Node usage : n.usages()) {
markFloating(worklist, usage, nodes, nonLoopNodes);
}
@@ -263,6 +281,20 @@
this.usages = n.usages().iterator();
this.isLoopNode = loopNodes.isMarked(n);
}
+
+ @Override
+ public boolean equals(Object obj) {
+ if (!(obj instanceof WorkListEntry)) {
+ return false;
+ }
+ WorkListEntry other = (WorkListEntry) obj;
+ return this.n == other.n;
+ }
+
+ @Override
+ public int hashCode() {
+ return n.hashCode();
+ }
}
static TriState isLoopNode(Node n, NodeBitMap loopNodes, NodeBitMap nonLoopNodes) {
@@ -272,32 +304,24 @@
if (nonLoopNodes.isMarked(n)) {
return TriState.FALSE;
}
- if (n instanceof FixedNode) {
+ if (n instanceof FixedNode || n instanceof PhiNode) {
+ // phi nodes are treated the same as fixed nodes in this algorithm to break cycles
return TriState.FALSE;
}
- boolean mark = false;
- if (n instanceof PhiNode) {
- PhiNode phi = (PhiNode) n;
- mark = loopNodes.isMarked(phi.merge());
- if (mark) {
- /*
- * This Phi is a loop node but the inputs might not be so they must be processed by
- * the caller.
- */
- loopNodes.mark(n);
- } else {
- nonLoopNodes.mark(n);
- return TriState.FALSE;
- }
- }
return TriState.UNKNOWN;
}
+ private static void pushWorkList(Deque<WorkListEntry> workList, Node node, NodeBitMap loopNodes) {
+ WorkListEntry entry = new WorkListEntry(node, loopNodes);
+ assert !workList.contains(entry) : "node " + node + " added to worklist twice";
+ workList.push(entry);
+ }
+
private static void markFloating(Deque<WorkListEntry> workList, Node start, NodeBitMap loopNodes, NodeBitMap nonLoopNodes) {
if (isLoopNode(start, loopNodes, nonLoopNodes).isKnown()) {
return;
}
- workList.push(new WorkListEntry(start, loopNodes));
+ pushWorkList(workList, start, loopNodes);
while (!workList.isEmpty()) {
WorkListEntry currentEntry = workList.peek();
if (currentEntry.usages.hasNext()) {
@@ -308,7 +332,7 @@
currentEntry.isLoopNode = true;
}
} else {
- workList.push(new WorkListEntry(current, loopNodes));
+ pushWorkList(workList, current, loopNodes);
}
} else {
workList.pop();
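
Taken together, the phi changes classify phis as loop nodes up front, next to the fixed nodes, so the floating-node walk never re-enters them and loop-carried cycles cannot make it spin. A greatly simplified model of the resulting query; it assumes phis are already decided, which in SSA form leaves the remaining usage graph acyclic:

    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    public class FloatingMark {
        // A floating node is in the loop iff one of its transitive usages is.
        static boolean inLoop(String n, Map<String, List<String>> usages,
                        Set<String> loopFixedAndPhis, Map<String, Boolean> memo) {
            if (loopFixedAndPhis.contains(n)) {
                return true; // fixed nodes and phis were classified up front
            }
            Boolean known = memo.get(n);
            if (known != null) {
                return known;
            }
            boolean result = false;
            for (String usage : usages.getOrDefault(n, List.of())) {
                if (inLoop(usage, usages, loopFixedAndPhis, memo)) {
                    result = true;
                    break;
                }
            }
            memo.put(n, result);
            return result;
        }
    }
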
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/LoopFragmentInside.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/LoopFragmentInside.java Sat Mar 24 01:08:35 2018 +0100
@@ -28,6 +28,7 @@
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.Equivalence;
+import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.graph.Graph.DuplicationReplacement;
@@ -338,6 +339,7 @@
}
@Override
+ @SuppressWarnings("try")
protected DuplicationReplacement getDuplicationReplacement() {
final LoopBeginNode loopBegin = loop().loopBegin();
final StructuredGraph graph = graph();
@@ -347,34 +349,36 @@
@Override
public Node replacement(Node original) {
- if (original == loopBegin) {
- Node value = seenNode.get(original);
- if (value != null) {
- return value;
- }
- AbstractBeginNode newValue = graph.add(new BeginNode());
- seenNode.put(original, newValue);
- return newValue;
- }
- if (original instanceof LoopExitNode && ((LoopExitNode) original).loopBegin() == loopBegin) {
- Node value = seenNode.get(original);
- if (value != null) {
- return value;
+ try (DebugCloseable position = original.withNodeSourcePosition()) {
+ if (original == loopBegin) {
+ Node value = seenNode.get(original);
+ if (value != null) {
+ return value;
+ }
+ AbstractBeginNode newValue = graph.add(new BeginNode());
+ seenNode.put(original, newValue);
+ return newValue;
}
- AbstractBeginNode newValue = graph.add(new BeginNode());
- seenNode.put(original, newValue);
- return newValue;
+ if (original instanceof LoopExitNode && ((LoopExitNode) original).loopBegin() == loopBegin) {
+ Node value = seenNode.get(original);
+ if (value != null) {
+ return value;
+ }
+ AbstractBeginNode newValue = graph.add(new BeginNode());
+ seenNode.put(original, newValue);
+ return newValue;
+ }
+ if (original instanceof LoopEndNode && ((LoopEndNode) original).loopBegin() == loopBegin) {
+ Node value = seenNode.get(original);
+ if (value != null) {
+ return value;
+ }
+ EndNode newValue = graph.add(new EndNode());
+ seenNode.put(original, newValue);
+ return newValue;
+ }
+ return original;
}
- if (original instanceof LoopEndNode && ((LoopEndNode) original).loopBegin() == loopBegin) {
- Node value = seenNode.get(original);
- if (value != null) {
- return value;
- }
- EndNode newValue = graph.add(new EndNode());
- seenNode.put(original, newValue);
- return newValue;
- }
- return original;
}
};
}
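
Wrapping the body of getDuplicationReplacement in withNodeSourcePosition() is one instance of an idiom this changeset applies repeatedly (see also AbstractFixedGuardNode, BeginNode and FixedGuardNode below): every node created while the scope is open inherits the original node's source position. Condensed to its core, using the same Graal APIs that appear in the hunks:

    @SuppressWarnings("try")
    Node duplicate(Node original, StructuredGraph graph) {
        try (DebugCloseable position = original.withNodeSourcePosition()) {
            // every node added inside this scope is tagged with original's position
            return graph.add(new BeginNode());
        }
    }
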
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/MathUtil.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.loop/src/org/graalvm/compiler/loop/MathUtil.java Sat Mar 24 01:08:35 2018 +0100
@@ -30,6 +30,9 @@
import org.graalvm.compiler.nodes.calc.BinaryArithmeticNode;
import org.graalvm.compiler.nodes.calc.FixedBinaryNode;
import org.graalvm.compiler.nodes.calc.SignedDivNode;
+import org.graalvm.compiler.nodes.calc.UnsignedDivNode;
+
+import java.util.function.BiFunction;
/**
* Utility methods to perform integer math with some obvious constant folding first.
@@ -71,12 +74,28 @@
}
public static ValueNode divBefore(StructuredGraph graph, FixedNode before, ValueNode dividend, ValueNode divisor) {
+ return fixedDivBefore(graph, before, dividend, divisor, (dend, sor) -> SignedDivNode.create(dend, sor, NodeView.DEFAULT));
+ }
+
+ public static ValueNode unsignedDivBefore(StructuredGraph graph, FixedNode before, ValueNode dividend, ValueNode divisor) {
+ return fixedDivBefore(graph, before, dividend, divisor, (dend, sor) -> UnsignedDivNode.create(dend, sor, NodeView.DEFAULT));
+ }
+
+ private static ValueNode fixedDivBefore(StructuredGraph graph, FixedNode before, ValueNode dividend, ValueNode divisor, BiFunction<ValueNode, ValueNode, ValueNode> createDiv) {
if (isConstantOne(divisor)) {
return dividend;
}
- ValueNode div = graph.addOrUniqueWithInputs(SignedDivNode.create(dividend, divisor, NodeView.DEFAULT));
+ ValueNode div = graph.addOrUniqueWithInputs(createDiv.apply(dividend, divisor));
if (div instanceof FixedBinaryNode) {
- graph.addBeforeFixed(before, (FixedBinaryNode) div);
+ FixedBinaryNode fixedDiv = (FixedBinaryNode) div;
+ if (before.predecessor() instanceof FixedBinaryNode) {
+ FixedBinaryNode binaryPredecessor = (FixedBinaryNode) before.predecessor();
+ if (fixedDiv.dataFlowEquals(binaryPredecessor)) {
+ fixedDiv.safeDelete();
+ return binaryPredecessor;
+ }
+ }
+ graph.addBeforeFixed(before, fixedDiv);
}
return div;
}
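
fixedDivBefore now performs a one-deep local value numbering before inserting: if the node immediately preceding the insertion point computes the same division, the fresh node is deleted and the existing one reused. A hypothetical model of that check:

    public class ReuseBefore {
        interface Op {
            Op predecessor();
            boolean dataFlowEquals(Op other);
            void delete();
            void insertBefore(Op anchor);
        }

        static Op divBefore(Op anchor, Op freshDiv) {
            Op pred = anchor.predecessor();
            if (pred != null && freshDiv.dataFlowEquals(pred)) {
                freshDiv.delete(); // duplicate: reuse the existing division
                return pred;
            }
            freshDiv.insertBefore(anchor);
            return freshDiv;
        }
    }
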
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/IntegerStampTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/IntegerStampTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -570,4 +570,16 @@
assertEquals(IntegerStamp.create(bits, 0, 1000), div.foldStamp(IntegerStamp.create(bits, 100, 1000), IntegerStamp.create(bits, 1, max)));
assertEquals(IntegerStamp.create(bits, -1000, 0), div.foldStamp(IntegerStamp.create(bits, -1000, -100), IntegerStamp.create(bits, 1, max)));
}
+
+ @Test
+ public void testEmpty() {
+ IntegerStamp intStamp = StampFactory.forInteger(32);
+ IntegerStamp longStamp = StampFactory.forInteger(64);
+ Stamp intEmpty = StampFactory.empty(JavaKind.Int);
+ Stamp longEmpty = StampFactory.empty(JavaKind.Long);
+ assertEquals(intStamp.join(intEmpty), intEmpty);
+ assertEquals(intStamp.meet(intEmpty), intStamp);
+ assertEquals(longStamp.join(longEmpty), longEmpty);
+ assertEquals(longStamp.meet(longEmpty), longStamp);
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/PrimitiveStampBoundaryTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/PrimitiveStampBoundaryTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -35,6 +35,7 @@
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.PrimitiveStamp;
import org.graalvm.compiler.core.common.type.Stamp;
+import org.graalvm.compiler.core.common.type.StampFactory;
import org.graalvm.compiler.test.GraalTest;
import org.junit.Test;
@@ -64,6 +65,7 @@
shiftStamps.add(IntegerStamp.create(32, Math.min(v1, v2), Math.max(v1, v2)));
}
}
+ shiftStamps.add((IntegerStamp) StampFactory.empty(JavaKind.Int));
integerTestStamps = new HashSet<>();
for (long v1 : longBoundaryValues) {
@@ -74,6 +76,8 @@
integerTestStamps.add(IntegerStamp.create(64, Math.min(v1, v2), Math.max(v1, v2)));
}
}
+ integerTestStamps.add((PrimitiveStamp) StampFactory.empty(JavaKind.Int));
+ integerTestStamps.add((PrimitiveStamp) StampFactory.empty(JavaKind.Long));
}
static double[] doubleBoundaryValues = {Double.NEGATIVE_INFINITY, Double.MIN_VALUE, Float.NEGATIVE_INFINITY, Float.MIN_VALUE,
@@ -98,6 +102,8 @@
generateFloatingStamps(new FloatStamp(64, Math.min(d1, d2), Math.max(d1, d2), false));
}
}
+ floatTestStamps.add((PrimitiveStamp) StampFactory.empty(JavaKind.Float));
+ floatTestStamps.add((PrimitiveStamp) StampFactory.empty(JavaKind.Double));
}
private static void generateFloatingStamps(FloatStamp floatStamp) {
@@ -130,7 +136,7 @@
private static void checkConvertOperation(IntegerConvertOp<?> op, int inputBits, int resultBits, Stamp result, Stamp v1stamp) {
Stamp folded = op.foldStamp(inputBits, resultBits, v1stamp);
- assertTrue(folded.asConstant() != null, "should constant fold %s %s %s", op, v1stamp, folded);
+ assertTrue(folded.isEmpty() || folded.asConstant() != null, "should constant fold %s %s %s", op, v1stamp, folded);
assertTrue(result.meet(folded).equals(result), "result out of range %s %s %s %s %s", op, v1stamp, folded, result, result.meet(folded));
}
@@ -167,7 +173,7 @@
private static void checkConvertOperation(ArithmeticOpTable.FloatConvertOp op, Stamp result, Stamp v1stamp) {
Stamp folded = op.foldStamp(v1stamp);
- assertTrue(folded.asConstant() != null, "should constant fold %s %s %s", op, v1stamp, folded);
+ assertTrue(folded.isEmpty() || folded.asConstant() != null, "should constant fold %s %s %s", op, v1stamp, folded);
assertTrue(result.meet(folded).equals(result), "result out of range %s %s %s %s %s", op, v1stamp, folded, result, result.meet(folded));
}
@@ -184,6 +190,10 @@
IntegerStamp stamp = (IntegerStamp) testStamp;
for (IntegerStamp shiftStamp : shifts) {
IntegerStamp foldedStamp = (IntegerStamp) shiftOp.foldStamp(stamp, shiftStamp);
+ if (foldedStamp.isEmpty()) {
+ assertTrue(stamp.isEmpty() || shiftStamp.isEmpty());
+ continue;
+ }
checkShiftOperation(stamp.getBits(), shiftOp, foldedStamp, stamp.lowerBound(), shiftStamp.lowerBound());
checkShiftOperation(stamp.getBits(), shiftOp, foldedStamp, stamp.lowerBound(), shiftStamp.upperBound());
checkShiftOperation(stamp.getBits(), shiftOp, foldedStamp, stamp.upperBound(), shiftStamp.lowerBound());
@@ -205,8 +215,15 @@
private static void checkBinaryOperation(ArithmeticOpTable.BinaryOp<?> op, Stamp result, Stamp v1stamp, Stamp v2stamp) {
Stamp folded = op.foldStamp(v1stamp, v2stamp);
+ if (v1stamp.isEmpty() || v2stamp.isEmpty()) {
+ assertTrue(folded.isEmpty());
+ assertTrue(v1stamp.asConstant() != null || v1stamp.isEmpty());
+ assertTrue(v2stamp.asConstant() != null || v2stamp.isEmpty());
+ return;
+ }
Constant constant = op.foldConstant(v1stamp.asConstant(), v2stamp.asConstant());
if (constant != null) {
+ assertFalse(folded.isEmpty());
Constant constant2 = folded.asConstant();
if (constant2 == null && v1stamp instanceof FloatStamp) {
JavaConstant c = (JavaConstant) constant;
@@ -239,6 +256,9 @@
}
private static Stamp boundaryStamp(Stamp v1, boolean upper) {
+ if (v1.isEmpty()) {
+ return v1;
+ }
if (v1 instanceof IntegerStamp) {
IntegerStamp istamp = (IntegerStamp) v1;
long bound = upper ? istamp.upperBound() : istamp.lowerBound();
@@ -319,7 +339,7 @@
}
}
} else {
- assert v1stamp instanceof FloatStamp;
+ assertTrue(v1stamp.isEmpty() || v1stamp instanceof FloatStamp);
}
assertTrue(result.meet(folded).equals(result), "result out of range %s %s %s %s %s", op, v1stamp, folded, result, result.meet(folded));
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/AbstractFixedGuardNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/AbstractFixedGuardNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -23,6 +23,7 @@
package org.graalvm.compiler.nodes;
import org.graalvm.compiler.core.common.type.StampFactory;
+import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.Simplifiable;
import org.graalvm.compiler.graph.spi.SimplifierTool;
@@ -41,8 +42,8 @@
public static final NodeClass<AbstractFixedGuardNode> TYPE = NodeClass.create(AbstractFixedGuardNode.class);
@Input(InputType.Condition) protected LogicNode condition;
- protected final DeoptimizationReason reason;
- protected final DeoptimizationAction action;
+ protected DeoptimizationReason reason;
+ protected DeoptimizationAction action;
protected JavaConstant speculation;
protected boolean negated;
@@ -109,29 +110,42 @@
}
}
+ @SuppressWarnings("try")
public DeoptimizeNode lowerToIf() {
- FixedNode currentNext = next();
- setNext(null);
- DeoptimizeNode deopt = graph().add(new DeoptimizeNode(action, reason, speculation));
- deopt.setStateBefore(stateBefore());
- IfNode ifNode;
- AbstractBeginNode noDeoptSuccessor;
- if (negated) {
- ifNode = graph().add(new IfNode(condition, deopt, currentNext, 0));
- noDeoptSuccessor = ifNode.falseSuccessor();
- } else {
- ifNode = graph().add(new IfNode(condition, currentNext, deopt, 1));
- noDeoptSuccessor = ifNode.trueSuccessor();
+ try (DebugCloseable position = this.withNodeSourcePosition()) {
+ FixedNode currentNext = next();
+ setNext(null);
+ DeoptimizeNode deopt = graph().add(new DeoptimizeNode(action, reason, speculation));
+ deopt.setStateBefore(stateBefore());
+ IfNode ifNode;
+ AbstractBeginNode noDeoptSuccessor;
+ if (negated) {
+ ifNode = graph().add(new IfNode(condition, deopt, currentNext, 0));
+ noDeoptSuccessor = ifNode.falseSuccessor();
+ } else {
+ ifNode = graph().add(new IfNode(condition, currentNext, deopt, 1));
+ noDeoptSuccessor = ifNode.trueSuccessor();
+ }
+ ((FixedWithNextNode) predecessor()).setNext(ifNode);
+ this.replaceAtUsages(noDeoptSuccessor);
+ GraphUtil.killWithUnusedFloatingInputs(this);
+
+ return deopt;
}
- ((FixedWithNextNode) predecessor()).setNext(ifNode);
- this.replaceAtUsages(noDeoptSuccessor);
- GraphUtil.killWithUnusedFloatingInputs(this);
-
- return deopt;
}
@Override
public boolean canDeoptimize() {
return true;
}
+
+ @Override
+ public void setAction(DeoptimizationAction action) {
+ this.action = action;
+ }
+
+ @Override
+ public void setReason(DeoptimizationReason reason) {
+ this.reason = reason;
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/BeginNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/BeginNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -27,6 +27,7 @@
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.core.common.type.StampFactory;
+import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.Simplifiable;
import org.graalvm.compiler.graph.spi.SimplifierTool;
@@ -71,12 +72,15 @@
}
}
+ @SuppressWarnings("try")
public static AbstractBeginNode begin(FixedNode with) {
- if (with instanceof AbstractBeginNode) {
- return (AbstractBeginNode) with;
+ try (DebugCloseable position = with.withNodeSourcePosition()) {
+ if (with instanceof AbstractBeginNode) {
+ return (AbstractBeginNode) with;
+ }
+ BeginNode begin = with.graph().add(new BeginNode());
+ begin.setNext(with);
+ return begin;
}
- BeginNode begin = with.graph().add(new BeginNode());
- begin.setNext(with);
- return begin;
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/DeoptimizeNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/DeoptimizeNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -43,8 +43,8 @@
public static final int DEFAULT_DEBUG_ID = 0;
public static final NodeClass<DeoptimizeNode> TYPE = NodeClass.create(DeoptimizeNode.class);
- protected final DeoptimizationAction action;
- protected final DeoptimizationReason reason;
+ protected DeoptimizationAction action;
+ protected DeoptimizationReason reason;
protected int debugId;
protected final JavaConstant speculation;
@@ -73,11 +73,21 @@
}
@Override
+ public void setAction(DeoptimizationAction action) {
+ this.action = action;
+ }
+
+ @Override
public DeoptimizationReason getReason() {
return reason;
}
@Override
+ public void setReason(DeoptimizationReason reason) {
+ this.reason = reason;
+ }
+
+ @Override
public void lower(LoweringTool tool) {
tool.getLowerer().lower(this, tool);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/DynamicPiNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/DynamicPiNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -31,6 +31,8 @@
import org.graalvm.compiler.nodeinfo.NodeInfo;
import org.graalvm.compiler.nodes.extended.GuardingNode;
+import jdk.vm.ci.meta.Assumptions;
+import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.meta.ResolvedJavaType;
/**
@@ -42,29 +44,55 @@
public static final NodeClass<DynamicPiNode> TYPE = NodeClass.create(DynamicPiNode.class);
@Input ValueNode typeMirror;
+ private final boolean exact;
- public DynamicPiNode(ValueNode object, GuardingNode guard, ValueNode typeMirror) {
+ protected DynamicPiNode(ValueNode object, GuardingNode guard, ValueNode typeMirror, boolean exact) {
super(TYPE, object, StampFactory.object(), guard);
this.typeMirror = typeMirror;
+ this.exact = exact;
+ }
+
+ public static ValueNode create(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode object, GuardingNode guard, ValueNode typeMirror, boolean exact) {
+ ValueNode synonym = findSynonym(assumptions, constantReflection, object, guard, typeMirror, exact);
+ if (synonym != null) {
+ return synonym;
+ }
+ return new DynamicPiNode(object, guard, typeMirror, exact);
}
- @Override
- public Node canonical(CanonicalizerTool tool) {
+ public static ValueNode create(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode object, GuardingNode guard, ValueNode typeMirror) {
+ return create(assumptions, constantReflection, object, guard, typeMirror, false);
+ }
+
+ public boolean isExact() {
+ return exact;
+ }
+
+ private static ValueNode findSynonym(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode object, GuardingNode guard, ValueNode typeMirror, boolean exact) {
if (typeMirror.isConstant()) {
- ResolvedJavaType t = tool.getConstantReflection().asJavaType(typeMirror.asConstant());
+ ResolvedJavaType t = constantReflection.asJavaType(typeMirror.asConstant());
if (t != null) {
Stamp staticPiStamp;
if (t.isPrimitive()) {
staticPiStamp = StampFactory.alwaysNull();
} else {
- TypeReference type = TypeReference.createTrusted(tool.getAssumptions(), t);
+ TypeReference type = exact ? TypeReference.createExactTrusted(t) : TypeReference.createTrusted(assumptions, t);
staticPiStamp = StampFactory.object(type);
}
- return new PiNode(object(), staticPiStamp, (ValueNode) getGuard()).canonical(tool);
+ return PiNode.create(object, staticPiStamp, (ValueNode) guard);
}
}
+ return null;
+ }
+
+ @Override
+ public Node canonical(CanonicalizerTool tool) {
+ ValueNode synonym = findSynonym(tool.getAssumptions(), tool.getConstantReflection(), object, guard, typeMirror, exact);
+ if (synonym != null) {
+ return synonym;
+ }
return this;
}
}
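
DynamicPiNode now follows the create/findSynonym/canonical idiom used elsewhere in Graal: the static factory folds eagerly, and canonical() reuses the same helper on later canonicalization rounds. In condensed form, where Args and argsOf are placeholders for the real parameter list:

    // Args/argsOf are placeholders; see the actual signatures in the hunk above.
    public static ValueNode create(Args args) {
        ValueNode synonym = findSynonym(args);
        return synonym != null ? synonym : new DynamicPiNode(args);
    }

    @Override
    public Node canonical(CanonicalizerTool tool) {
        ValueNode synonym = findSynonym(argsOf(this, tool));
        return synonym != null ? synonym : this;
    }
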
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/EncodedGraph.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/EncodedGraph.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,9 +24,11 @@
import java.util.List;
+import org.graalvm.collections.EconomicSet;
import org.graalvm.compiler.graph.NodeClass;
import jdk.vm.ci.meta.Assumptions;
+import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaMethod;
/**
@@ -34,7 +36,7 @@
* {@link GraphEncoder} for a description of the encoding format. Use {@link GraphDecoder} for
* decoding.
*/
-public class EncodedGraph {
+public final class EncodedGraph {
private final byte[] encoding;
private final int startOffset;
@@ -42,6 +44,9 @@
private final NodeClass<?>[] types;
private final Assumptions assumptions;
private final List<ResolvedJavaMethod> inlinedMethods;
+ private final boolean trackNodeSourcePosition;
+ private final EconomicSet<ResolvedJavaField> fields;
+ private final boolean hasUnsafeAccess;
/**
* The "table of contents" of the encoded graph, i.e., the mapping from orderId numbers to the
@@ -49,13 +54,22 @@
*/
protected int[] nodeStartOffsets;
- public EncodedGraph(byte[] encoding, int startOffset, Object[] objects, NodeClass<?>[] types, Assumptions assumptions, List<ResolvedJavaMethod> inlinedMethods) {
+ public EncodedGraph(byte[] encoding, int startOffset, Object[] objects, NodeClass<?>[] types, StructuredGraph sourceGraph) {
+ this(encoding, startOffset, objects, types, sourceGraph.getAssumptions(), sourceGraph.getMethods(), sourceGraph.getFields(), sourceGraph.hasUnsafeAccess(),
+ sourceGraph.trackNodeSourcePosition());
+ }
+
+ public EncodedGraph(byte[] encoding, int startOffset, Object[] objects, NodeClass<?>[] types, Assumptions assumptions, List<ResolvedJavaMethod> inlinedMethods,
+ EconomicSet<ResolvedJavaField> fields, boolean hasUnsafeAccess, boolean trackNodeSourcePosition) {
this.encoding = encoding;
this.startOffset = startOffset;
this.objects = objects;
this.types = types;
this.assumptions = assumptions;
this.inlinedMethods = inlinedMethods;
+ this.trackNodeSourcePosition = trackNodeSourcePosition;
+ this.fields = fields;
+ this.hasUnsafeAccess = hasUnsafeAccess;
}
public byte[] getEncoding() {
@@ -81,4 +95,16 @@
public List<ResolvedJavaMethod> getInlinedMethods() {
return inlinedMethods;
}
+
+ public boolean trackNodeSourcePosition() {
+ return trackNodeSourcePosition;
+ }
+
+ public EconomicSet<ResolvedJavaField> getFields() {
+ return fields;
+ }
+
+ public boolean hasUnsafeAccess() {
+ return hasUnsafeAccess;
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/FixedGuardNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/FixedGuardNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,7 @@
*/
package org.graalvm.compiler.nodes;
+import org.graalvm.compiler.debug.DebugCloseable;
import static org.graalvm.compiler.nodeinfo.InputType.Guard;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_2;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_2;
@@ -80,16 +81,19 @@
}
}
+ @SuppressWarnings("try")
@Override
public void lower(LoweringTool tool) {
- if (graph().getGuardsStage().allowsFloatingGuards()) {
- if (getAction() != DeoptimizationAction.None) {
- ValueNode guard = tool.createGuard(this, getCondition(), getReason(), getAction(), getSpeculation(), isNegated()).asNode();
- this.replaceAtUsages(guard);
- graph().removeFixed(this);
+ try (DebugCloseable position = this.withNodeSourcePosition()) {
+ if (graph().getGuardsStage().allowsFloatingGuards()) {
+ if (getAction() != DeoptimizationAction.None) {
+ ValueNode guard = tool.createGuard(this, getCondition(), getReason(), getAction(), getSpeculation(), isNegated()).asNode();
+ this.replaceAtUsages(guard);
+ graph().removeFixed(this);
+ }
+ } else {
+ lowerToIf().lower(tool);
}
- } else {
- lowerToIf().lower(tool);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/FrameState.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/FrameState.java Sat Mar 24 01:08:35 2018 +0100
@@ -244,7 +244,7 @@
if (fs == null) {
return null;
}
- return new NodeSourcePosition(null, toSourcePosition(fs.outerFrameState()), fs.code.getMethod(), fs.bci);
+ return new NodeSourcePosition(toSourcePosition(fs.outerFrameState()), fs.code.getMethod(), fs.bci);
}
/**
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphDecoder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphDecoder.java Sat Mar 24 01:08:35 2018 +0100
@@ -146,6 +146,15 @@
public boolean isInlinedMethod() {
return false;
}
+
+ public NodeSourcePosition getCallerBytecodePosition() {
+ return getCallerBytecodePosition(null);
+ }
+
+ public NodeSourcePosition getCallerBytecodePosition(NodeSourcePosition position) {
+ return position;
+ }
+
}
/** Decoding state maintained for each loop in the encoded graph. */
@@ -1001,7 +1010,7 @@
}
protected void readProperties(MethodScope methodScope, Node node) {
- node.setNodeSourcePosition((NodeSourcePosition) readObject(methodScope));
+ NodeSourcePosition position = (NodeSourcePosition) readObject(methodScope);
Fields fields = node.getNodeClass().getData();
for (int pos = 0; pos < fields.getCount(); pos++) {
if (fields.getType(pos).isPrimitive()) {
@@ -1012,6 +1021,9 @@
fields.putObject(node, pos, value);
}
}
+ if (graph.trackNodeSourcePosition() && position != null) {
+ node.setNodeSourcePosition(methodScope.getCallerBytecodePosition(position));
+ }
}
/**
@@ -1252,7 +1264,11 @@
long readerByteIndex = methodScope.reader.getByteIndex();
methodScope.reader.setByteIndex(methodScope.encodedGraph.nodeStartOffsets[nodeOrderId]);
NodeClass<?> nodeClass = methodScope.encodedGraph.getNodeClasses()[methodScope.reader.getUVInt()];
- node = (FixedNode) graph.add(nodeClass.allocateInstance());
+ Node stubNode = nodeClass.allocateInstance();
+ if (graph.trackNodeSourcePosition()) {
+ stubNode.setNodeSourcePosition(NodeSourcePosition.placeholder(graph.method()));
+ }
+ node = (FixedNode) graph.add(stubNode);
/* Properties and edges are not filled yet, the node remains uninitialized. */
methodScope.reader.setByteIndex(readerByteIndex);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphEncoder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphEncoder.java Sat Mar 24 01:08:35 2018 +0100
@@ -154,7 +154,7 @@
encoder.prepare(graph);
encoder.finishPrepare();
int startOffset = encoder.encode(graph);
- return new EncodedGraph(encoder.getEncoding(), startOffset, encoder.getObjects(), encoder.getNodeClasses(), graph.getAssumptions(), graph.getMethods());
+ return new EncodedGraph(encoder.getEncoding(), startOffset, encoder.getObjects(), encoder.getNodeClasses(), graph);
}
public GraphEncoder(Architecture architecture) {
@@ -288,8 +288,7 @@
}
/* Check that the decoding of the encode graph is the same as the input. */
- assert verifyEncoding(graph, new EncodedGraph(getEncoding(), metadataStart, getObjects(), getNodeClasses(), graph.getAssumptions(), graph.getMethods()),
- architecture);
+ assert verifyEncoding(graph, new EncodedGraph(getEncoding(), metadataStart, getObjects(), getNodeClasses(), graph), architecture);
return metadataStart;
}
@@ -436,6 +435,9 @@
public static boolean verifyEncoding(StructuredGraph originalGraph, EncodedGraph encodedGraph, Architecture architecture) {
DebugContext debug = originalGraph.getDebug();
StructuredGraph decodedGraph = new StructuredGraph.Builder(originalGraph.getOptions(), debug, AllowAssumptions.YES).method(originalGraph.method()).build();
+ if (originalGraph.trackNodeSourcePosition()) {
+ decodedGraph.setTrackNodeSourcePosition();
+ }
GraphDecoder decoder = new GraphDecoder(architecture, decodedGraph);
decoder.decode(encodedGraph);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GuardNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GuardNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -59,9 +59,9 @@
public static final NodeClass<GuardNode> TYPE = NodeClass.create(GuardNode.class);
@Input(Condition) protected LogicNode condition;
- protected final DeoptimizationReason reason;
+ protected DeoptimizationReason reason;
+ protected DeoptimizationAction action;
protected JavaConstant speculation;
- protected DeoptimizationAction action;
protected boolean negated;
public GuardNode(LogicNode condition, AnchoringNode anchor, DeoptimizationReason reason, DeoptimizationAction action, boolean negated, JavaConstant speculation) {
@@ -149,7 +149,13 @@
negated = !negated;
}
+ @Override
public void setAction(DeoptimizationAction invalidaterecompile) {
this.action = invalidaterecompile;
}
+
+ @Override
+ public void setReason(DeoptimizationReason reason) {
+ this.reason = reason;
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/InliningLog.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/InliningLog.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,13 +22,20 @@
*/
package org.graalvm.compiler.nodes;
-import jdk.vm.ci.code.BytecodePosition;
+import jdk.vm.ci.meta.MetaUtil;
import jdk.vm.ci.meta.ResolvedJavaMethod;
+import org.graalvm.collections.EconomicMap;
+import org.graalvm.collections.Equivalence;
+import org.graalvm.collections.MapCursor;
+import org.graalvm.collections.UnmodifiableEconomicMap;
+import org.graalvm.compiler.core.common.GraalOptions;
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.graph.Node;
+import org.graalvm.compiler.options.OptionValues;
import java.util.ArrayList;
-import java.util.HashMap;
import java.util.List;
-import java.util.Map;
+import java.util.function.BiConsumer;
/**
* This class contains all inlining decisions performed on a graph during the compilation.
@@ -40,8 +47,6 @@
* <li>the call target method</li>
* <li>the reason for the inlining decision</li>
* <li>the name of the phase in which the inlining decision took place</li>
- * <li>the special {@link BytecodePositionWithId} value that describes the position in the bytecode
- * together with the callsite-specific unique identifier</li>
* <li>the inlining log of the inlined graph, or {@code null} if the decision was negative</li>
* </ul>
*
@@ -49,99 +54,20 @@
* {@link StructuredGraph} by calling {@link #addDecision} whenever it decides to inline a method.
* If there are invokes in the graph at the end of the respective phase, then that phase must call
* {@link #addDecision} to log negative decisions.
- *
- * At the end of the compilation, the contents of the inlining log can be converted into a list of
- * decisions by calling {@link #formatAsList} or into an inlining tree, by calling
- * {@link #formatAsTree}.
*/
public class InliningLog {
- /**
- * A bytecode position with a unique identifier attached.
- *
- * The purpose of this class is to disambiguate callsites that are duplicated by a
- * transformation (such as loop peeling or path duplication).
- */
- public static final class BytecodePositionWithId extends BytecodePosition implements Comparable<BytecodePositionWithId> {
- private final int id;
-
- public BytecodePositionWithId(BytecodePositionWithId caller, ResolvedJavaMethod method, int bci, int id) {
- super(caller, method, bci);
- this.id = id;
- }
-
- public BytecodePositionWithId addCallerWithId(BytecodePositionWithId caller) {
- if (getCaller() == null) {
- return new BytecodePositionWithId(caller, getMethod(), getBCI(), id);
- } else {
- return new BytecodePositionWithId(getCaller().addCallerWithId(caller), getMethod(), getBCI(), id);
- }
- }
-
- public static BytecodePositionWithId create(FrameState state) {
- return create(state, true);
- }
-
- @SuppressWarnings("deprecation")
- private static BytecodePositionWithId create(FrameState state, boolean topLevel) {
- if (state == null) {
- return null;
- }
- ResolvedJavaMethod method = state.getMethod();
- int bci = topLevel ? state.bci - 3 : state.bci;
- int id = state.getId();
- return new BytecodePositionWithId(create(state.outerFrameState(), false), method, bci, id);
- }
-
- @Override
- public BytecodePositionWithId getCaller() {
- return (BytecodePositionWithId) super.getCaller();
- }
-
- public BytecodePositionWithId withoutCaller() {
- return new BytecodePositionWithId(null, getMethod(), getBCI(), id);
- }
-
- public long getId() {
- return id;
- }
-
- @Override
- public boolean equals(Object that) {
- return super.equals(that) && this.id == ((BytecodePositionWithId) that).id;
- }
-
- @Override
- public int hashCode() {
- return super.hashCode() ^ (id << 16);
- }
-
- @Override
- public int compareTo(BytecodePositionWithId that) {
- int diff = this.getBCI() - that.getBCI();
- if (diff != 0) {
- return diff;
- }
- diff = (int) (this.getId() - that.getId());
- return diff;
- }
- }
public static final class Decision {
private final boolean positive;
private final String reason;
private final String phase;
private final ResolvedJavaMethod target;
- private final BytecodePositionWithId position;
- private final InliningLog childLog;
- private Decision(boolean positive, String reason, String phase, ResolvedJavaMethod target, BytecodePositionWithId position, InliningLog childLog) {
- assert position != null;
+ private Decision(boolean positive, String reason, String phase, ResolvedJavaMethod target) {
this.positive = positive;
this.reason = reason;
this.phase = phase;
this.target = target;
- this.position = position;
- this.childLog = childLog;
}
public boolean isPositive() {
@@ -156,123 +82,304 @@
return phase;
}
- public BytecodePositionWithId getPosition() {
- return position;
+ public ResolvedJavaMethod getTarget() {
+ return target;
}
- public InliningLog getChildLog() {
- return childLog;
+ @Override
+ public String toString() {
+ return String.format("<%s> %s: %s", phase, target != null ? target.format("%H.%n(%p)") : "", reason);
+ }
+ }
+
+ private class Callsite {
+ public final List<Decision> decisions;
+ public final List<Callsite> children;
+ public Callsite parent;
+ public ResolvedJavaMethod target;
+ public Invokable invoke;
+
+ Callsite(Callsite parent, Invokable originalInvoke) {
+ this.parent = parent;
+ this.decisions = new ArrayList<>();
+ this.children = new ArrayList<>();
+ this.invoke = originalInvoke;
}
- public ResolvedJavaMethod getTarget() {
- return target;
+ public Callsite addChild(Invokable childInvoke) {
+ Callsite child = new Callsite(this, childInvoke);
+ children.add(child);
+ return child;
+ }
+
+ public String positionString() {
+ if (parent == null) {
+ return "<root>";
+ }
+ return MetaUtil.appendLocation(new StringBuilder(100), parent.target, getBci()).toString();
+ }
+
+ public int getBci() {
+ return invoke != null ? invoke.bci() : -1;
}
}
- private static class Callsite {
- public final List<String> decisions;
- public final Map<BytecodePositionWithId, Callsite> children;
- public final BytecodePositionWithId position;
+ private final Callsite root;
+ private final EconomicMap<Invokable, Callsite> leaves;
+ private final OptionValues options;
+
+ public InliningLog(ResolvedJavaMethod rootMethod, OptionValues options) {
+ this.root = new Callsite(null, null);
+ this.root.target = rootMethod;
+ this.leaves = EconomicMap.create(Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE);
+ this.options = options;
+ }
- Callsite(BytecodePositionWithId position) {
- this.children = new HashMap<>();
- this.position = position;
- this.decisions = new ArrayList<>();
- }
-
- public Callsite getOrCreateChild(BytecodePositionWithId fromRootPosition) {
- Callsite child = children.get(fromRootPosition.withoutCaller());
- if (child == null) {
- child = new Callsite(fromRootPosition);
- children.put(fromRootPosition.withoutCaller(), child);
+ /**
+ * Add an inlining decision for the specified invoke.
+ *
+ * An inlining decision can be either positive or negative. A positive inlining decision must be
+ * logged after replacing an {@link Invoke} with a graph. In this case, the node replacement map
+ * and the {@link InliningLog} of the inlined graph must be provided.
+ */
+ public void addDecision(Invokable invoke, boolean positive, String reason, String phase, EconomicMap<Node, Node> replacements, InliningLog calleeLog) {
+ assert leaves.containsKey(invoke);
+ assert (!positive && replacements == null && calleeLog == null) || (positive && replacements != null && calleeLog != null);
+ Callsite callsite = leaves.get(invoke);
+ callsite.target = callsite.invoke.getTargetMethod();
+ Decision decision = new Decision(positive, reason, phase, invoke.getTargetMethod());
+ callsite.decisions.add(decision);
+ if (positive) {
+ leaves.removeKey(invoke);
+ EconomicMap<Callsite, Callsite> mapping = EconomicMap.create(Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE);
+ for (Callsite calleeChild : calleeLog.root.children) {
+ Callsite child = callsite.addChild(calleeChild.invoke);
+ copyTree(child, calleeChild, replacements, mapping);
}
- return child;
- }
-
- public Callsite createCallsite(BytecodePositionWithId fromRootPosition, String decision) {
- Callsite parent = getOrCreateCallsite(fromRootPosition.getCaller());
- Callsite callsite = parent.getOrCreateChild(fromRootPosition);
- callsite.decisions.add(decision);
- return null;
- }
-
- private Callsite getOrCreateCallsite(BytecodePositionWithId fromRootPosition) {
- if (fromRootPosition == null) {
- return this;
- } else {
- Callsite parent = getOrCreateCallsite(fromRootPosition.getCaller());
- Callsite callsite = parent.getOrCreateChild(fromRootPosition);
- return callsite;
+ MapCursor<Invokable, Callsite> entries = calleeLog.leaves.getEntries();
+ while (entries.advance()) {
+ Invokable invokeFromCallee = entries.getKey();
+ Callsite callsiteFromCallee = entries.getValue();
+ if (invokeFromCallee.asFixedNode().isDeleted()) {
+ // Some invoke nodes could have been removed by optimizations.
+ continue;
+ }
+ Invokable inlinedInvokeFromCallee = (Invokable) replacements.get(invokeFromCallee.asFixedNode());
+ Callsite descendant = mapping.get(callsiteFromCallee);
+ leaves.put(inlinedInvokeFromCallee, descendant);
}
}
}
- private final List<Decision> decisions;
-
- public InliningLog() {
- this.decisions = new ArrayList<>();
- }
-
- public List<Decision> getDecisions() {
- return decisions;
- }
-
- public void addDecision(boolean positive, String reason, String phase, ResolvedJavaMethod target, BytecodePositionWithId position,
- InliningLog calleeLog) {
- Decision decision = new Decision(positive, reason, phase, target, position, calleeLog);
- decisions.add(decision);
+ /**
+ * Append the inlining decision tree from the specified log.
+ *
+ * The subtrees of the specified log are appended below the root of this log. This is usually
+ * called when a node in the graph is replaced with its snippet.
+ *
+ * @see InliningLog#addDecision
+ */
+ public void addLog(UnmodifiableEconomicMap<Node, Node> replacements, InliningLog replacementLog) {
+ EconomicMap<Callsite, Callsite> mapping = EconomicMap.create(Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE);
+ for (Callsite calleeChild : replacementLog.root.children) {
+ Callsite child = root.addChild(calleeChild.invoke);
+ copyTree(child, calleeChild, replacements, mapping);
+ }
+ MapCursor<Invokable, Callsite> entries = replacementLog.leaves.getEntries();
+ while (entries.advance()) {
+ Invokable replacementInvoke = entries.getKey();
+ Callsite replacementCallsite = entries.getValue();
+ if (replacementInvoke.asFixedNode().isDeleted()) {
+ // Some invoke nodes could have been removed by optimizations.
+ continue;
+ }
+ Invokable invoke = (Invokable) replacements.get(replacementInvoke.asFixedNode());
+ Callsite callsite = mapping.get(replacementCallsite);
+ leaves.put(invoke, callsite);
+ }
}
- public String formatAsList() {
- StringBuilder builder = new StringBuilder();
- formatAsList("", null, decisions, builder);
- return builder.toString();
+ /**
+     * Completely replace the current log with a copy of the specified log.
+ *
+ * The precondition is that the current inlining log is completely empty. This is usually called
+ * when copying the entire graph.
+ *
+ * @see InliningLog#addDecision
+ */
+ public void replaceLog(UnmodifiableEconomicMap<Node, Node> replacements, InliningLog replacementLog) {
+ assert root.decisions.isEmpty();
+ assert root.children.isEmpty();
+ assert leaves.isEmpty();
+ EconomicMap<Callsite, Callsite> mapping = EconomicMap.create(Equivalence.IDENTITY_WITH_SYSTEM_HASHCODE);
+ copyTree(root, replacementLog.root, replacements, mapping);
+ MapCursor<Invokable, Callsite> replacementEntries = replacementLog.leaves.getEntries();
+ while (replacementEntries.advance()) {
+ Invokable replacementInvoke = replacementEntries.getKey();
+ Callsite replacementSite = replacementEntries.getValue();
+            Invokable invoke = (Invokable) replacements.get(replacementInvoke.asFixedNode());
+ Callsite site = mapping.get(replacementSite);
+ leaves.put(invoke, site);
+ }
}
- private void formatAsList(String phasePrefix, BytecodePositionWithId caller, List<Decision> subDecisions, StringBuilder builder) {
- for (Decision decision : subDecisions) {
- String phaseStack = phasePrefix.equals("") ? decision.getPhase() : phasePrefix + "-" + decision.getPhase();
- String target = decision.getTarget().format("%H.%n(%p)");
- String positive = decision.isPositive() ? "inline" : "do not inline";
- BytecodePositionWithId absolutePosition = decision.getPosition().addCallerWithId(caller);
- String position = " " + decision.getPosition().toString().replaceAll("\n", "\n ");
- String line = String.format("<%s> %s %s: %s\n%s", phaseStack, positive, target, decision.getReason(), position);
- builder.append(line).append(System.lineSeparator());
- if (decision.getChildLog() != null) {
- formatAsList(phaseStack, absolutePosition, decision.getChildLog().getDecisions(), builder);
- }
+ private void copyTree(Callsite site, Callsite replacementSite, UnmodifiableEconomicMap<Node, Node> replacements, EconomicMap<Callsite, Callsite> mapping) {
+ mapping.put(replacementSite, site);
+ site.target = replacementSite.target;
+ site.decisions.addAll(replacementSite.decisions);
+ site.invoke = replacementSite.invoke != null && replacementSite.invoke.asFixedNode().isAlive() ? (Invokable) replacements.get(replacementSite.invoke.asFixedNode()) : null;
+ for (Callsite replacementChild : replacementSite.children) {
+ Callsite child = new Callsite(site, null);
+ site.children.add(child);
+ copyTree(child, replacementChild, replacements, mapping);
+ }
+ }
+
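+    /**
+     * Verifies that every invoke in the graph has a leaf callsite in this log and that the
+     * callsite tree has consistent parent pointers.
+     */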
+ public void checkInvariants(StructuredGraph graph) {
+ for (Invoke invoke : graph.getInvokes()) {
+ assert leaves.containsKey(invoke) : "Invoke " + invoke + " not contained in the leaves.";
+ }
+ assert root.parent == null;
+ checkTreeInvariants(root);
+ }
+
+ private void checkTreeInvariants(Callsite site) {
+ for (Callsite child : site.children) {
+            assert site == child.parent : "Callsite " + site + " with child " + child + " has an invalid parent pointer " + child.parent;
+ checkTreeInvariants(child);
}
}
+ private UpdateScope noUpdates = new UpdateScope((oldNode, newNode) -> {
+ });
+
+ private UpdateScope activated = null;
+
+ /**
+ * Used to designate scopes in which {@link Invokable} registration or cloning should be handled
+ * differently.
+ */
+ public final class UpdateScope implements AutoCloseable {
+ private BiConsumer<Invokable, Invokable> updater;
+
+ private UpdateScope(BiConsumer<Invokable, Invokable> updater) {
+ this.updater = updater;
+ }
+
+ public void activate() {
+ if (activated != null) {
+ throw GraalError.shouldNotReachHere("InliningLog updating already set.");
+ }
+ activated = this;
+ }
+
+ @Override
+ public void close() {
+ if (GraalOptions.TraceInlining.getValue(options)) {
+ assert activated != null;
+ activated = null;
+ }
+ }
+
+ public BiConsumer<Invokable, Invokable> getUpdater() {
+ return updater;
+ }
+ }
+
+ public BiConsumer<Invokable, Invokable> getUpdateScope() {
+ if (activated == null) {
+ return null;
+ }
+ return activated.getUpdater();
+ }
+
+ /**
+ * Creates and sets a new update scope for the log.
+ *
+ * The specified {@code updater} is invoked when an {@link Invokable} node is registered or
+ * cloned. If the node is newly registered, then the first argument to the {@code updater} is
+ * {@code null}. If the node is cloned, then the first argument is the node it was cloned from.
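+     * <p>
+     * A minimal sketch of the intended use ({@code log} and the bodies are placeholders):
+     *
+     * <pre>
+     * try (InliningLog.UpdateScope scope = log.openUpdateScope((original, replacement) -> {
+     *     // custom bookkeeping for registered or cloned invokes
+     * })) {
+     *     // code that creates or clones Invokable nodes
+     * }
+     * </pre>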
+ *
+     * @param updater an operation that receives {@code null} (for a newly registered node) or
+     *            the original node (for a clone), and the registered or cloned {@link Invokable}
+     * @return a bound {@link UpdateScope} object, or {@code null} if tracing is disabled
+ */
+ public UpdateScope openUpdateScope(BiConsumer<Invokable, Invokable> updater) {
+ if (GraalOptions.TraceInlining.getValue(options)) {
+ UpdateScope scope = new UpdateScope(updater);
+ scope.activate();
+ return scope;
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Creates a new update scope that does not update the log.
+ *
+ * This update scope will not add a newly created {@code Invokable} to the log, nor will it
+ * amend its position if it was cloned. Instead, users need to update the inlining log with the
+ * new {@code Invokable} on their own.
+ *
+ * @see #openUpdateScope
+ */
+ public UpdateScope openDefaultUpdateScope() {
+ if (GraalOptions.TraceInlining.getValue(options)) {
+ noUpdates.activate();
+ return noUpdates;
+ } else {
+ return null;
+ }
+ }
+
+ public boolean containsLeafCallsite(Invokable invokable) {
+ return leaves.containsKey(invokable);
+ }
+
+ public void removeLeafCallsite(Invokable invokable) {
+ leaves.removeKey(invokable);
+ }
+
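+    /**
+     * Registers a callsite for a newly created invoke as a direct child of the root.
+     */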
+ public void trackNewCallsite(Invokable invoke) {
+ assert !leaves.containsKey(invoke);
+ Callsite callsite = new Callsite(root, invoke);
+ root.children.add(callsite);
+ leaves.put(invoke, callsite);
+ }
+
+ public void trackDuplicatedCallsite(Invokable sibling, Invokable newInvoke) {
+ Callsite siblingCallsite = leaves.get(sibling);
+ Callsite parentCallsite = siblingCallsite.parent;
+ Callsite callsite = parentCallsite.addChild(newInvoke);
+ leaves.put(newInvoke, callsite);
+ }
+
+ public void updateExistingCallsite(Invokable previousInvoke, Invokable newInvoke) {
+ Callsite callsite = leaves.get(previousInvoke);
+ leaves.removeKey(previousInvoke);
+ leaves.put(newInvoke, callsite);
+ callsite.invoke = newInvoke;
+ }
+
public String formatAsTree() {
- Callsite root = new Callsite(null);
- createTree("", null, root, decisions);
- StringBuilder builder = new StringBuilder();
+ StringBuilder builder = new StringBuilder(512);
formatAsTree(root, "", builder);
return builder.toString();
}
- private void createTree(String phasePrefix, BytecodePositionWithId caller, Callsite root, List<Decision> subDecisions) {
- for (Decision decision : subDecisions) {
- String phaseStack = phasePrefix.equals("") ? decision.getPhase() : phasePrefix + "-" + decision.getPhase();
- String target = decision.getTarget().format("%H.%n(%p)");
- BytecodePositionWithId absolutePosition = decision.getPosition().addCallerWithId(caller);
- String line = String.format("<%s> %s: %s", phaseStack, target, decision.getReason());
- root.createCallsite(absolutePosition, line);
- if (decision.getChildLog() != null) {
- createTree(phaseStack, absolutePosition, root, decision.getChildLog().getDecisions());
+ private void formatAsTree(Callsite site, String indent, StringBuilder builder) {
+ String position = site.positionString();
+ builder.append(indent).append("at ").append(position).append(": ");
+ if (site.decisions.isEmpty()) {
+ builder.append(System.lineSeparator());
+ } else {
+ for (Decision decision : site.decisions) {
+ builder.append(decision.toString());
+ builder.append(System.lineSeparator());
}
}
+ for (Callsite child : site.children) {
+ formatAsTree(child, indent + " ", builder);
+ }
}
-
- private void formatAsTree(Callsite site, String indent, StringBuilder builder) {
- String position = site.position != null ? site.position.withoutCaller().toString() : "<root>";
- String decision = String.join("; ", site.decisions);
- String line = String.format("%s%s; %s", indent, position, decision);
- builder.append(line).append(System.lineSeparator());
- String childIndent = indent + " ";
- site.children.entrySet().stream().sorted((x, y) -> x.getKey().compareTo(y.getKey())).forEach(e -> {
- formatAsTree(e.getValue(), childIndent, builder);
- });
- }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/Invokable.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.nodes;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import org.graalvm.compiler.core.common.GraalOptions;
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.graph.Node;
+
+/**
+ * An interface implemented by nodes that represent calls to other methods.
+ */
+public interface Invokable {
+ ResolvedJavaMethod getTargetMethod();
+
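+    /**
+     * Returns the bytecode index (BCI) of this call site in the caller method.
+     */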
+ int bci();
+
+ FixedNode asFixedNode();
+
+ /**
+     * Called on an {@link Invokable} node after it is registered with a graph.
+ *
+ * To override the default functionality, code that creates an {@link Invokable} should set the
+ * updating logic by calling {@link InliningLog#openUpdateScope}.
+ */
+ default void updateInliningLogAfterRegister(StructuredGraph newGraph) {
+ InliningLog log = newGraph.getInliningLog();
+ if (log.getUpdateScope() != null) {
+ log.getUpdateScope().accept(null, this);
+ } else {
+ assert !log.containsLeafCallsite(this);
+ log.trackNewCallsite(this);
+ }
+ }
+
+ /**
+     * Called on an {@link Invokable} node after it has been cloned from another node.
+     *
+     * This call is always preceded by a call to {@link Invokable#updateInliningLogAfterRegister}.
+ *
+ * To override the default functionality, code that creates an {@link Invokable} should set the
+ * updating logic by calling {@link InliningLog#openUpdateScope}.
+ */
+ default void updateInliningLogAfterClone(Node other) {
+ if (GraalOptions.TraceInlining.getValue(asFixedNode().getOptions())) {
+ // At this point, the invokable node was already added to the inlining log
+ // in the call to updateInliningLogAfterRegister, so we need to remove it.
+ InliningLog log = asFixedNode().graph().getInliningLog();
+ assert other instanceof Invokable;
+ if (log.getUpdateScope() != null) {
+ // InliningLog.UpdateScope determines how to update the log.
+ log.getUpdateScope().accept((Invokable) other, this);
+ } else if (other.graph() == this.asFixedNode().graph()) {
+ // This node was cloned as part of duplication.
+ // We need to add it as a sibling of the node other.
+ assert log.containsLeafCallsite(this) : "Node " + this + " not contained in the log.";
+ assert log.containsLeafCallsite((Invokable) other) : "Sibling " + other + " not contained in the log.";
+ log.removeLeafCallsite(this);
+ log.trackDuplicatedCallsite((Invokable) other, this);
+ } else {
+ // This node was added from a different graph.
+ // The adder is responsible for providing a context.
+ throw GraalError.shouldNotReachHere("No InliningLog.Update scope provided.");
+ }
+ }
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/Invoke.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/Invoke.java Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
-public interface Invoke extends StateSplit, Lowerable, DeoptimizingNode.DeoptDuring, FixedNodeInterface {
+public interface Invoke extends StateSplit, Lowerable, DeoptimizingNode.DeoptDuring, FixedNodeInterface, Invokable {
FixedNode next();
@@ -39,6 +39,7 @@
CallTargetNode callTarget();
+ @Override
int bci();
Node predecessor();
@@ -60,6 +61,11 @@
void setPolymorphic(boolean value);
+ @Override
+ default ResolvedJavaMethod getTargetMethod() {
+ return callTarget() != null ? callTarget().targetMethod() : null;
+ }
+
/**
* Returns the {@linkplain ResolvedJavaMethod method} from which this invoke is executed. This
* is the caller method and in the case of inlining may be different from the method of the
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/InvokeNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/InvokeNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -93,6 +93,16 @@
}
@Override
+ protected void afterClone(Node other) {
+ updateInliningLogAfterClone(other);
+ }
+
+ @Override
+ public FixedNode asFixedNode() {
+ return this;
+ }
+
+ @Override
public CallTargetNode callTarget() {
return callTarget;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/InvokeWithExceptionNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/InvokeWithExceptionNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -74,6 +74,16 @@
this.exceptionProbability = EXCEPTION_PROBA;
}
+ @Override
+ protected void afterClone(Node other) {
+ updateInliningLogAfterClone(other);
+ }
+
+ @Override
+ public FixedNode asFixedNode() {
+ return this;
+ }
+
public AbstractBeginNode exceptionEdge() {
return exceptionEdge;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/LoopExitNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/LoopExitNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -105,6 +105,9 @@
Node prev = this.predecessor();
while (tool.allUsagesAvailable() && prev instanceof BeginNode && prev.hasNoUsages()) {
AbstractBeginNode begin = (AbstractBeginNode) prev;
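+            // Prefer the source position of the begin node that is about to be removed; keep
+            // this exit's own position only if it is concrete and the begin node has none.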
+ if (begin.getNodeSourcePosition() != null || this.getNodeSourcePosition() == null || this.getNodeSourcePosition().isPlaceholder()) {
+ this.setNodeSourcePosition(begin.getNodeSourcePosition());
+ }
prev = prev.predecessor();
graph().removeFixed(begin);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/PiNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/PiNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -192,7 +192,7 @@
if (guard == null) {
// Try to merge the pi node with a load node.
- if (object instanceof ReadNode) {
+ if (object instanceof ReadNode && !object.hasMoreThanOneUsage()) {
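+                // Improving the read's stamp in place is only safe when this pi is the read's
+                // sole usage; other usages must not observe the strengthened stamp.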
ReadNode readNode = (ReadNode) object;
readNode.setStamp(readNode.stamp(NodeView.DEFAULT).improveWith(stamp));
return readNode;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/StaticDeoptimizingNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/StaticDeoptimizingNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -32,8 +32,12 @@
DeoptimizationReason getReason();
+ void setReason(DeoptimizationReason reason);
+
DeoptimizationAction getAction();
+ void setAction(DeoptimizationAction action);
+
JavaConstant getSpeculation();
/**
@@ -75,4 +79,15 @@
}
throw GraalError.shouldNotReachHere();
}
+
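+    /**
+     * Merges two deoptimization actions, returning {@code null} if they cannot be combined;
+     * only identical actions or the InvalidateRecompile/InvalidateReprofile pair merge.
+     */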
+ static DeoptimizationAction mergeActions(DeoptimizationAction a1, DeoptimizationAction a2) {
+ if (a1 == a2) {
+ return a1;
+ }
+ if (a1 == DeoptimizationAction.InvalidateRecompile && a2 == DeoptimizationAction.InvalidateReprofile ||
+ a1 == DeoptimizationAction.InvalidateReprofile && a2 == DeoptimizationAction.InvalidateRecompile) {
+ return DeoptimizationAction.InvalidateReprofile;
+ }
+ return null;
+ }
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/StructuredGraph.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/StructuredGraph.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,8 @@
*/
package org.graalvm.compiler.nodes;
+import static org.graalvm.compiler.graph.Graph.SourcePositionTracking.Default;
+
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -39,6 +41,7 @@
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.JavaMethodContext;
+import org.graalvm.compiler.debug.TTY;
import org.graalvm.compiler.graph.Graph;
import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.graph.NodeMap;
@@ -167,6 +170,7 @@
private CompilationIdentifier compilationId = CompilationIdentifier.INVALID_COMPILATION_ID;
private int entryBCI = JVMCICompiler.INVOCATION_ENTRY_BCI;
private boolean useProfilingInfo = true;
+ private SourcePositionTracking trackNodeSourcePosition = Default;
private final OptionValues options;
private Cancellable cancellable = null;
private final DebugContext debug;
@@ -179,6 +183,7 @@
this.options = options;
this.debug = debug;
this.assumptions = allowAssumptions == AllowAssumptions.YES ? new Assumptions() : null;
+ this.trackNodeSourcePosition = Graph.trackNodeSourcePositionDefault(options, debug);
}
/**
@@ -187,7 +192,8 @@
public Builder(OptionValues options, DebugContext debug) {
this.options = options;
this.debug = debug;
- assumptions = null;
+ this.assumptions = null;
+ this.trackNodeSourcePosition = Graph.trackNodeSourcePositionDefault(options, debug);
}
public String getName() {
@@ -257,13 +263,25 @@
return this;
}
+ public Builder trackNodeSourcePosition(SourcePositionTracking tracking) {
+ this.trackNodeSourcePosition = tracking;
+ return this;
+ }
+
+ public Builder trackNodeSourcePosition(boolean flag) {
+ if (flag) {
+ this.trackNodeSourcePosition = SourcePositionTracking.Track;
+ }
+ return this;
+ }
+
public Builder callerContext(NodeSourcePosition context) {
this.callerContext = context;
return this;
}
public StructuredGraph build() {
- return new StructuredGraph(name, rootMethod, entryBCI, assumptions, speculationLog, useProfilingInfo, compilationId, options, debug, cancellable, callerContext);
+ return new StructuredGraph(name, rootMethod, entryBCI, assumptions, speculationLog, useProfilingInfo, trackNodeSourcePosition, compilationId, options, debug, cancellable, callerContext);
}
}
@@ -328,6 +346,7 @@
Assumptions assumptions,
SpeculationLog speculationLog,
boolean useProfilingInfo,
+ SourcePositionTracking trackNodeSourcePosition,
CompilationIdentifier compilationId,
OptionValues options,
DebugContext debug,
@@ -342,8 +361,10 @@
this.assumptions = assumptions;
this.speculationLog = speculationLog;
this.useProfilingInfo = useProfilingInfo;
+ this.trackNodeSourcePosition = trackNodeSourcePosition;
+ assert trackNodeSourcePosition != null;
this.cancellable = cancellable;
- this.inliningLog = new InliningLog();
+ this.inliningLog = new InliningLog(rootMethod, options);
this.callerContext = context;
}
@@ -457,6 +478,12 @@
return inliningLog;
}
+ public void logInliningTree() {
+ if (GraalOptions.TraceInlining.getValue(getOptions())) {
+ TTY.println(getInliningLog().formatAsTree());
+ }
+ }
+
/**
* Creates a copy of this graph.
*
@@ -471,6 +498,7 @@
return copy(newName, duplicationMapCallback, compilationId, debugForCopy);
}
+ @SuppressWarnings("try")
private StructuredGraph copy(String newName, Consumer<UnmodifiableEconomicMap<Node, Node>> duplicationMapCallback, CompilationIdentifier newCompilationId, DebugContext debugForCopy) {
AllowAssumptions allowAssumptions = AllowAssumptions.ifNonNull(assumptions);
StructuredGraph copy = new StructuredGraph(newName,
@@ -479,6 +507,7 @@
assumptions == null ? null : new Assumptions(),
speculationLog,
useProfilingInfo,
+ trackNodeSourcePosition,
newCompilationId,
getOptions(), debugForCopy, null, callerContext);
if (allowAssumptions == AllowAssumptions.YES && assumptions != null) {
@@ -491,7 +520,13 @@
copy.isAfterExpandLogic = isAfterExpandLogic;
EconomicMap<Node, Node> replacements = EconomicMap.create(Equivalence.IDENTITY);
replacements.put(start, copy.start);
- UnmodifiableEconomicMap<Node, Node> duplicates = copy.addDuplicates(getNodes(), this, this.getNodeCount(), replacements);
+ UnmodifiableEconomicMap<Node, Node> duplicates;
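+        // Open the no-op update scope so that duplicated invokes are not re-registered one by
+        // one; the whole inlining log is transplanted via replaceLog after duplication.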
+ try (InliningLog.UpdateScope scope = copy.getInliningLog().openDefaultUpdateScope()) {
+ duplicates = copy.addDuplicates(getNodes(), this, this.getNodeCount(), replacements);
+ if (scope != null) {
+ copy.getInliningLog().replaceLog(duplicates, this.getInliningLog());
+ }
+ }
if (duplicationMapCallback != null) {
duplicationMapCallback.accept(duplicates);
}
@@ -951,6 +986,11 @@
@Override
protected void afterRegister(Node node) {
assert hasValueProxies() || !(node instanceof ValueProxyNode);
+ if (GraalOptions.TraceInlining.getValue(getOptions())) {
+ if (node instanceof Invokable) {
+ ((Invokable) node).updateInliningLogAfterRegister(this);
+ }
+ }
}
public NodeSourcePosition getCallerContext() {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/CompareNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/CompareNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -25,12 +25,12 @@
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
+import org.graalvm.compiler.core.common.PermanentBailoutException;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.calc.Condition;
import org.graalvm.compiler.core.common.type.AbstractObjectStamp;
import org.graalvm.compiler.core.common.type.AbstractPointerStamp;
import org.graalvm.compiler.core.common.type.IntegerStamp;
-import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.Canonicalizable;
import org.graalvm.compiler.nodeinfo.NodeInfo;
@@ -209,7 +209,7 @@
@SuppressWarnings("unused")
protected LogicNode optimizeNormalizeCompare(ConstantReflectionProvider constantReflection, MetaAccessProvider metaAccess, OptionValues options, Integer smallestCompareWidth,
Constant constant, NormalizeCompareNode normalizeNode, boolean mirrored, NodeView view) {
- throw new GraalError("NormalizeCompareNode connected to %s (%s %s %s)", this, constant, normalizeNode, mirrored);
+ throw new PermanentBailoutException("NormalizeCompareNode connected to %s (%s %s %s)", this, constant, normalizeNode, mirrored);
}
private static LogicNode optimizeConditional(Constant constant, ConditionalNode conditionalNode, ConstantReflectionProvider constantReflection, Condition cond, boolean unorderedIsTrue) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ConditionalNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/ConditionalNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -170,9 +170,9 @@
return trueValue;
}
} else if (lessThan.getX() == falseValue && lessThan.getY() == trueValue) {
- // return "x" for "x < y ? y : x" in case that we know "x <= y"
+ // return "y" for "x < y ? y : x" in case that we know "x <= y"
if (falseValueStamp.upperBound() <= trueValueStamp.lowerBound()) {
- return falseValue;
+ return trueValue;
}
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerConvertNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerConvertNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -139,6 +139,15 @@
return convert(input, stamp, true, view);
}
+ public static ValueNode convertUnsigned(ValueNode input, Stamp stamp, StructuredGraph graph, NodeView view) {
+ ValueNode convert = convert(input, stamp, true, view);
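+        // A freshly created conversion node is not yet part of the graph; add it together
+        // with its inputs before returning it to the caller.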
+ if (!convert.isAlive()) {
+ assert !convert.isDeleted();
+ convert = graph.addOrUniqueWithInputs(convert);
+ }
+ return convert;
+ }
+
public static ValueNode convert(ValueNode input, Stamp stamp, boolean zeroExtend, NodeView view) {
IntegerStamp fromStamp = (IntegerStamp) input.stamp(view);
IntegerStamp toStamp = (IntegerStamp) stamp;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerLowerThanNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/IntegerLowerThanNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,8 @@
*/
package org.graalvm.compiler.nodes.calc;
+import static jdk.vm.ci.code.CodeUtil.mask;
+
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.Stamp;
@@ -70,8 +72,8 @@
private Stamp getSucceedingStampForX(boolean mirror, boolean strict, Stamp xStampGeneric, Stamp yStampGeneric, ValueNode forX, ValueNode forY) {
Stamp s = getSucceedingStampForX(mirror, strict, xStampGeneric, yStampGeneric);
- if (s != null) {
- return s;
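+        // An unrestricted stamp carries no information: drop it so that the x < y + a case
+        // below still gets a chance to contribute or improve a stamp.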
+ if (s != null && s.isUnrestricted()) {
+ s = null;
}
if (forY instanceof AddNode && xStampGeneric instanceof IntegerStamp) {
IntegerStamp xStamp = (IntegerStamp) xStampGeneric;
@@ -88,11 +90,15 @@
IntegerStamp result = getOp().getSucceedingStampForXLowerXPlusA(mirror, strict, aStamp);
result = (IntegerStamp) xStamp.tryImproveWith(result);
if (result != null) {
- return result;
+ if (s != null) {
+ s = s.improveWith(result);
+ } else {
+ s = result;
+ }
}
}
}
- return null;
+ return s;
}
private Stamp getSucceedingStampForX(boolean mirror, boolean strict, Stamp xStampGeneric, Stamp yStampGeneric) {
@@ -278,7 +284,7 @@
}
low += 1;
}
- if (compare(low, lowerBound(xStamp)) > 0) {
+ if (compare(low, lowerBound(xStamp)) > 0 || upperBound(xStamp) != (xStamp.upperBound() & mask(xStamp.getBits()))) {
return forInteger(bits, low, upperBound(xStamp));
}
} else {
@@ -290,7 +296,7 @@
}
low -= 1;
}
- if (compare(low, upperBound(xStamp)) < 0) {
+ if (compare(low, upperBound(xStamp)) < 0 || lowerBound(xStamp) != (xStamp.lowerBound() & mask(xStamp.getBits()))) {
return forInteger(bits, lowerBound(xStamp), low);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/IntegerSwitchNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/IntegerSwitchNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,6 +24,7 @@
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -40,7 +41,6 @@
import org.graalvm.compiler.nodes.AbstractBeginNode;
import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.FixedGuardNode;
-import org.graalvm.compiler.nodes.FixedNode;
import org.graalvm.compiler.nodes.FixedWithNextNode;
import org.graalvm.compiler.nodes.LogicNode;
import org.graalvm.compiler.nodes.NodeView;
@@ -316,7 +316,7 @@
private void doReplace(ValueNode newValue, List<KeyData> newKeyDatas, ArrayList<AbstractBeginNode> newSuccessors, int newDefaultSuccessor, double newDefaultProbability) {
/* Sort the new keys (invariant of the IntegerSwitchNode). */
- newKeyDatas.sort((k1, k2) -> k1.key - k2.key);
+ newKeyDatas.sort(Comparator.comparingInt(k -> k.key));
/* Create the final data arrays. */
int newKeyCount = newKeyDatas.size();
@@ -349,20 +349,27 @@
}
}
+ /*
+ * Collect dead successors. Successors have to be cleaned before adding the new node to the
+ * graph.
+ */
+ List<AbstractBeginNode> deadSuccessors = successors.filter(s -> !newSuccessors.contains(s)).snapshot();
+ successors.clear();
+
+ /*
+ * Create the new switch node. This is done before removing dead successors as `killCFG`
+ * could edit some of the inputs (e.g., if `newValue` is a loop-phi of the loop that dies
+ * while removing successors).
+ */
+ AbstractBeginNode[] successorsArray = newSuccessors.toArray(new AbstractBeginNode[newSuccessors.size()]);
+ SwitchNode newSwitch = graph().add(new IntegerSwitchNode(newValue, successorsArray, newKeys, newKeyProbabilities, newKeySuccessors));
+
/* Remove dead successors. */
- for (int i = 0; i < blockSuccessorCount(); i++) {
- AbstractBeginNode successor = blockSuccessor(i);
- if (!newSuccessors.contains(successor)) {
- FixedNode fixedBranch = successor;
- fixedBranch.predecessor().replaceFirstSuccessor(fixedBranch, null);
- GraphUtil.killCFG(fixedBranch);
- }
- setBlockSuccessor(i, null);
+ for (AbstractBeginNode successor : deadSuccessors) {
+ GraphUtil.killCFG(successor);
}
- /* Create the new switch node and replace ourself with it. */
- AbstractBeginNode[] successorsArray = newSuccessors.toArray(new AbstractBeginNode[newSuccessors.size()]);
- SwitchNode newSwitch = graph().add(new IntegerSwitchNode(newValue, successorsArray, newKeys, newKeyProbabilities, newKeySuccessors));
+        /* Replace ourselves with the new switch. */
((FixedWithNextNode) predecessor()).setNext(newSwitch);
GraphUtil.killWithUnusedFloatingInputs(this);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/ClassInitializationPlugin.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/ClassInitializationPlugin.java Sat Mar 24 01:08:35 2018 +0100
@@ -25,10 +25,16 @@
import org.graalvm.compiler.nodes.FrameState;
import org.graalvm.compiler.nodes.ValueNode;
+import jdk.vm.ci.meta.ConstantPool;
import jdk.vm.ci.meta.ResolvedJavaType;
public interface ClassInitializationPlugin extends GraphBuilderPlugin {
boolean shouldApply(GraphBuilderContext builder, ResolvedJavaType type);
ValueNode apply(GraphBuilderContext builder, ResolvedJavaType type, FrameState frameState);
+
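+    /**
+     * Determines whether this plugin supports the lazy initialization of types referenced by
+     * the given constant pool.
+     */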
+ boolean supportsLazyInitialization(ConstantPool cp);
+
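+    /**
+     * Loads the type referenced by the constant pool entry at {@code cpi} for the given
+     * {@code bytecode}, triggering initialization if this plugin requires it.
+     */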
+ void loadReferencedType(GraphBuilderContext builder, ConstantPool cp, int cpi, int bytecode);
+
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderConfiguration.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderConfiguration.java Sat Mar 24 01:08:35 2018 +0100
@@ -228,8 +228,7 @@
}
protected GraphBuilderConfiguration(boolean eagerResolving, boolean unresolvedIsError, BytecodeExceptionMode bytecodeExceptionMode, boolean omitAssertions, boolean insertFullInfopoints,
- boolean trackNodeSourcePosition, ResolvedJavaType[] skippedExceptionTypes,
- Plugins plugins) {
+ boolean trackNodeSourcePosition, ResolvedJavaType[] skippedExceptionTypes, Plugins plugins) {
this.eagerResolving = eagerResolving;
this.unresolvedIsError = unresolvedIsError;
this.bytecodeExceptionMode = bytecodeExceptionMode;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InlineInvokePlugin.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InlineInvokePlugin.java Sat Mar 24 01:08:35 2018 +0100
@@ -45,33 +45,35 @@
* Denotes a call site that must not be inlined and should be implemented by a node that
* does not speculate on the call not raising an exception.
*/
- public static final InlineInfo DO_NOT_INLINE_WITH_EXCEPTION = new InlineInfo(null, null);
+ public static final InlineInfo DO_NOT_INLINE_WITH_EXCEPTION = new InlineInfo(null, null, null);
/**
     * Denotes a call site that must not be inlined and can be implemented by a node that speculates
* the call will not throw an exception.
*/
- public static final InlineInfo DO_NOT_INLINE_NO_EXCEPTION = new InlineInfo(null, null);
+ public static final InlineInfo DO_NOT_INLINE_NO_EXCEPTION = new InlineInfo(null, null, null);
/**
     * Denotes a call site that must not be inlined and whose execution should be transferred to
     * the interpreter in case of an exception.
*/
- public static final InlineInfo DO_NOT_INLINE_DEOPTIMIZE_ON_EXCEPTION = new InlineInfo(null, null);
+ public static final InlineInfo DO_NOT_INLINE_DEOPTIMIZE_ON_EXCEPTION = new InlineInfo(null, null, null);
private final ResolvedJavaMethod methodToInline;
+ private final ResolvedJavaMethod originalMethod;
private final BytecodeProvider intrinsicBytecodeProvider;
public static InlineInfo createStandardInlineInfo(ResolvedJavaMethod methodToInline) {
- return new InlineInfo(methodToInline, null);
+ return new InlineInfo(methodToInline, null, null);
}
- public static InlineInfo createIntrinsicInlineInfo(ResolvedJavaMethod methodToInline, BytecodeProvider intrinsicBytecodeProvider) {
- return new InlineInfo(methodToInline, intrinsicBytecodeProvider);
+ public static InlineInfo createIntrinsicInlineInfo(ResolvedJavaMethod methodToInline, ResolvedJavaMethod originalMethod, BytecodeProvider intrinsicBytecodeProvider) {
+ return new InlineInfo(methodToInline, originalMethod, intrinsicBytecodeProvider);
}
- private InlineInfo(ResolvedJavaMethod methodToInline, BytecodeProvider intrinsicBytecodeProvider) {
+ private InlineInfo(ResolvedJavaMethod methodToInline, ResolvedJavaMethod originalMethod, BytecodeProvider intrinsicBytecodeProvider) {
this.methodToInline = methodToInline;
+ this.originalMethod = originalMethod;
this.intrinsicBytecodeProvider = intrinsicBytecodeProvider;
}
@@ -87,6 +89,14 @@
}
/**
+ * Returns the original method if this is an inline of an intrinsic, or {@code null} if the
+ * call site must not be inlined.
+ */
+ public ResolvedJavaMethod getOriginalMethod() {
+ return originalMethod;
+ }
+
+ /**
     * Gets the provider of bytecode to be parsed for {@link #getMethodToInline()} if it is an
* intrinsic for the original method (i.e., the {@code method} passed to
* {@link InlineInvokePlugin#shouldInlineInvoke}). A {@code null} return value indicates
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/IntrinsicContext.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/IntrinsicContext.java Sat Mar 24 01:08:35 2018 +0100
@@ -29,6 +29,7 @@
import static org.graalvm.compiler.nodes.graphbuilderconf.IntrinsicContext.CompilationContext.INLINE_AFTER_PARSING;
import static org.graalvm.compiler.nodes.graphbuilderconf.IntrinsicContext.CompilationContext.ROOT_COMPILATION;
+import org.graalvm.compiler.api.replacements.MethodSubstitution;
import org.graalvm.compiler.bytecode.BytecodeProvider;
import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.compiler.nodes.AbstractMergeNode;
@@ -54,12 +55,12 @@
/**
* Method being intrinsified.
*/
- final ResolvedJavaMethod method;
+ final ResolvedJavaMethod originalMethod;
/**
* Method providing the intrinsic implementation.
*/
- final ResolvedJavaMethod intrinsic;
+ final ResolvedJavaMethod intrinsicMethod;
/**
* Provider of bytecode to be parsed for a method that is part of an intrinsic.
@@ -76,13 +77,14 @@
public IntrinsicContext(ResolvedJavaMethod method, ResolvedJavaMethod intrinsic, BytecodeProvider bytecodeProvider, CompilationContext compilationContext,
boolean allowPartialIntrinsicArgumentMismatch) {
- this.method = method;
- this.intrinsic = intrinsic;
+ this.originalMethod = method;
+ this.intrinsicMethod = intrinsic;
this.bytecodeProvider = bytecodeProvider;
assert bytecodeProvider != null;
this.compilationContext = compilationContext;
this.allowPartialIntrinsicArgumentMismatch = allowPartialIntrinsicArgumentMismatch;
assert !isCompilationRoot() || method.hasBytecodes() : "Cannot root compile intrinsic for native or abstract method " + method.format("%H.%n(%p)");
+ assert !method.equals(intrinsic) || method.getAnnotation(MethodSubstitution.class) == null : "method and intrinsic must be different: " + method + " " + intrinsic;
}
/**
@@ -98,14 +100,14 @@
* Gets the method being intrinsified.
*/
public ResolvedJavaMethod getOriginalMethod() {
- return method;
+ return originalMethod;
}
/**
* Gets the method providing the intrinsic implementation.
*/
public ResolvedJavaMethod getIntrinsicMethod() {
- return intrinsic;
+ return intrinsicMethod;
}
/**
@@ -121,9 +123,11 @@
* intrinsification falls back to the original method.
*/
public boolean isCallToOriginal(ResolvedJavaMethod targetMethod) {
- return method.equals(targetMethod) || intrinsic.equals(targetMethod);
+ return originalMethod.equals(targetMethod) || intrinsicMethod.equals(targetMethod);
}
+ private NodeSourcePosition nodeSourcePosition;
+
public boolean isPostParseInlined() {
return compilationContext.equals(INLINE_AFTER_PARSING);
}
@@ -132,6 +136,15 @@
return compilationContext.equals(ROOT_COMPILATION);
}
+ public NodeSourcePosition getNodeSourcePosition() {
+ return nodeSourcePosition;
+ }
+
+ public void setNodeSourcePosition(NodeSourcePosition position) {
+ assert nodeSourcePosition == null : "can only be set once";
+ this.nodeSourcePosition = position;
+ }
+
/**
* Denotes the compilation context in which an intrinsic is being parsed.
*/
@@ -182,7 +195,9 @@
// Only the last side effect on any execution path in a replacement
// can inherit the stateAfter of the replaced node
FrameState invalid = graph.add(new FrameState(INVALID_FRAMESTATE_BCI));
- invalid.setNodeSourcePosition(sourcePosition);
+ if (graph.trackNodeSourcePosition()) {
+ invalid.setNodeSourcePosition(sourcePosition);
+ }
for (StateSplit lastSideEffect : sideEffects.sideEffects()) {
lastSideEffect.setStateAfter(invalid);
}
@@ -194,7 +209,9 @@
} else {
frameState = graph.add(new FrameState(AFTER_BCI));
}
- frameState.setNodeSourcePosition(sourcePosition);
+ if (graph.trackNodeSourcePosition()) {
+ frameState.setNodeSourcePosition(sourcePosition);
+ }
return frameState;
} else {
if (forStateSplit instanceof AbstractMergeNode) {
@@ -202,12 +219,16 @@
if (sideEffects.isAfterSideEffect()) {
// A merge after one or more side effects
FrameState frameState = graph.add(new FrameState(AFTER_BCI));
- frameState.setNodeSourcePosition(sourcePosition);
+ if (graph.trackNodeSourcePosition()) {
+ frameState.setNodeSourcePosition(sourcePosition);
+ }
return frameState;
} else {
// A merge before any side effects
FrameState frameState = graph.add(new FrameState(BEFORE_BCI));
- frameState.setNodeSourcePosition(sourcePosition);
+ if (graph.trackNodeSourcePosition()) {
+ frameState.setNodeSourcePosition(sourcePosition);
+ }
return frameState;
}
} else {
@@ -219,6 +240,6 @@
@Override
public String toString() {
- return "Intrinsic{original: " + method.format("%H.%n(%p)") + ", intrinsic: " + intrinsic.format("%H.%n(%p)") + ", context: " + compilationContext + "}";
+ return "Intrinsic{original: " + originalMethod.format("%H.%n(%p)") + ", intrinsic: " + intrinsicMethod.format("%H.%n(%p)") + ", context: " + compilationContext + "}";
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/InstanceOfDynamicNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/InstanceOfDynamicNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -55,18 +55,24 @@
public static final NodeClass<InstanceOfDynamicNode> TYPE = NodeClass.create(InstanceOfDynamicNode.class);
private final boolean allowNull;
+ private final boolean exact;
- public static LogicNode create(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode mirror, ValueNode object, boolean allowNull) {
- LogicNode synonym = findSynonym(assumptions, constantReflection, mirror, object, allowNull);
+ public static LogicNode create(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode mirror, ValueNode object, boolean allowNull, boolean exact) {
+ LogicNode synonym = findSynonym(assumptions, constantReflection, mirror, object, allowNull, exact);
if (synonym != null) {
return synonym;
}
- return new InstanceOfDynamicNode(mirror, object, allowNull);
+ return new InstanceOfDynamicNode(mirror, object, allowNull, exact);
}
- protected InstanceOfDynamicNode(ValueNode mirror, ValueNode object, boolean allowNull) {
+ public static LogicNode create(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode mirror, ValueNode object, boolean allowNull) {
+ return create(assumptions, constantReflection, mirror, object, allowNull, false);
+ }
+
+ protected InstanceOfDynamicNode(ValueNode mirror, ValueNode object, boolean allowNull, boolean exact) {
super(TYPE, mirror, object);
this.allowNull = allowNull;
+ this.exact = exact;
assert mirror.getStackKind() == JavaKind.Object || mirror.getStackKind() == JavaKind.Illegal : mirror.getStackKind();
}
@@ -83,8 +89,7 @@
tool.getLowerer().lower(this, tool);
}
- private static LogicNode findSynonym(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode forMirror, ValueNode forObject,
- boolean allowNull) {
+ private static LogicNode findSynonym(Assumptions assumptions, ConstantReflectionProvider constantReflection, ValueNode forMirror, ValueNode forObject, boolean allowNull, boolean exact) {
if (forMirror.isConstant()) {
ResolvedJavaType t = constantReflection.asJavaType(forMirror.asConstant());
if (t != null) {
@@ -95,7 +100,7 @@
return LogicConstantNode.contradiction();
}
} else {
- TypeReference type = TypeReference.createTrusted(assumptions, t);
+ TypeReference type = exact ? TypeReference.createExactTrusted(t) : TypeReference.createTrusted(assumptions, t);
if (allowNull) {
return InstanceOfNode.createAllowNull(type, forObject, null, null);
} else {
@@ -117,7 +122,7 @@
@Override
public LogicNode canonical(CanonicalizerTool tool, ValueNode forMirror, ValueNode forObject) {
- LogicNode result = findSynonym(tool.getAssumptions(), tool.getConstantReflection(), forMirror, forObject, allowNull);
+ LogicNode result = findSynonym(tool.getAssumptions(), tool.getConstantReflection(), forMirror, forObject, allowNull, exact);
if (result != null) {
return result;
}
@@ -133,6 +138,10 @@
return allowNull;
}
+ public boolean isExact() {
+ return exact;
+ }
+
@Override
public Stamp getSucceedingStampForX(boolean negated, Stamp xStamp, Stamp yStamp) {
return null;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/spi/Replacements.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/spi/Replacements.java Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,7 @@
import org.graalvm.compiler.api.replacements.SnippetTemplateCache;
import org.graalvm.compiler.bytecode.Bytecode;
import org.graalvm.compiler.bytecode.BytecodeProvider;
+import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugin;
@@ -50,9 +51,10 @@
* Gets the snippet graph derived from a given method.
*
* @param args arguments to the snippet if available, otherwise {@code null}
+     * @param trackNodeSourcePosition whether the snippet graph should record node source positions
* @return the snippet graph, if any, that is derived from {@code method}
*/
- StructuredGraph getSnippet(ResolvedJavaMethod method, Object[] args);
+ StructuredGraph getSnippet(ResolvedJavaMethod method, Object[] args, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition);
/**
* Gets the snippet graph derived from a given method.
@@ -61,23 +63,25 @@
* recursive call and won't be processed for {@linkplain MethodSubstitution
* substitutions}.
* @param args arguments to the snippet if available, otherwise {@code null}
+     * @param trackNodeSourcePosition whether the snippet graph should record node source positions
* @return the snippet graph, if any, that is derived from {@code method}
*/
- StructuredGraph getSnippet(ResolvedJavaMethod method, ResolvedJavaMethod recursiveEntry, Object[] args);
+ StructuredGraph getSnippet(ResolvedJavaMethod method, ResolvedJavaMethod recursiveEntry, Object[] args, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition);
/**
* Registers a method as snippet.
*/
- void registerSnippet(ResolvedJavaMethod method);
+ void registerSnippet(ResolvedJavaMethod method, boolean trackNodeSourcePosition);
/**
* Gets a graph that is a substitution for a given method.
*
* @param invokeBci the call site BCI if this request is made for inlining a substitute
* otherwise {@code -1}
+     * @param trackNodeSourcePosition whether the substitution graph should record node source positions
* @return the graph, if any, that is a substitution for {@code method}
*/
- StructuredGraph getSubstitution(ResolvedJavaMethod method, int invokeBci);
+ StructuredGraph getSubstitution(ResolvedJavaMethod method, int invokeBci, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition);
/**
* Gets the substitute bytecode for a given method.
@@ -88,7 +92,8 @@
Bytecode getSubstitutionBytecode(ResolvedJavaMethod method);
/**
- * Determines if there may be a {@linkplain #getSubstitution(ResolvedJavaMethod, int)
+ * Determines if there may be a
+ * {@linkplain #getSubstitution(ResolvedJavaMethod, int, boolean, NodeSourcePosition)
* substitution graph} for a given method.
*
* A call to {@link #getSubstitution} may still return {@code null} for {@code method} and
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/ConditionalEliminationPhase.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/ConditionalEliminationPhase.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,6 +22,8 @@
*/
package org.graalvm.compiler.phases.common;
+import static org.graalvm.compiler.nodes.StaticDeoptimizingNode.mergeActions;
+
import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;
@@ -30,6 +32,7 @@
import org.graalvm.collections.Equivalence;
import org.graalvm.collections.MapCursor;
import org.graalvm.collections.Pair;
+import org.graalvm.compiler.core.common.cfg.AbstractControlFlowGraph;
import org.graalvm.compiler.core.common.cfg.BlockMap;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.BinaryOp;
@@ -92,6 +95,7 @@
import org.graalvm.compiler.phases.schedule.SchedulePhase.SchedulingStrategy;
import org.graalvm.compiler.phases.tiers.PhaseContext;
+import jdk.vm.ci.meta.DeoptimizationAction;
import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.TriState;
@@ -146,8 +150,8 @@
}
protected ControlFlowGraph.RecursiveVisitor<?> createVisitor(StructuredGraph graph, @SuppressWarnings("unused") ControlFlowGraph cfg, BlockMap<List<Node>> blockToNodes,
- @SuppressWarnings("unused") NodeMap<Block> nodeToBlock, PhaseContext context) {
- return new Instance(graph, blockToNodes, context);
+ NodeMap<Block> nodeToBlock, PhaseContext context) {
+ return new Instance(graph, blockToNodes, nodeToBlock, context);
}
public static class MoveGuardsUpwards implements ControlFlowGraph.RecursiveVisitor<Block> {
@@ -244,6 +248,7 @@
public static class Instance implements ControlFlowGraph.RecursiveVisitor<Integer> {
protected final NodeMap<InfoElement> map;
protected final BlockMap<List<Node>> blockToNodes;
+ protected final NodeMap<Block> nodeToBlock;
protected final CanonicalizerTool tool;
protected final NodeStack undoOperations;
protected final StructuredGraph graph;
@@ -255,10 +260,11 @@
*/
private Deque<DeoptimizingGuard> pendingTests;
- public Instance(StructuredGraph graph, BlockMap<List<Node>> blockToNodes, PhaseContext context) {
+ public Instance(StructuredGraph graph, BlockMap<List<Node>> blockToNodes, NodeMap<Block> nodeToBlock, PhaseContext context) {
this.graph = graph;
this.debug = graph.getDebug();
this.blockToNodes = blockToNodes;
+ this.nodeToBlock = nodeToBlock;
this.undoOperations = new NodeStack();
this.map = graph.createNodeMap();
pendingTests = new ArrayDeque<>();
@@ -614,7 +620,7 @@
* never be replaced with a pi node via canonicalization).
*/
private static Stamp getOtherSafeStamp(ValueNode x) {
- if (x.isConstant()) {
+ if (x.isConstant() || x.graph().isAfterFixedReadPhase()) {
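+                // After fixed reads have been introduced, stamps can no longer be invalidated
+                // by a later schedule change, so the precise stamp is safe to use here.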
return x.stamp(NodeView.DEFAULT);
}
return x.stamp(NodeView.DEFAULT).unrestricted();
@@ -633,6 +639,23 @@
return recursiveFoldStamp(node);
}
+ /**
+ * Look for a preceding guard whose condition is implied by {@code thisGuard}. If we find
+ * one, try to move this guard just above that preceding guard so that we can fold it:
+ *
+ * <pre>
+ * guard(C1); // preceding guard
+ * ...
+ * guard(C2); // thisGuard
+ * </pre>
+ *
+ * If C2 => C1, transform to:
+ *
+ * <pre>
+ * guard(C2);
+ * ...
+ * </pre>
+ */
protected boolean foldPendingTest(DeoptimizingGuard thisGuard, ValueNode original, Stamp newStamp, GuardRewirer rewireGuardFunction) {
for (DeoptimizingGuard pendingGuard : pendingTests) {
LogicNode pendingCondition = pendingGuard.getCondition();
@@ -661,12 +684,9 @@
if (result.isKnown()) {
/*
                 * The test can be folded using the information available, but the test can only
- * be moved up if we're sure there's no schedule dependence. For now limit it to
- * the original node and constants.
+ * be moved up if we're sure there's no schedule dependence.
*/
- InputFilter v = new InputFilter(original);
- thisGuard.getCondition().applyInputs(v);
- if (v.ok && foldGuard(thisGuard, pendingGuard, result.toBoolean(), newStamp, rewireGuardFunction)) {
+ if (canScheduleAbove(thisGuard.getCondition(), pendingGuard.asNode(), original) && foldGuard(thisGuard, pendingGuard, result.toBoolean(), newStamp, rewireGuardFunction)) {
return true;
}
}
@@ -674,8 +694,30 @@
return false;
}
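+    /**
+     * Determines whether {@code n} could be scheduled above {@code target}: either the block
+     * of {@code n} dominates the block of {@code target} (or precedes it within the same
+     * block), or, failing that, the conservative input check against {@code knownToBeAbove}
+     * succeeds.
+     */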
+ private boolean canScheduleAbove(Node n, Node target, ValueNode knownToBeAbove) {
+ Block targetBlock = nodeToBlock.get(target);
+ Block testBlock = nodeToBlock.get(n);
+ if (targetBlock != null && testBlock != null) {
+ if (targetBlock == testBlock) {
+ for (Node fixed : blockToNodes.get(targetBlock)) {
+ if (fixed == n) {
+ return true;
+ } else if (fixed == target) {
+ break;
+ }
+ }
+ } else if (AbstractControlFlowGraph.dominates(testBlock, targetBlock)) {
+ return true;
+ }
+ }
+ InputFilter v = new InputFilter(knownToBeAbove);
+ n.applyInputs(v);
+ return v.ok;
+ }
+
protected boolean foldGuard(DeoptimizingGuard thisGuard, DeoptimizingGuard otherGuard, boolean outcome, Stamp guardedValueStamp, GuardRewirer rewireGuardFunction) {
- if (otherGuard.getAction() == thisGuard.getAction() && otherGuard.getSpeculation() == thisGuard.getSpeculation()) {
+ DeoptimizationAction action = mergeActions(otherGuard.getAction(), thisGuard.getAction());
+ if (action != null && otherGuard.getSpeculation() == thisGuard.getSpeculation()) {
LogicNode condition = (LogicNode) thisGuard.getCondition().copyWithInputs();
/*
* We have ...; guard(C1); guard(C2);...
@@ -688,12 +730,16 @@
*
* - If C2 => !C1, `mustDeopt` is true and we transform to ..; guard(C1); deopt;
*/
+ // for the second case, the action of the deopt is copied from there:
+ thisGuard.setAction(action);
GuardRewirer rewirer = (guard, result, innerGuardedValueStamp, newInput) -> {
// `result` is `outcome`, `guard` is `otherGuard`
boolean mustDeopt = result == otherGuard.isNegated();
if (rewireGuardFunction.rewire(guard, mustDeopt == thisGuard.isNegated(), innerGuardedValueStamp, newInput)) {
if (!mustDeopt) {
otherGuard.setCondition(condition, thisGuard.isNegated());
+ otherGuard.setAction(action);
+ otherGuard.setReason(thisGuard.getReason());
}
return true;
}
@@ -783,16 +829,6 @@
}
} else if (node instanceof BinaryOpLogicNode) {
BinaryOpLogicNode binaryOpLogicNode = (BinaryOpLogicNode) node;
- infoElement = getInfoElements(binaryOpLogicNode);
- while (infoElement != null) {
- if (infoElement.getStamp().equals(StampFactory.contradiction())) {
- return rewireGuards(infoElement.getGuard(), false, infoElement.getProxifiedInput(), null, rewireGuardFunction);
- } else if (infoElement.getStamp().equals(StampFactory.tautology())) {
- return rewireGuards(infoElement.getGuard(), true, infoElement.getProxifiedInput(), null, rewireGuardFunction);
- }
- infoElement = nextElement(infoElement);
- }
-
ValueNode x = binaryOpLogicNode.getX();
ValueNode y = binaryOpLogicNode.getY();
infoElement = getInfoElements(x);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/ConvertDeoptimizeToGuardPhase.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/ConvertDeoptimizeToGuardPhase.java Sat Mar 24 01:08:35 2018 +0100
@@ -131,6 +131,7 @@
}
}
+ @SuppressWarnings("try")
private void processFixedGuardAndMerge(FixedGuardNode fixedGuard, PhaseContext context, CompareNode compare, ValueNode x, ValuePhiNode xPhi, ValueNode y, ValuePhiNode yPhi,
AbstractMergeNode merge) {
List<EndNode> mergePredecessors = merge.cfgPredecessors().snapshot();
@@ -152,11 +153,14 @@
ys = yPhi.valueAt(mergePredecessor).asConstant();
}
if (xs != null && ys != null && compare.condition().foldCondition(xs, ys, context.getConstantReflection(), compare.unorderedIsTrue()) == fixedGuard.isNegated()) {
- propagateFixed(mergePredecessor, fixedGuard, context.getLowerer());
+ try (DebugCloseable position = fixedGuard.withNodeSourcePosition()) {
+ propagateFixed(mergePredecessor, fixedGuard, context.getLowerer());
+ }
}
}
}
+ @SuppressWarnings("try")
private void propagateFixed(FixedNode from, StaticDeoptimizingNode deopt, LoweringProvider loweringProvider) {
Node current = from;
while (current != null) {
@@ -179,32 +183,36 @@
return;
} else if (current.predecessor() instanceof IfNode) {
IfNode ifNode = (IfNode) current.predecessor();
- StructuredGraph graph = ifNode.graph();
- LogicNode conditionNode = ifNode.condition();
- boolean negateGuardCondition = current == ifNode.trueSuccessor();
- FixedGuardNode guard = graph.add(new FixedGuardNode(conditionNode, deopt.getReason(), deopt.getAction(), deopt.getSpeculation(), negateGuardCondition));
- FixedWithNextNode pred = (FixedWithNextNode) ifNode.predecessor();
- AbstractBeginNode survivingSuccessor;
- if (negateGuardCondition) {
- survivingSuccessor = ifNode.falseSuccessor();
- } else {
- survivingSuccessor = ifNode.trueSuccessor();
- }
- graph.removeSplitPropagate(ifNode, survivingSuccessor);
+ // Prioritize the source position of the IfNode
+ try (DebugCloseable closable = ifNode.withNodeSourcePosition()) {
+ StructuredGraph graph = ifNode.graph();
+ LogicNode conditionNode = ifNode.condition();
+ boolean negateGuardCondition = current == ifNode.trueSuccessor();
+ FixedGuardNode guard = graph.add(new FixedGuardNode(conditionNode, deopt.getReason(), deopt.getAction(), deopt.getSpeculation(), negateGuardCondition));
- Node newGuard = guard;
- if (survivingSuccessor instanceof LoopExitNode) {
- newGuard = ProxyNode.forGuard(guard, (LoopExitNode) survivingSuccessor, graph);
- }
- survivingSuccessor.replaceAtUsages(InputType.Guard, newGuard);
+ FixedWithNextNode pred = (FixedWithNextNode) ifNode.predecessor();
+ AbstractBeginNode survivingSuccessor;
+ if (negateGuardCondition) {
+ survivingSuccessor = ifNode.falseSuccessor();
+ } else {
+ survivingSuccessor = ifNode.trueSuccessor();
+ }
+ graph.removeSplitPropagate(ifNode, survivingSuccessor);
- graph.getDebug().log("Converting deopt on %-5s branch of %s to guard for remaining branch %s.", negateGuardCondition, ifNode, survivingSuccessor);
- FixedNode next = pred.next();
- pred.setNext(guard);
- guard.setNext(next);
- SimplifierTool simplifierTool = GraphUtil.getDefaultSimplifier(null, null, null, false, graph.getAssumptions(), graph.getOptions(), loweringProvider);
- survivingSuccessor.simplify(simplifierTool);
- return;
+ Node newGuard = guard;
+ if (survivingSuccessor instanceof LoopExitNode) {
+ newGuard = ProxyNode.forGuard(guard, (LoopExitNode) survivingSuccessor, graph);
+ }
+ survivingSuccessor.replaceAtUsages(InputType.Guard, newGuard);
+
+ graph.getDebug().log("Converting deopt on %-5s branch of %s to guard for remaining branch %s.", negateGuardCondition, ifNode, survivingSuccessor);
+ FixedNode next = pred.next();
+ pred.setNext(guard);
+ guard.setNext(next);
+ SimplifierTool simplifierTool = GraphUtil.getDefaultSimplifier(null, null, null, false, graph.getAssumptions(), graph.getOptions(), loweringProvider);
+ survivingSuccessor.simplify(simplifierTool);
+ return;
+ }
} else if (current.predecessor() == null || current.predecessor() instanceof ControlSplitNode) {
assert current.predecessor() != null || (current instanceof StartNode && current == ((AbstractBeginNode) current).graph().start());
moveAsDeoptAfter((AbstractBeginNode) current, deopt);
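
The guard construction in this hunk moves essentially verbatim; the substantive change is the try-with-resources scope around it. Node.withNodeSourcePosition() returns a DebugCloseable that makes nodes created inside the block inherit the IfNode's source position. A minimal sketch of the pattern in isolation, with the anchor and newNode names assumed:

    import org.graalvm.compiler.debug.DebugCloseable;
    import org.graalvm.compiler.nodes.FixedNode;
    import org.graalvm.compiler.nodes.StructuredGraph;

    // Sketch under assumptions: `anchor` already carries a source position and
    // `newNode` has not been added to the graph yet.
    final class SourcePositionScopeSketch {
        static <T extends FixedNode> T addWithPositionOf(StructuredGraph graph, FixedNode anchor, T newNode) {
            try (DebugCloseable scope = anchor.withNodeSourcePosition()) {
                // Nodes added while the scope is open inherit anchor's NodeSourcePosition.
                return graph.add(newNode);
            }
        }
    }
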
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/NodeCounterPhase.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.phases.common;
+
+import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.graph.Node;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.options.Option;
+import org.graalvm.compiler.options.OptionKey;
+import org.graalvm.compiler.options.OptionType;
+import org.graalvm.compiler.phases.BasePhase;
+import org.graalvm.compiler.phases.tiers.PhaseContext;
+
+public class NodeCounterPhase extends BasePhase<PhaseContext> {
+
+ public static class Options {
+ // @formatter:off
+ @Option(help = "Counts the number of instances of each node class.", type = OptionType.Debug)
+ public static final OptionKey<Boolean> NodeCounters = new OptionKey<>(false);
+ // @formatter:on
+ }
+
+ @Override
+ protected void run(StructuredGraph graph, PhaseContext context) {
+ for (Node node : graph.getNodes()) {
+ DebugContext.counter("NodeCounter_%s",
+ node.getNodeClass().getClazz().getSimpleName()).increment(node.getDebug());
+ }
+ }
+}
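
NodeCounterPhase above is pure diagnostics: one DebugContext counter per node class, incremented once per node instance. The changeset only adds the phase; a hedged sketch of how a compiler configuration might schedule it when the option is set (on HotSpot, Graal options of this era are typically passed as -Dgraal.NodeCounters=true):

    import org.graalvm.compiler.options.OptionValues;
    import org.graalvm.compiler.phases.PhaseSuite;
    import org.graalvm.compiler.phases.common.NodeCounterPhase;
    import org.graalvm.compiler.phases.tiers.PhaseContext;

    // Assumed wiring point, not part of this changeset: append the phase only
    // when the NodeCounters option is enabled.
    final class NodeCounterWiringSketch {
        static void appendNodeCounter(PhaseSuite<PhaseContext> suite, OptionValues options) {
            if (NodeCounterPhase.Options.NodeCounters.getValue(options)) {
                suite.appendPhase(new NodeCounterPhase());
            }
        }
    }
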
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/InliningUtil.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/InliningUtil.java Sat Mar 24 01:08:35 2018 +0100
@@ -39,6 +39,7 @@
import org.graalvm.collections.UnmodifiableEconomicMap;
import org.graalvm.collections.UnmodifiableMapCursor;
import org.graalvm.compiler.api.replacements.MethodSubstitution;
+import org.graalvm.compiler.api.replacements.Snippet;
import org.graalvm.compiler.core.common.GraalOptions;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.core.common.type.StampFactory;
@@ -47,8 +48,8 @@
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.graph.GraalGraphError;
-import org.graalvm.compiler.graph.Graph;
import org.graalvm.compiler.graph.Graph.DuplicationReplacement;
+import org.graalvm.compiler.graph.Graph.Mark;
import org.graalvm.compiler.graph.Graph.NodeEventScope;
import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.graph.NodeInputList;
@@ -68,6 +69,7 @@
import org.graalvm.compiler.nodes.FixedNode;
import org.graalvm.compiler.nodes.FixedWithNextNode;
import org.graalvm.compiler.nodes.FrameState;
+import org.graalvm.compiler.nodes.InliningLog;
import org.graalvm.compiler.nodes.Invoke;
import org.graalvm.compiler.nodes.InvokeNode;
import org.graalvm.compiler.nodes.InvokeWithExceptionNode;
@@ -82,6 +84,7 @@
import org.graalvm.compiler.nodes.StartNode;
import org.graalvm.compiler.nodes.StateSplit;
import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.StructuredGraph.GuardsStage;
import org.graalvm.compiler.nodes.UnwindNode;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.IsNullNode;
@@ -102,7 +105,6 @@
import jdk.vm.ci.meta.Assumptions;
import jdk.vm.ci.meta.DeoptimizationAction;
import jdk.vm.ci.meta.DeoptimizationReason;
-import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.ResolvedJavaMethod;
import jdk.vm.ci.meta.ResolvedJavaType;
@@ -118,43 +120,104 @@
printInlining(info.methodAt(0), info.invoke(), inliningDepth, success, msg, args);
}
+ /**
+ * @see #printInlining
+ */
private static void printInlining(final ResolvedJavaMethod method, final Invoke invoke, final int inliningDepth, final boolean success, final String msg, final Object... args) {
if (HotSpotPrintInlining.getValue(invoke.asNode().getOptions())) {
Util.printInlining(method, invoke.bci(), inliningDepth, success, msg, args);
}
}
- public static void logInlinedMethod(InlineInfo info, int inliningDepth, boolean allowLogging, String msg, Object... args) {
- logInliningDecision(info, inliningDepth, allowLogging, true, msg, args);
+ /**
+ * Trace a decision to inline a method.
+ *
+ * This prints a HotSpot-style inlining message to the console, and it also logs the decision to
+ * the logging stream.
+ *
+ * Phases that perform inlining should use this method to trace the inlining decisions, and use
+ * the {@link #logInliningDecision} methods only for debugging purposes.
+ */
+ public static void traceInlinedMethod(InlineInfo info, int inliningDepth, boolean allowLogging, String msg, Object... args) {
+ traceMethod(info, inliningDepth, allowLogging, true, msg, args);
+ }
+
+ /**
+ * Trace a decision to inline a method.
+ *
+ * This prints a HotSpot-style inlining message to the console, and it also logs the decision to
+ * the logging stream.
+ *
+ * Phases that perform inlining should use this method to trace the inlining decisions, and use
+ * the {@link #logInliningDecision} methods only for debugging purposes.
+ */
+ public static void traceInlinedMethod(Invoke invoke, int inliningDepth, boolean allowLogging, ResolvedJavaMethod method, String msg, Object... args) {
+ traceMethod(invoke, inliningDepth, allowLogging, true, method, msg, args);
}
- public static void logNotInlinedMethod(InlineInfo info, int inliningDepth, String msg, Object... args) {
- logInliningDecision(info, inliningDepth, true, false, msg, args);
+ /**
+ * Trace a decision to not inline a method.
+ *
+ * This prints a HotSpot-style inlining message to the console, and it also logs the decision to
+ * the logging stream.
+ *
+ * Phases that perform inlining should use this method to trace the inlining decisions, and use
+ * the {@link #logInliningDecision} methods only for debugging purposes.
+ */
+ public static void traceNotInlinedMethod(InlineInfo info, int inliningDepth, String msg, Object... args) {
+ traceMethod(info, inliningDepth, true, false, msg, args);
}
- public static void logInliningDecision(InlineInfo info, int inliningDepth, boolean allowLogging, boolean success, String msg, final Object... args) {
+ /**
+ * Trace a decision to not inline a method.
+ *
+ * This prints a HotSpot-style inlining message to the console, and it also logs the decision to
+ * the logging stream.
+ *
+ * Phases that perform inlining should use this method to trace the inlining decisions, and use
+ * the {@link #logInliningDecision} methods only for debugging purposes.
+ */
+ public static void traceNotInlinedMethod(Invoke invoke, int inliningDepth, ResolvedJavaMethod method, String msg, Object... args) {
+ traceMethod(invoke, inliningDepth, true, false, method, msg, args);
+ }
+
+ private static void traceMethod(Invoke invoke, int inliningDepth, boolean allowLogging, boolean success, ResolvedJavaMethod method, String msg, Object... args) {
if (allowLogging) {
- printInlining(info, inliningDepth, success, msg, args);
- DebugContext debug = info.graph().getDebug();
- if (shouldLogInliningDecision(debug)) {
- logInliningDecision(debug, methodName(info), success, msg, args);
+ DebugContext debug = invoke.asNode().getDebug();
+ printInlining(method, invoke, inliningDepth, success, msg, args);
+ if (shouldLogMethod(debug)) {
+ String methodString = methodName(method, invoke);
+ logMethod(debug, methodString, success, msg, args);
}
}
}
- @SuppressWarnings("try")
- public static void logInliningDecision(DebugContext debug, final String msg, final Object... args) {
- try (DebugContext.Scope s = debug.scope(inliningDecisionsScopeString)) {
- // Can't use log here since we are varargs
- if (debug.isLogEnabled()) {
- debug.logv(msg, args);
+ private static void traceMethod(InlineInfo info, int inliningDepth, boolean allowLogging, boolean success, String msg, final Object... args) {
+ if (allowLogging) {
+ printInlining(info, inliningDepth, success, msg, args);
+ DebugContext debug = info.graph().getDebug();
+ if (shouldLogMethod(debug)) {
+ logMethod(debug, methodName(info), success, msg, args);
}
}
}
+ /**
+ * Output a generic inlining decision to the logging stream (e.g., an inlining
+ * termination condition).
+ *
+ * Used for debugging purposes.
+ */
+ public static void logInliningDecision(DebugContext debug, final String msg, final Object... args) {
+ logInlining(debug, msg, args);
+ }
+
+ /**
+ * Output a decision about not inlining a method to the logging stream, for debugging purposes.
+ */
public static void logNotInlinedMethod(Invoke invoke, String msg) {
DebugContext debug = invoke.asNode().getDebug();
- if (shouldLogInliningDecision(debug)) {
+ if (shouldLogMethod(debug)) {
String methodString = invoke.toString();
if (invoke.callTarget() == null) {
methodString += " callTarget=null";
@@ -164,33 +227,30 @@
methodString += " " + targetName;
}
}
- logInliningDecision(debug, methodString, false, msg, new Object[0]);
+ logMethod(debug, methodString, false, msg, new Object[0]);
}
}
- public static void logNotInlined(Invoke invoke, int inliningDepth, ResolvedJavaMethod method, String msg) {
- logNotInlinedInvoke(invoke, inliningDepth, method, msg, new Object[0]);
- }
-
- public static void logNotInlinedInvoke(Invoke invoke, int inliningDepth, ResolvedJavaMethod method, String msg, Object... args) {
- DebugContext debug = invoke.asNode().getDebug();
- printInlining(method, invoke, inliningDepth, false, msg, args);
- if (shouldLogInliningDecision(debug)) {
- String methodString = methodName(method, invoke);
- logInliningDecision(debug, methodString, false, msg, args);
- }
- }
-
- private static void logInliningDecision(DebugContext debug, final String methodString, final boolean success, final String msg, final Object... args) {
+ private static void logMethod(DebugContext debug, final String methodString, final boolean success, final String msg, final Object... args) {
String inliningMsg = "inlining " + methodString + ": " + msg;
if (!success) {
inliningMsg = "not " + inliningMsg;
}
- logInliningDecision(debug, inliningMsg, args);
+ logInlining(debug, inliningMsg, args);
}
@SuppressWarnings("try")
- public static boolean shouldLogInliningDecision(DebugContext debug) {
+ private static void logInlining(DebugContext debug, final String msg, final Object... args) {
+ try (DebugContext.Scope s = debug.scope(inliningDecisionsScopeString)) {
+ // Can't use log here since we are varargs
+ if (debug.isLogEnabled()) {
+ debug.logv(msg, args);
+ }
+ }
+ }
+
+ @SuppressWarnings("try")
+ private static boolean shouldLogMethod(DebugContext debug) {
try (DebugContext.Scope s = debug.scope(inliningDecisionsScopeString)) {
return debug.isLogEnabled();
}
@@ -269,7 +329,8 @@
}
/**
- * Performs an actual inlining, thereby replacing the given invoke with the given inlineGraph.
+ * Performs an actual inlining, thereby replacing the given invoke with the given
+ * {@code inlineGraph}.
*
* @param invoke the invoke that will be replaced
* @param inlineGraph the graph that the invoke will be replaced with
@@ -279,6 +340,23 @@
*/
@SuppressWarnings("try")
public static UnmodifiableEconomicMap<Node, Node> inline(Invoke invoke, StructuredGraph inlineGraph, boolean receiverNullCheck, ResolvedJavaMethod inlineeMethod) {
+ return inline(invoke, inlineGraph, receiverNullCheck, inlineeMethod, "", "");
+ }
+
+ /**
+ * Performs an actual inlining, thereby replacing the given invoke with the given
+ * {@code inlineGraph}.
+ *
+ * @param invoke the invoke that will be replaced
+ * @param inlineGraph the graph that the invoke will be replaced with
+ * @param receiverNullCheck true if a null check needs to be generated for non-static inlinings,
+ * false if no such check is required
+ * @param inlineeMethod the actual method being inlined. May be null for snippets.
+ * @param reason the reason for inlining, used in tracing
+ * @param phase the phase that invoked inlining
+ */
+ @SuppressWarnings("try")
+ public static UnmodifiableEconomicMap<Node, Node> inline(Invoke invoke, StructuredGraph inlineGraph, boolean receiverNullCheck, ResolvedJavaMethod inlineeMethod, String reason, String phase) {
FixedNode invokeNode = invoke.asNode();
StructuredGraph graph = invokeNode.graph();
final NodeInputList<ValueNode> parameters = invoke.callTarget().arguments();
@@ -339,7 +417,15 @@
assert invokeNode.successors().first() != null : invoke;
assert invokeNode.predecessor() != null;
- EconomicMap<Node, Node> duplicates = graph.addDuplicates(nodes, inlineGraph, inlineGraph.getNodeCount(), localReplacement);
+ Mark mark = graph.getMark();
+ // Rather than logging each duplicated node individually, attach the inlining log of the child graph to the current inlining log.
+ EconomicMap<Node, Node> duplicates;
+ try (InliningLog.UpdateScope scope = graph.getInliningLog().openDefaultUpdateScope()) {
+ duplicates = graph.addDuplicates(nodes, inlineGraph, inlineGraph.getNodeCount(), localReplacement);
+ if (scope != null) {
+ graph.getInliningLog().addDecision(invoke, true, reason, phase, duplicates, inlineGraph.getInliningLog());
+ }
+ }
FrameState stateAfter = invoke.stateAfter();
assert stateAfter == null || stateAfter.isAlive();
@@ -353,7 +439,7 @@
}
}
- updateSourcePositions(invoke, inlineGraph, duplicates, !Objects.equals(inlineGraph.method(), inlineeMethod));
+ updateSourcePositions(invoke, inlineGraph, duplicates, !Objects.equals(inlineGraph.method(), inlineeMethod), mark);
if (stateAfter != null) {
processFrameStates(invoke, inlineGraph, duplicates, stateAtExceptionEdge, returnNodes.size() > 1);
int callerLockDepth = stateAfter.nestedLockDepth();
@@ -409,9 +495,14 @@
return inlineForCanonicalization(invoke, inlineGraph, receiverNullCheck, inlineeMethod, null);
}
+ public static EconomicSet<Node> inlineForCanonicalization(Invoke invoke, StructuredGraph inlineGraph, boolean receiverNullCheck, ResolvedJavaMethod inlineeMethod,
+ Consumer<UnmodifiableEconomicMap<Node, Node>> duplicatesConsumer) {
+ return inlineForCanonicalization(invoke, inlineGraph, receiverNullCheck, inlineeMethod, duplicatesConsumer, "", "");
+ }
+
@SuppressWarnings("try")
public static EconomicSet<Node> inlineForCanonicalization(Invoke invoke, StructuredGraph inlineGraph, boolean receiverNullCheck, ResolvedJavaMethod inlineeMethod,
- Consumer<UnmodifiableEconomicMap<Node, Node>> duplicatesConsumer) {
+ Consumer<UnmodifiableEconomicMap<Node, Node>> duplicatesConsumer, String reason, String phase) {
HashSetNodeEventListener listener = new HashSetNodeEventListener();
/*
* This code relies on the fact that Graph.addDuplicates doesn't trigger the
@@ -419,7 +510,7 @@
* the graph into the current graph.
*/
try (NodeEventScope nes = invoke.asNode().graph().trackNodeEvents(listener)) {
- UnmodifiableEconomicMap<Node, Node> duplicates = InliningUtil.inline(invoke, inlineGraph, receiverNullCheck, inlineeMethod);
+ UnmodifiableEconomicMap<Node, Node> duplicates = InliningUtil.inline(invoke, inlineGraph, receiverNullCheck, inlineeMethod, reason, phase);
if (duplicatesConsumer != null) {
duplicatesConsumer.accept(duplicates);
}
@@ -462,7 +553,7 @@
}
} else {
if (unwindNode != null && unwindNode.isAlive()) {
- DeoptimizeNode deoptimizeNode = graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.NotCompiledExceptionHandler));
+ DeoptimizeNode deoptimizeNode = addDeoptimizeNode(graph, DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.NotCompiledExceptionHandler);
unwindNode.replaceAndDelete(deoptimizeNode);
}
}
@@ -571,27 +662,47 @@
}
@SuppressWarnings("try")
- private static void updateSourcePositions(Invoke invoke, StructuredGraph inlineGraph, UnmodifiableEconomicMap<Node, Node> duplicates, boolean isSubstitution) {
- if (inlineGraph.mayHaveNodeSourcePosition() && invoke.stateAfter() != null) {
- if (invoke.asNode().getNodeSourcePosition() == null) {
- // Temporarily ignore the assert below.
- return;
- }
-
- JavaConstant constantReceiver = invoke.getInvokeKind().hasReceiver() && !isSubstitution ? invoke.getReceiver().asJavaConstant() : null;
- NodeSourcePosition invokePos = invoke.asNode().getNodeSourcePosition();
- assert invokePos != null : "missing source information";
+ private static void updateSourcePositions(Invoke invoke, StructuredGraph inlineGraph, UnmodifiableEconomicMap<Node, Node> duplicates, boolean isSub, Mark mark) {
+ FixedNode invokeNode = invoke.asNode();
+ boolean isSubstitution = isSub || inlineGraph.method().getAnnotation(MethodSubstitution.class) != null || inlineGraph.method().getAnnotation(Snippet.class) != null;
+ StructuredGraph invokeGraph = invokeNode.graph();
+ assert !invokeGraph.trackNodeSourcePosition() || inlineGraph.trackNodeSourcePosition() ||
+ isSubstitution : String.format("trackNodeSourcePosition mismatch %s %s != %s %s", invokeGraph, invokeGraph.trackNodeSourcePosition(), inlineGraph,
+ inlineGraph.trackNodeSourcePosition());
+ if (invokeGraph.trackNodeSourcePosition() && invoke.stateAfter() != null) {
+ final NodeSourcePosition invokePos = invoke.asNode().getNodeSourcePosition();
+ updateSourcePosition(invokeGraph, duplicates, mark, invokePos, isSubstitution);
+ }
+ }
- EconomicMap<NodeSourcePosition, NodeSourcePosition> posMap = EconomicMap.create(Equivalence.DEFAULT);
- UnmodifiableMapCursor<Node, Node> cursor = duplicates.getEntries();
- while (cursor.advance()) {
- NodeSourcePosition pos = cursor.getKey().getNodeSourcePosition();
- if (pos != null) {
- NodeSourcePosition callerPos = pos.addCaller(constantReceiver, invokePos);
- if (!posMap.containsKey(callerPos)) {
- posMap.put(callerPos, callerPos);
- }
- cursor.getValue().setNodeSourcePosition(posMap.get(callerPos));
+ public static void updateSourcePosition(StructuredGraph invokeGraph, UnmodifiableEconomicMap<Node, Node> duplicates, Mark mark, NodeSourcePosition invokePos, boolean isSubstitution) {
+ /*
+ * Not every duplicate node is newly created, so only update the position of the newly
+ * created nodes.
+ */
+ EconomicSet<Node> newNodes = EconomicSet.create(Equivalence.DEFAULT);
+ newNodes.addAll(invokeGraph.getNewNodes(mark));
+ EconomicMap<NodeSourcePosition, NodeSourcePosition> posMap = EconomicMap.create(Equivalence.DEFAULT);
+ UnmodifiableMapCursor<Node, Node> cursor = duplicates.getEntries();
+ while (cursor.advance()) {
+ if (!newNodes.contains(cursor.getValue())) {
+ continue;
+ }
+ NodeSourcePosition pos = cursor.getKey().getNodeSourcePosition();
+ if (pos != null) {
+ NodeSourcePosition callerPos = posMap.get(pos);
+ if (callerPos == null) {
+ callerPos = pos.addCaller(invokePos, isSubstitution);
+ posMap.put(pos, callerPos);
+ }
+ cursor.getValue().setNodeSourcePosition(callerPos);
+ } else {
+ if (isSubstitution) {
+ /*
+ * If no other position is provided at least attribute the substituted node to
+ * the original invoke.
+ */
+ cursor.getValue().setNodeSourcePosition(invokePos);
}
}
}
@@ -642,7 +753,15 @@
}
frameState.replaceAndDelete(stateAfterException);
return stateAfterException;
- } else if (frameState.bci == BytecodeFrame.UNWIND_BCI || frameState.bci == BytecodeFrame.AFTER_EXCEPTION_BCI) {
+ } else if ((frameState.bci == BytecodeFrame.UNWIND_BCI && frameState.graph().getGuardsStage() == GuardsStage.FLOATING_GUARDS) || frameState.bci == BytecodeFrame.AFTER_EXCEPTION_BCI) {
+ /*
+ * This path converts the frame states relevant for exception unwinding to
+ * deoptimization. This is only allowed in configurations where Graal compiles code for
+ * speculative execution (e.g., JIT compilation in HotSpot), but not when compiled code
+ * must be deoptimization-free (e.g., AOT compilation for native image generation).
+ * There is currently no global flag in StructuredGraph to distinguish such modes, but
+ * the GuardsStage during inlining indicates the mode in which Graal operates.
+ */
handleMissingAfterExceptionFrameState(frameState, invoke, replacements, alwaysDuplicateStateAfter);
return frameState;
} else if (frameState.bci == BytecodeFrame.BEFORE_BCI) {
@@ -706,7 +825,6 @@
assert frameState.bci != BytecodeFrame.AFTER_EXCEPTION_BCI : frameState;
assert frameState.bci != BytecodeFrame.BEFORE_BCI : frameState;
assert frameState.bci != BytecodeFrame.UNKNOWN_BCI : frameState;
- assert frameState.bci != BytecodeFrame.UNWIND_BCI : frameState;
if (frameState.bci != BytecodeFrame.INVALID_FRAMESTATE_BCI) {
ResolvedJavaMethod method = frameState.getMethod();
if (method.equals(inlinedMethod)) {
@@ -738,7 +856,7 @@
}
public static FrameState handleMissingAfterExceptionFrameState(FrameState nonReplaceableFrameState, Invoke invoke, EconomicMap<Node, Node> replacements, boolean alwaysDuplicateStateAfter) {
- Graph graph = nonReplaceableFrameState.graph();
+ StructuredGraph graph = nonReplaceableFrameState.graph();
NodeWorkList workList = graph.createNodeWorkList();
workList.add(nonReplaceableFrameState);
for (Node node : workList) {
@@ -756,7 +874,7 @@
AbstractMergeNode merge = (AbstractMergeNode) fixedStateSplit;
while (merge.isAlive()) {
AbstractEndNode end = merge.forwardEnds().first();
- DeoptimizeNode deoptimizeNode = graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.NotCompiledExceptionHandler));
+ DeoptimizeNode deoptimizeNode = addDeoptimizeNode(graph, DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.NotCompiledExceptionHandler);
end.replaceAtPredecessor(deoptimizeNode);
GraphUtil.killCFG(end);
}
@@ -775,7 +893,7 @@
}
handleAfterBciFrameState(newInvoke.stateAfter(), invoke, alwaysDuplicateStateAfter);
} else {
- FixedNode deoptimizeNode = graph.add(new DeoptimizeNode(DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.NotCompiledExceptionHandler));
+ FixedNode deoptimizeNode = addDeoptimizeNode(graph, DeoptimizationAction.InvalidateRecompile, DeoptimizationReason.NotCompiledExceptionHandler);
if (fixedStateSplit instanceof AbstractBeginNode) {
deoptimizeNode = BeginNode.begin(deoptimizeNode);
}
@@ -788,6 +906,11 @@
return nonReplaceableFrameState;
}
+ private static DeoptimizeNode addDeoptimizeNode(StructuredGraph graph, DeoptimizationAction action, DeoptimizationReason reason) {
+ GraalError.guarantee(graph.getGuardsStage() == GuardsStage.FLOATING_GUARDS, "Cannot introduce speculative deoptimization when Graal is used with fixed guards");
+ return graph.add(new DeoptimizeNode(action, reason));
+ }
+
/**
* Ensure that all states are either {@link BytecodeFrame#INVALID_FRAMESTATE_BCI} or one of
* {@link BytecodeFrame#AFTER_BCI} or {@link BytecodeFrame#BEFORE_BCI}. Mixing of before and
@@ -856,8 +979,8 @@
return replacements.hasSubstitution(target, invokeBci);
}
- public static StructuredGraph getIntrinsicGraph(Replacements replacements, ResolvedJavaMethod target, int invokeBci) {
- return replacements.getSubstitution(target, invokeBci);
+ public static StructuredGraph getIntrinsicGraph(Replacements replacements, ResolvedJavaMethod target, int invokeBci, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition) {
+ return replacements.getSubstitution(target, invokeBci, trackNodeSourcePosition, replaceePosition);
}
public static FixedWithNextNode inlineMacroNode(Invoke invoke, ResolvedJavaMethod concrete, Class<? extends FixedWithNextNode> macroNodeClass) throws GraalError {
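
For callers, the visible InliningUtil change is the pair of trailing reason/phase strings threaded through inline(...) and inlineForCanonicalization(...) into the new InliningLog; the old signatures remain as forwarding overloads that pass empty strings. A hedged usage sketch (the class name and the string values are illustrative):

    import org.graalvm.collections.UnmodifiableEconomicMap;
    import org.graalvm.compiler.graph.Node;
    import org.graalvm.compiler.nodes.Invoke;
    import org.graalvm.compiler.nodes.StructuredGraph;
    import org.graalvm.compiler.phases.common.inlining.InliningUtil;

    import jdk.vm.ci.meta.ResolvedJavaMethod;

    // Assumed caller context: a phase that has already decided to inline.
    final class InlineWithLogSketch {
        static UnmodifiableEconomicMap<Node, Node> inlineWithLog(Invoke invoke, StructuredGraph calleeGraph, ResolvedJavaMethod calleeMethod) {
            // reason and phase are free-form strings recorded in the caller's InliningLog.
            return InliningUtil.inline(invoke, calleeGraph, /* receiverNullCheck */ true, calleeMethod,
                            "trivial (small graph)", "InliningPhase");
        }
    }
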
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/AbstractInlineInfo.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/AbstractInlineInfo.java Sat Mar 24 01:08:35 2018 +0100
@@ -64,7 +64,7 @@
@SuppressWarnings("try")
public final void populateInlinableElements(HighTierContext context, StructuredGraph caller, CanonicalizerPhase canonicalizer, OptionValues options) {
for (int i = 0; i < numberOfMethods(); i++) {
- Inlineable elem = Inlineable.getInlineableElement(methodAt(i), invoke, context, canonicalizer);
+ Inlineable elem = Inlineable.getInlineableElement(methodAt(i), invoke, context, canonicalizer, caller.trackNodeSourcePosition());
setInlinableElement(i, elem);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/elem/Inlineable.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/elem/Inlineable.java Sat Mar 24 01:08:35 2018 +0100
@@ -30,10 +30,10 @@
public interface Inlineable {
- static Inlineable getInlineableElement(final ResolvedJavaMethod method, Invoke invoke, HighTierContext context, CanonicalizerPhase canonicalizer) {
+ static Inlineable getInlineableElement(final ResolvedJavaMethod method, Invoke invoke, HighTierContext context, CanonicalizerPhase canonicalizer, boolean trackNodeSourcePosition) {
assert method != null;
assert invoke != null;
- return new InlineableGraph(method, invoke, context, canonicalizer);
+ return new InlineableGraph(method, invoke, context, canonicalizer, trackNodeSourcePosition);
}
int getNodeCount();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/elem/InlineableGraph.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/elem/InlineableGraph.java Sat Mar 24 01:08:35 2018 +0100
@@ -66,8 +66,8 @@
private FixedNodeProbabilityCache probabilites = new FixedNodeProbabilityCache();
- public InlineableGraph(final ResolvedJavaMethod method, final Invoke invoke, final HighTierContext context, CanonicalizerPhase canonicalizer) {
- StructuredGraph original = getOriginalGraph(method, context, canonicalizer, invoke.asNode().graph(), invoke.bci());
+ public InlineableGraph(final ResolvedJavaMethod method, final Invoke invoke, final HighTierContext context, CanonicalizerPhase canonicalizer, boolean trackNodeSourcePosition) {
+ StructuredGraph original = getOriginalGraph(method, context, canonicalizer, invoke.asNode().graph(), invoke.bci(), trackNodeSourcePosition);
// TODO copying the graph is only necessary if it is modified or if it contains any invokes
this.graph = (StructuredGraph) original.copy(invoke.asNode().getDebug());
specializeGraphToArguments(invoke, context, canonicalizer);
@@ -78,12 +78,13 @@
* The graph thus obtained is returned, ie the caller is responsible for cloning before
* modification.
*/
- private static StructuredGraph getOriginalGraph(final ResolvedJavaMethod method, final HighTierContext context, CanonicalizerPhase canonicalizer, StructuredGraph caller, int callerBci) {
- StructuredGraph result = InliningUtil.getIntrinsicGraph(context.getReplacements(), method, callerBci);
+ private static StructuredGraph getOriginalGraph(final ResolvedJavaMethod method, final HighTierContext context, CanonicalizerPhase canonicalizer, StructuredGraph caller, int callerBci,
+ boolean trackNodeSourcePosition) {
+ StructuredGraph result = InliningUtil.getIntrinsicGraph(context.getReplacements(), method, callerBci, trackNodeSourcePosition, null);
if (result != null) {
return result;
}
- return parseBytecodes(method, context, canonicalizer, caller);
+ return parseBytecodes(method, context, canonicalizer, caller, trackNodeSourcePosition);
}
/**
@@ -193,9 +194,10 @@
* </p>
*/
@SuppressWarnings("try")
- private static StructuredGraph parseBytecodes(ResolvedJavaMethod method, HighTierContext context, CanonicalizerPhase canonicalizer, StructuredGraph caller) {
+ private static StructuredGraph parseBytecodes(ResolvedJavaMethod method, HighTierContext context, CanonicalizerPhase canonicalizer, StructuredGraph caller, boolean trackNodeSourcePosition) {
DebugContext debug = caller.getDebug();
- StructuredGraph newGraph = new StructuredGraph.Builder(caller.getOptions(), debug, AllowAssumptions.ifNonNull(caller.getAssumptions())).method(method).build();
+ StructuredGraph newGraph = new StructuredGraph.Builder(caller.getOptions(), debug, AllowAssumptions.ifNonNull(caller.getAssumptions())).method(method).trackNodeSourcePosition(
+ trackNodeSourcePosition).build();
try (DebugContext.Scope s = debug.scope("InlineGraph", newGraph)) {
if (!caller.isUnsafeAccessTrackingEnabled()) {
newGraph.disableUnsafeAccessTracking();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/policy/GreedyInliningPolicy.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/policy/GreedyInliningPolicy.java Sat Mar 24 01:08:35 2018 +0100
@@ -69,17 +69,17 @@
final double relevance = invocation.relevance();
if (InlineEverything.getValue(options)) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "inline everything");
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "inline everything");
return true;
}
if (isIntrinsic(replacements, info)) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "intrinsic");
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "intrinsic");
return true;
}
if (info.shouldInline()) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "forced inlining");
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "forced inlining");
return true;
}
@@ -88,13 +88,13 @@
int lowLevelGraphSize = previousLowLevelGraphSize(info);
if (SmallCompiledLowLevelGraphSize.getValue(options) > 0 && lowLevelGraphSize > SmallCompiledLowLevelGraphSize.getValue(options) * inliningBonus) {
- InliningUtil.logNotInlinedMethod(info, inliningDepth, "too large previous low-level graph (low-level-nodes: %d, relevance=%f, probability=%f, bonus=%f, nodes=%d)", lowLevelGraphSize,
+ InliningUtil.traceNotInlinedMethod(info, inliningDepth, "too large previous low-level graph (low-level-nodes: %d, relevance=%f, probability=%f, bonus=%f, nodes=%d)", lowLevelGraphSize,
relevance, probability, inliningBonus, nodes);
return false;
}
if (nodes < TrivialInliningSize.getValue(options) * inliningBonus) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "trivial (relevance=%f, probability=%f, bonus=%f, nodes=%d)", relevance, probability, inliningBonus, nodes);
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "trivial (relevance=%f, probability=%f, bonus=%f, nodes=%d)", relevance, probability, inliningBonus, nodes);
return true;
}
@@ -106,19 +106,19 @@
*/
double invokes = determineInvokeProbability(info);
if (LimitInlinedInvokes.getValue(options) > 0 && fullyProcessed && invokes > LimitInlinedInvokes.getValue(options) * inliningBonus) {
- InliningUtil.logNotInlinedMethod(info, inliningDepth, "callee invoke probability is too high (invokeP=%f, relevance=%f, probability=%f, bonus=%f, nodes=%d)", invokes, relevance,
+ InliningUtil.traceNotInlinedMethod(info, inliningDepth, "callee invoke probability is too high (invokeP=%f, relevance=%f, probability=%f, bonus=%f, nodes=%d)", invokes, relevance,
probability, inliningBonus, nodes);
return false;
}
double maximumNodes = computeMaximumSize(relevance, (int) (MaximumInliningSize.getValue(options) * inliningBonus));
if (nodes <= maximumNodes) {
- InliningUtil.logInlinedMethod(info, inliningDepth, fullyProcessed, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d <= %f)", relevance, probability, inliningBonus,
+ InliningUtil.traceInlinedMethod(info, inliningDepth, fullyProcessed, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d <= %f)", relevance, probability, inliningBonus,
nodes, maximumNodes);
return true;
}
- InliningUtil.logNotInlinedMethod(info, inliningDepth, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d > %f)", relevance, probability, inliningBonus, nodes, maximumNodes);
+ InliningUtil.traceNotInlinedMethod(info, inliningDepth, "relevance-based (relevance=%f, probability=%f, bonus=%f, nodes=%d > %f)", relevance, probability, inliningBonus, nodes, maximumNodes);
return false;
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/policy/InlineMethodSubstitutionsPolicy.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/policy/InlineMethodSubstitutionsPolicy.java Sat Mar 24 01:08:35 2018 +0100
@@ -39,7 +39,7 @@
CallTargetNode callTarget = invocation.callee().invoke().callTarget();
if (callTarget instanceof MethodCallTargetNode) {
ResolvedJavaMethod calleeMethod = ((MethodCallTargetNode) callTarget).targetMethod();
- if (replacements.getSubstitution(calleeMethod, invocation.callee().invoke().bci()) != null) {
+ if (replacements.hasSubstitution(calleeMethod, invocation.callee().invoke().bci())) {
return true;
}
}
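
This one-line change shows the pattern the new getSubstitution signature forces on call sites: pure existence checks move to hasSubstitution, and only callers that actually need the graph pass the new trackNodeSourcePosition/replaceePosition arguments. A hedged sketch of the two-step pattern; passing false/null mirrors the test update later in this changeset:

    import org.graalvm.compiler.nodes.StructuredGraph;
    import org.graalvm.compiler.nodes.spi.Replacements;

    import jdk.vm.ci.meta.ResolvedJavaMethod;

    final class SubstitutionLookupSketch {
        static StructuredGraph substituteIfAvailable(Replacements replacements, ResolvedJavaMethod method, int bci) {
            if (!replacements.hasSubstitution(method, bci)) {
                return null; // existence check only, no substitute graph is built
            }
            return replacements.getSubstitution(method, bci, false, null);
        }
    }
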
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/walker/InliningData.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/walker/InliningData.java Sat Mar 24 01:08:35 2018 +0100
@@ -166,7 +166,7 @@
if (failureMessage == null) {
return true;
} else {
- InliningUtil.logNotInlined(invoke, inliningDepth(), method, failureMessage);
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), method, failureMessage);
return false;
}
}
@@ -257,13 +257,13 @@
private InlineInfo getTypeCheckedInlineInfo(Invoke invoke, ResolvedJavaMethod targetMethod) {
JavaTypeProfile typeProfile = ((MethodCallTargetNode) invoke.callTarget()).getProfile();
if (typeProfile == null) {
- InliningUtil.logNotInlined(invoke, inliningDepth(), targetMethod, "no type profile exists");
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "no type profile exists");
return null;
}
JavaTypeProfile.ProfiledType[] ptypes = typeProfile.getTypes();
if (ptypes == null || ptypes.length <= 0) {
- InliningUtil.logNotInlined(invoke, inliningDepth(), targetMethod, "no types in profile");
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "no types in profile");
return null;
}
ResolvedJavaType contextType = invoke.getContextType();
@@ -272,7 +272,7 @@
OptionValues options = invoke.asNode().getOptions();
if (ptypes.length == 1 && notRecordedTypeProbability == 0) {
if (!optimisticOpts.inlineMonomorphicCalls(options)) {
- InliningUtil.logNotInlined(invoke, inliningDepth(), targetMethod, "inlining monomorphic calls is disabled");
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "inlining monomorphic calls is disabled");
return null;
}
@@ -287,13 +287,13 @@
invoke.setPolymorphic(true);
if (!optimisticOpts.inlinePolymorphicCalls(options) && notRecordedTypeProbability == 0) {
- InliningUtil.logNotInlinedInvoke(invoke, inliningDepth(), targetMethod, "inlining polymorphic calls is disabled (%d types)", ptypes.length);
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "inlining polymorphic calls is disabled (%d types)", ptypes.length);
return null;
}
if (!optimisticOpts.inlineMegamorphicCalls(options) && notRecordedTypeProbability > 0) {
// due to filtering impossible types, notRecordedTypeProbability can be > 0 although
// the number of types is lower than what can be recorded in a type profile
- InliningUtil.logNotInlinedInvoke(invoke, inliningDepth(), targetMethod, "inlining megamorphic calls is disabled (%d types, %f %% not recorded types)", ptypes.length,
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "inlining megamorphic calls is disabled (%d types, %f %% not recorded types)", ptypes.length,
notRecordedTypeProbability * 100);
return null;
}
@@ -304,7 +304,7 @@
for (int i = 0; i < ptypes.length; i++) {
ResolvedJavaMethod concrete = ptypes[i].getType().resolveConcreteMethod(targetMethod, contextType);
if (concrete == null) {
- InliningUtil.logNotInlined(invoke, inliningDepth(), targetMethod, "could not resolve method");
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "could not resolve method");
return null;
}
int index = concreteMethods.indexOf(concrete);
@@ -331,7 +331,7 @@
if (newConcreteMethods.isEmpty()) {
// No method left that is worth inlining.
- InliningUtil.logNotInlinedInvoke(invoke, inliningDepth(), targetMethod, "no methods remaining after filtering less frequent methods (%d methods previously)",
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "no methods remaining after filtering less frequent methods (%d methods previously)",
concreteMethods.size());
return null;
}
@@ -341,7 +341,7 @@
}
if (concreteMethods.size() > maxMethodPerInlining) {
- InliningUtil.logNotInlinedInvoke(invoke, inliningDepth(), targetMethod, "polymorphic call with more than %d target methods", maxMethodPerInlining);
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "polymorphic call with more than %d target methods", maxMethodPerInlining);
return null;
}
@@ -362,13 +362,13 @@
if (usedTypes.isEmpty()) {
// No type left that is worth checking for.
- InliningUtil.logNotInlinedInvoke(invoke, inliningDepth(), targetMethod, "no types remaining after filtering less frequent types (%d types previously)", ptypes.length);
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "no types remaining after filtering less frequent types (%d types previously)", ptypes.length);
return null;
}
for (ResolvedJavaMethod concrete : concreteMethods) {
if (!checkTargetConditions(invoke, concrete)) {
- InliningUtil.logNotInlined(invoke, inliningDepth(), targetMethod, "it is a polymorphic method call and at least one invoked method cannot be inlined");
+ InliningUtil.traceNotInlinedMethod(invoke, inliningDepth(), targetMethod, "it is a polymorphic method call and at least one invoked method cannot be inlined");
return null;
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases/src/org/graalvm/compiler/phases/PhaseSuite.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases/src/org/graalvm/compiler/phases/PhaseSuite.java Sat Mar 24 01:08:35 2018 +0100
@@ -96,6 +96,13 @@
}
/**
+ * Gets an unmodifiable view on the phases in this suite.
+ */
+ public List<BasePhase<? super C>> getPhases() {
+ return Collections.unmodifiableList(phases);
+ }
+
+ /**
* Returns a {@link ListIterator} at the position of the first phase which is an instance of
* {@code phaseClass} or null if no such phase can be found.
*
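
getPhases() gives callers a read-only view of a suite's composition (backed by Collections.unmodifiableList, so mutation attempts throw UnsupportedOperationException). A small example; the tier variable is assumed:

    import org.graalvm.compiler.phases.BasePhase;
    import org.graalvm.compiler.phases.PhaseSuite;
    import org.graalvm.compiler.phases.tiers.PhaseContext;

    final class PhaseListingSketch {
        static void printPhases(PhaseSuite<PhaseContext> tier) {
            // Enumerate without being able to reorder or remove phases.
            for (BasePhase<? super PhaseContext> phase : tier.getPhases()) {
                System.out.println(phase.getClass().getSimpleName());
            }
        }
    }
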
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases/src/org/graalvm/compiler/phases/contract/VerifyNodeCosts.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases/src/org/graalvm/compiler/phases/contract/VerifyNodeCosts.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,7 +22,6 @@
*/
package org.graalvm.compiler.phases.contract;
-import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.function.Predicate;
@@ -64,13 +63,8 @@
}
private static NodeClass<?> getType(Class<?> c) {
- Field f;
try {
- f = c.getField("TYPE");
- f.setAccessible(true);
- Object val = f.get(null);
- NodeClass<?> nodeType = (NodeClass<?>) val;
- return nodeType;
+ return NodeClass.get(c);
} catch (Throwable t) {
throw new VerifyPhase.VerificationError("%s.java does not specify a TYPE field.", c.getName());
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/BinaryGraphPrinter.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/BinaryGraphPrinter.java Sat Mar 24 01:08:35 2018 +0100
@@ -46,6 +46,7 @@
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.NodeMap;
import org.graalvm.compiler.graph.NodeSourcePosition;
+import org.graalvm.compiler.graph.SourceLanguagePosition;
import org.graalvm.compiler.nodes.AbstractBeginNode;
import org.graalvm.compiler.nodes.AbstractEndNode;
import org.graalvm.compiler.nodes.AbstractMergeNode;
@@ -67,9 +68,9 @@
import org.graalvm.graphio.GraphStructure;
import org.graalvm.graphio.GraphTypes;
+import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.ResolvedJavaField;
import jdk.vm.ci.meta.ResolvedJavaMethod;
-import jdk.vm.ci.meta.ResolvedJavaType;
import jdk.vm.ci.meta.Signature;
public class BinaryGraphPrinter implements
@@ -265,6 +266,17 @@
}
props.put("category", "floating");
}
+ if (node.getNodeSourcePosition() != null) {
+ NodeSourcePosition pos = node.getNodeSourcePosition();
+ while (pos != null) {
+ SourceLanguagePosition cur = pos.getSourceLanauage();
+ if (cur != null) {
+ cur.addSourceInformation(props);
+ break;
+ }
+ pos = pos.getCaller();
+ }
+ }
if (getSnippetReflectionProvider() != null) {
for (Map.Entry<String, Object> prop : props.entrySet()) {
if (prop.getValue() instanceof JavaConstantFormattable) {
@@ -380,8 +392,8 @@
if (obj instanceof Class<?>) {
return ((Class<?>) obj).getName();
}
- if (obj instanceof ResolvedJavaType) {
- return ((ResolvedJavaType) obj).toJavaName();
+ if (obj instanceof JavaType) {
+ return ((JavaType) obj).toJavaName();
}
return null;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/GraphPrinter.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/GraphPrinter.java Sat Mar 24 01:08:35 2018 +0100
@@ -79,7 +79,7 @@
/**
* {@code jdk.vm.ci} module.
*/
- Object JVMCI_MODULE = JDK9Method.JAVA_SPECIFICATION_VERSION < 9 ? null : JDK9Method.getModule.invoke(Services.class);
+ Object JVMCI_MODULE = JDK9Method.JAVA_SPECIFICATION_VERSION < 9 ? null : JDK9Method.getModule(Services.class);
/**
* Classes whose {@link #toString()} method does not run any untrusted code.
@@ -111,8 +111,8 @@
return true;
}
} else {
- Object module = JDK9Method.getModule.invoke(c);
- if (JVMCI_MODULE == module || (Boolean) JDK9Method.isOpenTo.invoke(JVMCI_MODULE, JVMCI_RUNTIME_PACKAGE, module)) {
+ Object module = JDK9Method.getModule(c);
+ if (JVMCI_MODULE == module || JDK9Method.isOpenTo(JVMCI_MODULE, JVMCI_RUNTIME_PACKAGE, module)) {
// Can access non-statically-exported package in JVMCI
return true;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/GraphPrinterDumpHandler.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/GraphPrinterDumpHandler.java Sat Mar 24 01:08:35 2018 +0100
@@ -244,7 +244,10 @@
lastMethodOrGraph = o;
}
}
-
+ if (result.size() == 2 && result.get(1).startsWith("TruffleGraal")) {
+ result.clear();
+ result.add("Graal Graphs");
+ }
if (result.isEmpty()) {
result.add(graph.toString());
graphSeen = true;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64FloatArithmeticSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64FloatArithmeticSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -75,7 +75,7 @@
Arguments args = new Arguments(snippet, graph.getGuardsStage(), tool.getLoweringStage());
args.add("x", node.getX());
args.add("y", node.getY());
- template(graph.getDebug(), args).instantiate(providers.getMetaAccess(), node, SnippetTemplate.DEFAULT_REPLACER, tool, args);
+ template(node, args).instantiate(providers.getMetaAccess(), node, SnippetTemplate.DEFAULT_REPLACER, tool, args);
}
@Snippet
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64IntegerArithmeticSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64IntegerArithmeticSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -106,7 +106,7 @@
Arguments args = new Arguments(snippet, graph.getGuardsStage(), tool.getLoweringStage());
args.add("x", node.getX());
args.add("y", node.getY());
- template(graph.getDebug(), args).instantiate(providers.getMetaAccess(), node, SnippetTemplate.DEFAULT_REPLACER, args);
+ template(node, args).instantiate(providers.getMetaAccess(), node, SnippetTemplate.DEFAULT_REPLACER, args);
}
@Snippet
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64ConvertSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64ConvertSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -189,7 +189,7 @@
args.add("input", convert.getValue());
args.add("result", graph.unique(new AMD64FloatConvertNode(convert.getFloatConvert(), convert.getValue())));
- SnippetTemplate template = template(convert.getDebug(), args);
+ SnippetTemplate template = template(convert, args);
convert.getDebug().log("Lowering %s in %s: node=%s, template=%s, arguments=%s", convert.getFloatConvert(), graph, convert, template, args);
template.instantiate(providers.getMetaAccess(), convert, DEFAULT_REPLACER, tool, args);
convert.safeDelete();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64GraphBuilderPlugins.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64GraphBuilderPlugins.java Sat Mar 24 01:08:35 2018 +0100
@@ -29,6 +29,7 @@
import static org.graalvm.compiler.replacements.nodes.UnaryMathIntrinsicNode.UnaryOperation.LOG10;
import static org.graalvm.compiler.replacements.nodes.UnaryMathIntrinsicNode.UnaryOperation.SIN;
import static org.graalvm.compiler.replacements.nodes.UnaryMathIntrinsicNode.UnaryOperation.TAN;
+import static org.graalvm.compiler.serviceprovider.JDK9Method.JAVA_SPECIFICATION_VERSION;
import static org.graalvm.compiler.serviceprovider.JDK9Method.Java8OrEarlier;
import java.util.Arrays;
@@ -75,6 +76,8 @@
registerIntegerLongPlugins(invocationPlugins, LongSubstitutions.class, JavaKind.Long, arch, replacementsBytecodeProvider);
registerUnsafePlugins(invocationPlugins, replacementsBytecodeProvider);
registerStringPlugins(invocationPlugins, arch, replacementsBytecodeProvider);
+ registerStringLatin1Plugins(invocationPlugins, replacementsBytecodeProvider);
+ registerStringUTF16Plugins(invocationPlugins, replacementsBytecodeProvider);
registerMathPlugins(invocationPlugins, arch, arithmeticStubs, replacementsBytecodeProvider);
registerArraysEqualsPlugins(invocationPlugins, replacementsBytecodeProvider);
}
@@ -183,12 +186,34 @@
}
private static void registerStringPlugins(InvocationPlugins plugins, AMD64 arch, BytecodeProvider replacementsBytecodeProvider) {
- if (Java8OrEarlier && arch.getFeatures().contains(CPUFeature.SSE4_2)) {
+ if (Java8OrEarlier) {
Registration r;
r = new Registration(plugins, String.class, replacementsBytecodeProvider);
r.setAllowOverwrite(true);
- r.registerMethodSubstitution(AMD64StringSubstitutions.class, "indexOf", char[].class, int.class,
- int.class, char[].class, int.class, int.class, int.class);
+ if (arch.getFeatures().contains(CPUFeature.SSE4_2)) {
+ r.registerMethodSubstitution(AMD64StringSubstitutions.class, "indexOf", char[].class, int.class,
+ int.class, char[].class, int.class, int.class, int.class);
+ }
+ // r.registerMethodSubstitution(AMD64StringSubstitutions.class, "compareTo",
+ // Receiver.class, String.class);
+ }
+ }
+
+ private static void registerStringLatin1Plugins(InvocationPlugins plugins, BytecodeProvider replacementsBytecodeProvider) {
+ if (JAVA_SPECIFICATION_VERSION >= 9) {
+ Registration r = new Registration(plugins, "java.lang.StringLatin1", replacementsBytecodeProvider);
+ r.setAllowOverwrite(true);
+ r.registerMethodSubstitution(AMD64StringLatin1Substitutions.class, "compareTo", byte[].class, byte[].class);
+ r.registerMethodSubstitution(AMD64StringLatin1Substitutions.class, "compareToUTF16", byte[].class, byte[].class);
+ }
+ }
+
+ private static void registerStringUTF16Plugins(InvocationPlugins plugins, BytecodeProvider replacementsBytecodeProvider) {
+ if (JAVA_SPECIFICATION_VERSION >= 9) {
+ Registration r = new Registration(plugins, "java.lang.StringUTF16", replacementsBytecodeProvider);
+ r.setAllowOverwrite(true);
+ r.registerMethodSubstitution(AMD64StringUTF16Substitutions.class, "compareTo", byte[].class, byte[].class);
+ r.registerMethodSubstitution(AMD64StringUTF16Substitutions.class, "compareToLatin1", byte[].class, byte[].class);
}
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64StringLatin1Substitutions.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.replacements.amd64;
+
+import org.graalvm.compiler.api.replacements.ClassSubstitution;
+import org.graalvm.compiler.api.replacements.MethodSubstitution;
+import org.graalvm.compiler.replacements.nodes.ArrayCompareToNode;
+
+import jdk.vm.ci.meta.JavaKind;
+
+// JaCoCo Exclude
+
+/**
+ * Substitutions for {@code java.lang.StringLatin1} methods.
+ *
+ * Since JDK 9.
+ */
+@ClassSubstitution(className = "java.lang.StringLatin1", optional = true)
+public class AMD64StringLatin1Substitutions {
+
+ /**
+ * @param value is a byte[] holding Latin-1 bytes
+ * @param other is a byte[] holding Latin-1 bytes
+ */
+ @MethodSubstitution
+ public static int compareTo(byte[] value, byte[] other) {
+ return ArrayCompareToNode.compareTo(value, other, value.length, other.length, JavaKind.Byte, JavaKind.Byte);
+ }
+
+ /**
+ * @param value is a byte[] holding Latin-1 bytes
+ * @param other is a byte[] holding UTF-16 chars
+ */
+ @MethodSubstitution
+ public static int compareToUTF16(byte[] value, byte[] other) {
+ return ArrayCompareToNode.compareTo(value, other, value.length, other.length, JavaKind.Byte, JavaKind.Char);
+ }
+
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64StringSubstitutions.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64StringSubstitutions.java Sat Mar 24 01:08:35 2018 +0100
@@ -26,8 +26,11 @@
import org.graalvm.compiler.api.replacements.Fold;
import org.graalvm.compiler.api.replacements.Fold.InjectedParameter;
import org.graalvm.compiler.api.replacements.MethodSubstitution;
+import org.graalvm.compiler.core.common.SuppressFBWarnings;
import org.graalvm.compiler.core.common.spi.ArrayOffsetProvider;
import org.graalvm.compiler.graph.Node.ConstantNodeParameter;
+import org.graalvm.compiler.replacements.StringSubstitutions;
+import org.graalvm.compiler.replacements.nodes.ArrayCompareToNode;
import org.graalvm.compiler.word.Word;
import org.graalvm.word.Pointer;
@@ -86,4 +89,16 @@
}
return result;
}
+
+ @MethodSubstitution(isStatic = false)
+ @SuppressFBWarnings(value = "ES_COMPARING_PARAMETER_STRING_WITH_EQ", justification = "reference equality on the receiver is what we want")
+ public static int compareTo(String receiver, String anotherString) {
+ if (receiver == anotherString) {
+ return 0;
+ }
+ char[] value = StringSubstitutions.getValue(receiver);
+ char[] other = StringSubstitutions.getValue(anotherString);
+ return ArrayCompareToNode.compareTo(value, other, value.length << 1, other.length << 1, JavaKind.Char, JavaKind.Char);
+ }
+
}
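
Note the value.length << 1 in the new compareTo substitution: the compare-to intrinsic measures lengths in bytes (the Latin-1 variant in this changeset passes byte[].length unshifted), while char[].length counts two-byte chars. A worked one-liner for the conversion:

    // 2 bytes per UTF-16 char, so a char[] length doubles when expressed in bytes.
    public class Utf16LengthDemo {
        public static void main(String[] args) {
            char[] value = {'a', 'b', 'c'};
            int lengthInBytes = value.length << 1; // 3 chars * 2 bytes/char = 6
            System.out.println(lengthInBytes);
        }
    }
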
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64StringUTF16Substitutions.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.replacements.amd64;
+
+import org.graalvm.compiler.api.replacements.ClassSubstitution;
+import org.graalvm.compiler.api.replacements.MethodSubstitution;
+import org.graalvm.compiler.replacements.nodes.ArrayCompareToNode;
+
+import jdk.vm.ci.meta.JavaKind;
+
+// JaCoCo Exclude
+
+/**
+ * Substitutions for {@code java.lang.StringUTF16} methods.
+ *
+ * Since JDK 9.
+ */
+@ClassSubstitution(className = "java.lang.StringUTF16", optional = true)
+public class AMD64StringUTF16Substitutions {
+
+ /**
+ * @param value is byte[] holding the UTF-16 encoding of a char[]
+ * @param other is byte[] holding the UTF-16 encoding of a char[]
+ */
+ @MethodSubstitution
+ public static int compareTo(byte[] value, byte[] other) {
+ return ArrayCompareToNode.compareTo(value, other, value.length, other.length, JavaKind.Char, JavaKind.Char);
+ }
+
+ /**
+ * @param value is byte[] holding the UTF-16 encoding of a char[]
+ * @param other is byte[] (Latin-1 encoded)
+ */
+ @MethodSubstitution
+ public static int compareToLatin1(byte[] value, byte[] other) {
+ /*
+ * Swap the array arguments because the intrinsic expects them in byte[]/char[] order,
+ * while the kind arguments stay in the original argument order.
+ */
+ return ArrayCompareToNode.compareTo(other, value, other.length, value.length, JavaKind.Char, JavaKind.Byte);
+ }
+
+}
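For the swapped-argument variant, the plain-Java semantics (again illustrative, reusing the hypothetical helper sketched earlier) amount to negating the Latin-1 vs UTF-16 comparison with the operands exchanged; the kind arguments presumably keep the original order so the intrinsic can account for that exchange.

```java
// Illustrative only: value holds UTF-16 bytes, other holds Latin-1 bytes.
static int refCompareToLatin1(byte[] value, byte[] other) {
    // Exchanging operands flips the sign of the comparison result.
    return -LatinCompareReference.refCompareToUTF16(other, value);
}
```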
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/MethodSubstitutionTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/MethodSubstitutionTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -109,7 +109,7 @@
StructuredGraph graph = testGraph(testMethodName);
// Check to see if the resulting graph contains the expected node
- StructuredGraph replacement = getReplacements().getSubstitution(realMethod, -1);
+ StructuredGraph replacement = getReplacements().getSubstitution(realMethod, -1, false, null);
if (replacement == null && !optional) {
assertInGraph(graph, intrinsicClass);
}
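The two extra arguments correspond to the updated Replacements.getSubstitution(method, invokeBci, trackNodeSourcePosition, replaceePosition) signature introduced in ReplacementsImpl further down; the tests pass false/null because they do not need node source positions.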
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/PEGraphDecoderTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/PEGraphDecoderTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -136,9 +136,9 @@
registerPlugins(graphBuilderConfig.getPlugins().getInvocationPlugins());
targetGraph = new StructuredGraph.Builder(getInitialOptions(), debug, AllowAssumptions.YES).method(testMethod).build();
CachingPEGraphDecoder decoder = new CachingPEGraphDecoder(getTarget().arch, targetGraph, getProviders(), graphBuilderConfig, OptimisticOptimizations.NONE, AllowAssumptions.YES,
- null, null, new InlineInvokePlugin[]{new InlineAll()}, null, null, null);
+ null, null, new InlineInvokePlugin[]{new InlineAll()}, null, null, null, null);
- decoder.decode(testMethod);
+ decoder.decode(testMethod, false);
debug.dump(DebugContext.BASIC_LEVEL, targetGraph, "Target Graph");
targetGraph.verify();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/SnippetsTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/SnippetsTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -43,6 +43,6 @@
@Override
protected StructuredGraph parse(Builder builder, PhaseSuite<HighTierContext> graphBuilderSuite) {
- return installer.makeGraph(getDebugContext(), bytecodeProvider, builder.getMethod(), null, null);
+ return installer.makeGraph(getDebugContext(), bytecodeProvider, builder.getMethod(), null, null, false, null);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/StandardMethodSubstitutionsTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/StandardMethodSubstitutionsTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,8 +24,6 @@
import java.util.HashMap;
-import org.junit.Test;
-
import org.graalvm.compiler.api.replacements.MethodSubstitution;
import org.graalvm.compiler.nodes.IfNode;
import org.graalvm.compiler.nodes.StructuredGraph;
@@ -35,6 +33,7 @@
import org.graalvm.compiler.replacements.nodes.BitScanForwardNode;
import org.graalvm.compiler.replacements.nodes.BitScanReverseNode;
import org.graalvm.compiler.replacements.nodes.ReverseBytesNode;
+import org.junit.Test;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -137,7 +136,7 @@
StructuredGraph graph = testGraph(testMethodName);
// Check to see if the resulting graph contains the expected node
- StructuredGraph replacement = getReplacements().getSubstitution(realJavaMethod, -1);
+ StructuredGraph replacement = getReplacements().getSubstitution(realJavaMethod, -1, false, null);
if (replacement == null && !optional) {
assertInGraph(graph, intrinsicClass);
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/StringCompareToTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.replacements.test;
+
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.replacements.nodes.ArrayCompareToNode;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import jdk.vm.ci.amd64.AMD64;
+import jdk.vm.ci.code.InstalledCode;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+/**
+ * Tests the {@code String.compareTo} method substitution and intrinsic.
+ */
+public class StringCompareToTest extends MethodSubstitutionTest {
+
+ private ResolvedJavaMethod realMethod = null;
+ private ResolvedJavaMethod testMethod = null;
+ private InstalledCode testCode = null;
+
+ private final String[] testData = new String[]{
+ "A", "\uFF21", "AB", "A", "a", "Ab", "AA", "\uFF21",
+ "A\uFF21", "ABC", "AB", "ABcD", "ABCD\uFF21\uFF21", "ABCD\uFF21", "ABCDEFG\uFF21", "ABCD",
+ "ABCDEFGH\uFF21\uFF21", "\uFF22", "\uFF21\uFF22", "\uFF21A",
+ "\uFF21\uFF21",
+ "\u043c\u0430\u043c\u0430\u0020\u043c\u044b\u043b\u0430\u0020\u0440\u0430\u043c\u0443\u002c\u0020\u0440\u0430\u043c\u0430\u0020\u0441\u044a\u0435\u043b\u0430\u0020\u043c\u0430\u043c\u0443",
+ "crazy dog jumps over laszy fox",
+ "XMM-XMM-YMM-YMM-ZMM-ZMM-ZMM-ZMM-",
+ "XMM-XMM+YMM-YMM-ZMM-ZMM-ZMM-ZMM-",
+ "XMM-XMM-YMM-YMM+ZMM-ZMM-ZMM-ZMM-",
+ "XMM-XMM-YMM-YMM-ZMM-ZMM-ZMM-ZMM+",
+ "XMM-XMM-XMM-XMM-YMM-YMM-YMM-YMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-",
+ "XMM-XMM-XMM-XMM+YMM-YMM-YMM-YMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-",
+ "XMM-XMM-XMM-XMM-YMM-YMM-YMM-YMM+ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-",
+ "XMM-XMM-XMM-XMM-YMM-YMM-YMM-YMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM-ZMM+",
+ ""
+ };
+
+ public StringCompareToTest() {
+ Assume.assumeTrue(getTarget().arch instanceof AMD64);
+
+ realMethod = getResolvedJavaMethod(String.class, "compareTo", String.class);
+ testMethod = getResolvedJavaMethod("stringCompareTo");
+ StructuredGraph graph = testGraph("stringCompareTo");
+
+ // Check to see if the resulting graph contains the expected node
+ StructuredGraph replacement = getReplacements().getSubstitution(realMethod, -1, false, null);
+ if (replacement == null) {
+ assertInGraph(graph, ArrayCompareToNode.class);
+ }
+
+ // Force compilation
+ testCode = getCode(testMethod);
+ Assert.assertNotNull(testCode);
+ }
+
+ private void executeStringCompareTo(String s0, String s1) {
+ Object expected = invokeSafe(realMethod, s0, s1);
+ // Verify that the original method and the substitution produce the same value
+ assertDeepEquals(expected, invokeSafe(testMethod, null, s0, s1));
+ // Verify that the generated code and the original produce the same value
+ assertDeepEquals(expected, executeVarargsSafe(testCode, s0, s1));
+ }
+
+ public static int stringCompareTo(String a, String b) {
+ return a.compareTo(b);
+ }
+
+ @Test
+ @Ignore("GR-8748")
+ public void testEqualString() {
+ String s = "equal-string";
+ executeStringCompareTo(s, new String(s.toCharArray()));
+ }
+
+ @Test
+ @Ignore("GR-8748")
+ public void testDifferentString() {
+ executeStringCompareTo("some-string", "different-string");
+ }
+
+ @Test
+ @Ignore("GR-8748")
+ public void testAllStrings() {
+ for (String s0 : testData) {
+ for (String s1 : testData) {
+ executeStringCompareTo(s0, s1);
+ }
+ }
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/StringSubstitutionsTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/StringSubstitutionsTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,11 +22,10 @@
*/
package org.graalvm.compiler.replacements.test;
-import org.junit.Test;
-
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.replacements.StringSubstitutions;
import org.graalvm.compiler.replacements.nodes.ArrayEqualsNode;
+import org.junit.Test;
import jdk.vm.ci.code.InstalledCode;
import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -42,7 +41,7 @@
StructuredGraph graph = testGraph(testMethodName);
// Check to see if the resulting graph contains the expected node
- StructuredGraph replacement = getReplacements().getSubstitution(realMethod, -1);
+ StructuredGraph replacement = getReplacements().getSubstitution(realMethod, -1, false, null);
if (replacement == null && !optional) {
assertInGraph(graph, intrinsicClass);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/WordTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/WordTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -44,7 +44,7 @@
protected StructuredGraph parse(Builder builder, PhaseSuite<HighTierContext> graphBuilderSuite) {
// create a copy to assign a valid compilation id
DebugContext debug = getDebugContext();
- StructuredGraph originalGraph = installer.makeGraph(debug, bytecodeProvider, builder.getMethod(), null, null);
+ StructuredGraph originalGraph = installer.makeGraph(debug, bytecodeProvider, builder.getMethod(), null, null, false, null);
return originalGraph.copyWithIdentifier(builder.getCompilationId(), debug);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/BoxingSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/BoxingSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -29,8 +29,8 @@
import org.graalvm.compiler.api.replacements.Snippet;
import org.graalvm.compiler.api.replacements.Snippet.ConstantParameter;
+import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.debug.DebugHandlersFactory;
-import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.PiNode;
import org.graalvm.compiler.nodes.ValueNode;
@@ -207,7 +207,7 @@
args.add("value", box.getValue());
args.addConst("valueOfCounter", valueOfCounter);
- SnippetTemplate template = template(box.getDebug(), args);
+ SnippetTemplate template = template(box, args);
box.getDebug().log("Lowering integerValueOf in %s: node=%s, template=%s, arguments=%s", box.graph(), box, template, args);
template.instantiate(providers.getMetaAccess(), box, DEFAULT_REPLACER, args);
}
@@ -218,7 +218,7 @@
args.add("value", unbox.getValue());
args.addConst("valueCounter", valueCounter);
- SnippetTemplate template = template(unbox.getDebug(), args);
+ SnippetTemplate template = template(unbox, args);
unbox.getDebug().log("Lowering integerValueOf in %s: node=%s, template=%s, arguments=%s", unbox.graph(), unbox, template, args);
template.instantiate(providers.getMetaAccess(), unbox, DEFAULT_REPLACER, args);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/CachingPEGraphDecoder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/CachingPEGraphDecoder.java Sat Mar 24 01:08:35 2018 +0100
@@ -27,6 +27,7 @@
import org.graalvm.collections.EconomicMap;
import org.graalvm.compiler.bytecode.BytecodeProvider;
import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.graph.SourceLanguagePositionProvider;
import org.graalvm.compiler.java.GraphBuilderPhase;
import org.graalvm.compiler.nodes.EncodedGraph;
import org.graalvm.compiler.nodes.GraphEncoder;
@@ -63,9 +64,9 @@
public CachingPEGraphDecoder(Architecture architecture, StructuredGraph graph, Providers providers, GraphBuilderConfiguration graphBuilderConfig, OptimisticOptimizations optimisticOpts,
AllowAssumptions allowAssumptions, LoopExplosionPlugin loopExplosionPlugin, InvocationPlugins invocationPlugins, InlineInvokePlugin[] inlineInvokePlugins,
ParameterPlugin parameterPlugin,
- NodePlugin[] nodePlugins, ResolvedJavaMethod callInlinedMethod) {
+ NodePlugin[] nodePlugins, ResolvedJavaMethod callInlinedMethod, SourceLanguagePositionProvider sourceLanguagePositionProvider) {
super(architecture, graph, providers.getMetaAccess(), providers.getConstantReflection(), providers.getConstantFieldProvider(), providers.getStampProvider(), loopExplosionPlugin,
- invocationPlugins, inlineInvokePlugins, parameterPlugin, nodePlugins, callInlinedMethod);
+ invocationPlugins, inlineInvokePlugins, parameterPlugin, nodePlugins, callInlinedMethod, sourceLanguagePositionProvider);
this.providers = providers;
this.graphBuilderConfig = graphBuilderConfig;
@@ -80,10 +81,11 @@
}
@SuppressWarnings("try")
- private EncodedGraph createGraph(ResolvedJavaMethod method, BytecodeProvider intrinsicBytecodeProvider) {
- StructuredGraph graphToEncode = new StructuredGraph.Builder(options, debug, allowAssumptions).useProfilingInfo(false).method(method).build();
+ private EncodedGraph createGraph(ResolvedJavaMethod method, ResolvedJavaMethod originalMethod, BytecodeProvider intrinsicBytecodeProvider) {
+ StructuredGraph graphToEncode = new StructuredGraph.Builder(options, debug, allowAssumptions).useProfilingInfo(false).trackNodeSourcePosition(
+ graphBuilderConfig.trackNodeSourcePosition()).method(method).build();
try (DebugContext.Scope scope = debug.scope("createGraph", graphToEncode)) {
- IntrinsicContext initialIntrinsicContext = intrinsicBytecodeProvider != null ? new IntrinsicContext(method, method, intrinsicBytecodeProvider, INLINE_AFTER_PARSING) : null;
+ IntrinsicContext initialIntrinsicContext = intrinsicBytecodeProvider != null ? new IntrinsicContext(originalMethod, method, intrinsicBytecodeProvider, INLINE_AFTER_PARSING) : null;
GraphBuilderPhase.Instance graphBuilderPhaseInstance = createGraphBuilderPhaseInstance(initialIntrinsicContext);
graphBuilderPhaseInstance.apply(graphToEncode);
@@ -106,10 +108,10 @@
}
@Override
- protected EncodedGraph lookupEncodedGraph(ResolvedJavaMethod method, BytecodeProvider intrinsicBytecodeProvider) {
+ protected EncodedGraph lookupEncodedGraph(ResolvedJavaMethod method, ResolvedJavaMethod originalMethod, BytecodeProvider intrinsicBytecodeProvider, boolean trackNodeSourcePosition) {
EncodedGraph result = graphCache.get(method);
if (result == null && method.hasBytecodes()) {
- result = createGraph(method, intrinsicBytecodeProvider);
+ result = createGraph(method, originalMethod, intrinsicBytecodeProvider);
}
return result;
}
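Threading originalMethod through lookupEncodedGraph into createGraph means the IntrinsicContext now records the substituted method as the original method instead of treating the substitute as its own original, so frame states in decoded method substitutions are attributed to the right method; the new SourceLanguagePositionProvider constructor parameter is simply forwarded to the PEGraphDecoder superclass.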
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/ConstantStringIndexOfSnippets.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/ConstantStringIndexOfSnippets.java Sat Mar 24 01:08:35 2018 +0100
@@ -65,7 +65,7 @@
char[] targetCharArray = snippetReflection.asObject(char[].class, stringIndexOf.getArgument(3).asJavaConstant());
args.addConst("md2", md2(targetCharArray));
args.addConst("cache", computeCache(targetCharArray));
- template(graph.getDebug(), args).instantiate(providers.getMetaAccess(), stringIndexOf, DEFAULT_REPLACER, args);
+ template(stringIndexOf, args).instantiate(providers.getMetaAccess(), stringIndexOf, DEFAULT_REPLACER, args);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/GraphKit.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/GraphKit.java Sat Mar 24 01:08:35 2018 +0100
@@ -32,9 +32,11 @@
import org.graalvm.compiler.core.common.spi.ConstantFieldProvider;
import org.graalvm.compiler.core.common.type.StampFactory;
import org.graalvm.compiler.core.common.type.StampPair;
+import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.graph.Graph;
import org.graalvm.compiler.graph.Node.ValueNumberable;
+import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.compiler.java.FrameStateBuilder;
import org.graalvm.compiler.java.GraphBuilderPhase;
import org.graalvm.compiler.nodes.AbstractBeginNode;
@@ -52,6 +54,7 @@
import org.graalvm.compiler.nodes.MergeNode;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.UnwindNode;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.FloatingNode;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderConfiguration;
@@ -224,28 +227,42 @@
* Creates and appends an {@link InvokeNode} for a call to a given method with a given set of
* arguments.
*/
+ @SuppressWarnings("try")
public InvokeNode createInvoke(ResolvedJavaMethod method, InvokeKind invokeKind, FrameStateBuilder frameStateBuilder, int bci, ValueNode... args) {
- assert method.isStatic() == (invokeKind == InvokeKind.Static);
- Signature signature = method.getSignature();
- JavaType returnType = signature.getReturnType(null);
- assert checkArgs(method, args);
- StampPair returnStamp = graphBuilderPlugins.getOverridingStamp(this, returnType, false);
- if (returnStamp == null) {
- returnStamp = StampFactory.forDeclaredType(graph.getAssumptions(), returnType, false);
+ try (DebugCloseable context = graph.withNodeSourcePosition(NodeSourcePosition.placeholder(method))) {
+ assert method.isStatic() == (invokeKind == InvokeKind.Static);
+ Signature signature = method.getSignature();
+ JavaType returnType = signature.getReturnType(null);
+ assert checkArgs(method, args);
+ StampPair returnStamp = graphBuilderPlugins.getOverridingStamp(this, returnType, false);
+ if (returnStamp == null) {
+ returnStamp = StampFactory.forDeclaredType(graph.getAssumptions(), returnType, false);
+ }
+ MethodCallTargetNode callTarget = graph.add(createMethodCallTarget(invokeKind, method, args, returnStamp, bci));
+ InvokeNode invoke = append(new InvokeNode(callTarget, bci));
+
+ if (frameStateBuilder != null) {
+ if (invoke.getStackKind() != JavaKind.Void) {
+ frameStateBuilder.push(invoke.getStackKind(), invoke);
+ }
+ invoke.setStateAfter(frameStateBuilder.create(bci, invoke));
+ if (invoke.getStackKind() != JavaKind.Void) {
+ frameStateBuilder.pop(invoke.getStackKind());
+ }
+ }
+ return invoke;
}
- MethodCallTargetNode callTarget = graph.add(createMethodCallTarget(invokeKind, method, args, returnStamp, bci));
- InvokeNode invoke = append(new InvokeNode(callTarget, bci));
+ }
+
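+ /**
+ * Creates and appends an {@link InvokeWithExceptionNode}, wiring its exception edge to an
+ * {@link ExceptionObjectNode} followed by an {@link UnwindNode} so that any exception thrown
+ * by the callee unwinds to the caller.
+ */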
+ public InvokeWithExceptionNode createInvokeWithExceptionAndUnwind(ResolvedJavaMethod method, InvokeKind invokeKind,
+ FrameStateBuilder frameStateBuilder, int invokeBci, int exceptionEdgeBci, ValueNode... args) {
- if (frameStateBuilder != null) {
- if (invoke.getStackKind() != JavaKind.Void) {
- frameStateBuilder.push(invoke.getStackKind(), invoke);
- }
- invoke.setStateAfter(frameStateBuilder.create(bci, invoke));
- if (invoke.getStackKind() != JavaKind.Void) {
- frameStateBuilder.pop(invoke.getStackKind());
- }
- }
- return invoke;
+ InvokeWithExceptionNode result = startInvokeWithException(method, invokeKind, frameStateBuilder, invokeBci, exceptionEdgeBci, args);
+ exceptionPart();
+ ExceptionObjectNode exception = exceptionObject();
+ append(new UnwindNode(exception));
+ endInvokeWithException();
+ return result;
}
protected MethodCallTargetNode createMethodCallTarget(InvokeKind invokeKind, ResolvedJavaMethod targetMethod, ValueNode[] args, StampPair returnStamp, @SuppressWarnings("unused") int bci) {
@@ -311,6 +328,9 @@
GraphBuilderConfiguration config = GraphBuilderConfiguration.getSnippetDefault(plugins);
StructuredGraph calleeGraph = new StructuredGraph.Builder(invoke.getOptions(), invoke.getDebug()).method(method).build();
+ if (invoke.graph().trackNodeSourcePosition()) {
+ calleeGraph.setTrackNodeSourcePosition();
+ }
IntrinsicContext initialReplacementContext = new IntrinsicContext(method, method, providers.getReplacements().getDefaultReplacementBytecodeProvider(), INLINE_AFTER_PARSING);
GraphBuilderPhase.Instance instance = new GraphBuilderPhase.Instance(metaAccess, providers.getStampProvider(), providers.getConstantReflection(), providers.getConstantFieldProvider(), config,
OptimisticOptimizations.NONE,
@@ -357,11 +377,12 @@
*
* @param condition The condition for the if-block
* @param trueProbability The estimated probability the condition is true
+ * @return the created {@link IfNode}.
*/
- public void startIf(LogicNode condition, double trueProbability) {
+ public IfNode startIf(LogicNode condition, double trueProbability) {
AbstractBeginNode thenSuccessor = graph.add(new BeginNode());
AbstractBeginNode elseSuccessor = graph.add(new BeginNode());
- append(new IfNode(condition, thenSuccessor, elseSuccessor, trueProbability));
+ IfNode node = append(new IfNode(condition, thenSuccessor, elseSuccessor, trueProbability));
lastFixedNode = null;
IfStructure s = new IfStructure();
@@ -369,6 +390,7 @@
s.thenPart = thenSuccessor;
s.elsePart = elseSuccessor;
pushStructure(s);
+ return node;
}
private IfStructure saveLastIfNode() {
@@ -403,11 +425,18 @@
s.state = IfState.ELSE_PART;
}
- public void endIf() {
+ /**
+ * Ends an if block started with {@link #startIf(LogicNode, double)}.
+ *
+ * @return the created merge node, or {@code null} if no merge node was required (for example,
+ * when one part ended with a control sink).
+ */
+ public AbstractMergeNode endIf() {
IfStructure s = saveLastIfNode();
FixedWithNextNode thenPart = s.thenPart instanceof FixedWithNextNode ? (FixedWithNextNode) s.thenPart : null;
FixedWithNextNode elsePart = s.elsePart instanceof FixedWithNextNode ? (FixedWithNextNode) s.elsePart : null;
+ AbstractMergeNode merge = null;
if (thenPart != null && elsePart != null) {
/* Both parts are alive, we need a real merge. */
@@ -416,7 +445,7 @@
EndNode elseEnd = graph.add(new EndNode());
graph.addAfterFixed(elsePart, elseEnd);
- AbstractMergeNode merge = graph.add(new MergeNode());
+ merge = graph.add(new MergeNode());
merge.addForwardEnd(thenEnd);
merge.addForwardEnd(elseEnd);
@@ -436,6 +465,7 @@
}
s.state = IfState.FINISHED;
popStructure();
+ return merge;
}
static class InvokeWithExceptionStructure extends Structure {
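A minimal usage sketch of the new return values; kit and condition are placeholders invented here, not code from this change:

```java
// Hedged sketch: capture the IfNode and the merge produced by the structured-if API.
IfNode ifNode = kit.startIf(condition, 0.5); // 0.5 = estimated true probability
kit.thenPart();
// ... append nodes for the true branch ...
kit.elsePart();
// ... append nodes for the false branch ...
AbstractMergeNode merge = kit.endIf(); // null if one branch ended in a control sink
```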
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/InlineDuringParsingPlugin.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/InlineDuringParsingPlugin.java Sat Mar 24 01:08:35 2018 +0100
@@ -26,6 +26,7 @@
import static org.graalvm.compiler.java.BytecodeParserOptions.InlineDuringParsingMaxDepth;
import static org.graalvm.compiler.nodes.graphbuilderconf.InlineInvokePlugin.InlineInfo.createStandardInlineInfo;
+import org.graalvm.compiler.java.BytecodeParserOptions;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
@@ -35,6 +36,15 @@
public final class InlineDuringParsingPlugin implements InlineInvokePlugin {
+ /**
+ * Graph node-count budget. Once the graph being parsed exceeds it, the effective value of
+ * {@link BytecodeParserOptions#InlineDuringParsingMaxDepth} is reduced to
+ * {@link #MaxDepthAfterBudgetExceeded}.
+ */
+ private static final int NodeBudget = Integer.getInteger("InlineDuringParsingPlugin.NodeBudget", 2000);
+
+ private static final int MaxDepthAfterBudgetExceeded = Integer.getInteger("InlineDuringParsingPlugin.MaxDepthAfterBudgetExceeded", 3);
+
@Override
public InlineInfo shouldInlineInvoke(GraphBuilderContext b, ResolvedJavaMethod method, ValueNode[] args) {
// @formatter:off
@@ -49,7 +59,7 @@
if (!method.isSynchronized() &&
checkSize(method, args, b.getGraph()) &&
- b.getDepth() < InlineDuringParsingMaxDepth.getValue(b.getOptions())) {
+ checkInliningDepth(b)) {
return createStandardInlineInfo(method);
}
}
@@ -57,6 +67,15 @@
return null;
}
+ private static boolean checkInliningDepth(GraphBuilderContext b) {
+ int nodeCount = b.getGraph().getNodeCount();
+ int maxDepth = InlineDuringParsingMaxDepth.getValue(b.getOptions());
+ if (nodeCount > NodeBudget && MaxDepthAfterBudgetExceeded < maxDepth) {
+ maxDepth = MaxDepthAfterBudgetExceeded;
+ }
+ return b.getDepth() < maxDepth;
+ }
+
private static boolean checkSize(ResolvedJavaMethod method, ValueNode[] args, StructuredGraph graph) {
int bonus = 1;
for (ValueNode v : args) {
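Both thresholds are read via Integer.getInteger, so they are plain system properties that can be tuned without rebuilding, for example -DInlineDuringParsingPlugin.NodeBudget=4000 or -DInlineDuringParsingPlugin.MaxDepthAfterBudgetExceeded=2 (the values here are only examples, not recommendations from this change).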
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/InstanceOfSnippetsTemplates.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/InstanceOfSnippetsTemplates.java Sat Mar 24 01:08:35 2018 +0100
@@ -96,7 +96,7 @@
replacer.replaceUsingInstantiation();
} else {
Arguments args = makeArguments(replacer, tool);
- template(instanceOf.getDebug(), args).instantiate(providers.getMetaAccess(), instanceOf, replacer, tool, args);
+ template(instanceOf, args).instantiate(providers.getMetaAccess(), instanceOf, replacer, tool, args);
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/IntrinsicGraphBuilder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/IntrinsicGraphBuilder.java Sat Mar 24 01:08:35 2018 +0100
@@ -31,6 +31,8 @@
import org.graalvm.compiler.core.common.type.TypeReference;
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.debug.DebugCloseable;
+import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.compiler.nodes.CallTargetNode;
import org.graalvm.compiler.nodes.CallTargetNode.InvokeKind;
import org.graalvm.compiler.nodes.FixedNode;
@@ -91,6 +93,7 @@
this.code = code;
this.method = code.getMethod();
this.graph = new StructuredGraph.Builder(options, debug, allowAssumptions).method(method).build();
+ this.graph.setTrackNodeSourcePosition();
this.invokeBci = invokeBci;
this.lastInstr = graph.start();
@@ -255,14 +258,18 @@
return arguments[0];
}
+ @SuppressWarnings("try")
public StructuredGraph buildGraph(InvocationPlugin plugin) {
- Receiver receiver = method.isStatic() ? null : this;
- if (plugin.execute(this, method, receiver, arguments)) {
- assert (returnValue != null) == (method.getSignature().getReturnKind() != JavaKind.Void) : method;
- append(new ReturnNode(returnValue));
- return graph;
+ NodeSourcePosition position = graph.trackNodeSourcePosition() ? NodeSourcePosition.placeholder(method) : null;
+ try (DebugCloseable context = graph.withNodeSourcePosition(position)) {
+ Receiver receiver = method.isStatic() ? null : this;
+ if (plugin.execute(this, method, receiver, arguments)) {
+ assert (returnValue != null) == (method.getSignature().getReturnKind() != JavaKind.Void) : method;
+ append(new ReturnNode(returnValue));
+ return graph;
+ }
+ return null;
}
- return null;
}
@Override
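The DebugCloseable returned by graph.withNodeSourcePosition(position) installs a current source position for the duration of the try block, so every node the invocation plugin creates picks up the placeholder position even though it was never parsed from bytecode; the same pattern appears in GraphKit.createInvoke above.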
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java Sat Mar 24 01:08:35 2018 +0100
@@ -48,6 +48,8 @@
import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.NodeSourcePosition;
+import org.graalvm.compiler.graph.SourceLanguagePosition;
+import org.graalvm.compiler.graph.SourceLanguagePositionProvider;
import org.graalvm.compiler.graph.spi.Canonicalizable;
import org.graalvm.compiler.java.GraphBuilderPhase;
import org.graalvm.compiler.nodeinfo.NodeInfo;
@@ -108,6 +110,7 @@
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.code.BailoutException;
import jdk.vm.ci.code.BytecodeFrame;
+import jdk.vm.ci.meta.Assumptions;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.meta.DeoptimizationAction;
import jdk.vm.ci.meta.DeoptimizationReason;
@@ -142,7 +145,7 @@
@Option(help = "Max number of loop explosions per method.", type = OptionType.Debug)//
public static final OptionKey<Integer> MaximumLoopExplosionCount = new OptionKey<>(10000);
- @Option(help = "Do not bail out but throw an exception on failed loop explosion.", type = OptionType.Debug) //
+ @Option(help = "Do not bail out but throw an exception on failed loop explosion.", type = OptionType.Debug)//
public static final OptionKey<Boolean> FailedLoopExplosionIsFatal = new OptionKey<>(false);
}
@@ -154,6 +157,7 @@
protected final int inliningDepth;
protected final ValueNode[] arguments;
+ private final SourceLanguagePosition sourceLanguagePosition;
protected FrameState outerState;
protected FrameState exceptionState;
@@ -169,6 +173,13 @@
this.invokeData = invokeData;
this.inliningDepth = inliningDepth;
this.arguments = arguments;
+ SourceLanguagePosition position = null;
+ if (arguments != null && method.hasReceiver() && arguments.length > 0 && arguments[0].isJavaConstant()) {
+ JavaConstant constantArgument = arguments[0].asJavaConstant();
+ position = sourceLanguagePositionProvider.getPosition(constantArgument);
+ }
+ this.sourceLanguagePosition = position;
}
@Override
@@ -176,15 +187,24 @@
return caller != null;
}
- public NodeSourcePosition getCallerBytecodePosition() {
+ @Override
+ public NodeSourcePosition getCallerBytecodePosition(NodeSourcePosition position) {
if (caller == null) {
- return null;
+ return position;
}
if (callerBytecodePosition == null) {
- JavaConstant constantReceiver = caller.invokeData == null ? null : caller.invokeData.constantReceiver;
- NodeSourcePosition callerPosition = caller.getCallerBytecodePosition();
NodeSourcePosition invokePosition = invokeData.invoke.asNode().getNodeSourcePosition();
- callerBytecodePosition = invokePosition != null ? invokePosition.addCaller(constantReceiver, callerPosition) : callerPosition;
+ if (invokePosition == null) {
+ assert position == null : "should only happen when tracking is disabled";
+ return null;
+ }
+ callerBytecodePosition = invokePosition;
+ }
+ if (position != null) {
+ return position.addCaller(caller.sourceLanguagePosition, callerBytecodePosition);
+ }
+ if (caller.sourceLanguagePosition != null && callerBytecodePosition != null) {
+ return new NodeSourcePosition(caller.sourceLanguagePosition, callerBytecodePosition.getCaller(), callerBytecodePosition.getMethod(), callerBytecodePosition.getBCI());
}
return callerBytecodePosition;
}
@@ -361,8 +381,11 @@
}
private DebugCloseable withNodeSoucePosition() {
- if (getGraph().mayHaveNodeSourcePosition()) {
- return getGraph().withNodeSourcePosition(methodScope.getCallerBytecodePosition());
+ if (getGraph().trackNodeSourcePosition()) {
+ NodeSourcePosition callerBytecodePosition = methodScope.getCallerBytecodePosition();
+ if (callerBytecodePosition != null) {
+ return getGraph().withNodeSourcePosition(callerBytecodePosition);
+ }
}
return null;
}
@@ -440,11 +463,12 @@
private final EconomicMap<SpecialCallTargetCacheKey, Object> specialCallTargetCache;
private final EconomicMap<ResolvedJavaMethod, Object> invocationPluginCache;
private final ResolvedJavaMethod callInlinedMethod;
+ protected final SourceLanguagePositionProvider sourceLanguagePositionProvider;
public PEGraphDecoder(Architecture architecture, StructuredGraph graph, MetaAccessProvider metaAccess, ConstantReflectionProvider constantReflection, ConstantFieldProvider constantFieldProvider,
StampProvider stampProvider, LoopExplosionPlugin loopExplosionPlugin, InvocationPlugins invocationPlugins, InlineInvokePlugin[] inlineInvokePlugins,
ParameterPlugin parameterPlugin,
- NodePlugin[] nodePlugins, ResolvedJavaMethod callInlinedMethod) {
+ NodePlugin[] nodePlugins, ResolvedJavaMethod callInlinedMethod, SourceLanguagePositionProvider sourceLanguagePositionProvider) {
super(architecture, graph, metaAccess, constantReflection, constantFieldProvider, stampProvider, true);
this.loopExplosionPlugin = loopExplosionPlugin;
this.invocationPlugins = invocationPlugins;
@@ -454,6 +478,7 @@
this.specialCallTargetCache = EconomicMap.create(Equivalence.DEFAULT);
this.invocationPluginCache = EconomicMap.create(Equivalence.DEFAULT);
this.callInlinedMethod = callInlinedMethod;
+ this.sourceLanguagePositionProvider = sourceLanguagePositionProvider;
}
protected static LoopExplosionKind loopExplosionKind(ResolvedJavaMethod method, LoopExplosionPlugin loopExplosionPlugin) {
@@ -464,8 +489,8 @@
}
}
- public void decode(ResolvedJavaMethod method) {
- PEMethodScope methodScope = new PEMethodScope(graph, null, null, lookupEncodedGraph(method, null), method, null, 0, loopExplosionPlugin, null);
+ public void decode(ResolvedJavaMethod method, boolean trackNodeSourcePosition) {
+ PEMethodScope methodScope = new PEMethodScope(graph, null, null, lookupEncodedGraph(method, null, null, trackNodeSourcePosition), method, null, 0, loopExplosionPlugin, null);
decode(createInitialLoopScope(methodScope, null));
cleanupGraph(methodScope);
@@ -533,6 +558,12 @@
MethodCallTargetNode methodCall = (MethodCallTargetNode) callTarget;
if (methodCall.invokeKind().hasReceiver()) {
invokeData.constantReceiver = methodCall.arguments().get(0).asJavaConstant();
+ NodeSourcePosition invokePosition = invokeData.invoke.asNode().getNodeSourcePosition();
+ if (invokeData.constantReceiver != null && invokePosition != null) {
+ // TODO: attach the constant receiver to the invoke's source position, e.g. via
+ // new NodeSourcePosition(invokeData.constantReceiver, invokePosition.getCaller(),
+ // invokePosition.getMethod(), invokePosition.getBCI());
+ }
}
LoopScope inlineLoopScope = trySimplifyInvoke(methodScope, loopScope, invokeData, (MethodCallTargetNode) callTarget);
if (inlineLoopScope != null) {
@@ -687,11 +718,12 @@
protected LoopScope doInline(PEMethodScope methodScope, LoopScope loopScope, InvokeData invokeData, InlineInfo inlineInfo, ValueNode[] arguments) {
ResolvedJavaMethod inlineMethod = inlineInfo.getMethodToInline();
- EncodedGraph graphToInline = lookupEncodedGraph(inlineMethod, inlineInfo.getIntrinsicBytecodeProvider());
+ EncodedGraph graphToInline = lookupEncodedGraph(inlineMethod, inlineInfo.getOriginalMethod(), inlineInfo.getIntrinsicBytecodeProvider(), graph.trackNodeSourcePosition());
if (graphToInline == null) {
return null;
}
+ assert !graph.trackNodeSourcePosition() || graphToInline.trackNodeSourcePosition() : graph + " " + graphToInline;
if (methodScope.inliningDepth > Options.InliningDepthError.getValue(options)) {
throw tooDeepInlining(methodScope);
}
@@ -740,6 +772,32 @@
inlineLoopScope.createdNodes[firstArgumentNodeId + i] = arguments[i];
}
+ // Copy assumptions from inlinee to caller
+ Assumptions assumptions = graph.getAssumptions();
+ Assumptions inlinedAssumptions = graphToInline.getAssumptions();
+ if (assumptions != null) {
+ if (inlinedAssumptions != null) {
+ assumptions.record(inlinedAssumptions);
+ }
+ } else {
+ assert inlinedAssumptions == null : String.format("cannot inline graph (%s) which makes assumptions into a graph (%s) that doesn't", inlineMethod, graph);
+ }
+
+ // Copy inlined methods from inlinee to caller
+ List<ResolvedJavaMethod> inlinedMethods = graphToInline.getInlinedMethods();
+ if (inlinedMethods != null) {
+ graph.getMethods().addAll(inlinedMethods);
+ }
+
+ if (graphToInline.getFields() != null) {
+ for (ResolvedJavaField field : graphToInline.getFields()) {
+ graph.recordField(field);
+ }
+ }
+ if (graphToInline.hasUnsafeAccess()) {
+ graph.markUnsafeAccess();
+ }
+
/*
* Do the actual inlining by returning the initial loop scope for the inlined method scope.
*/
@@ -927,7 +985,7 @@
}
}
- protected abstract EncodedGraph lookupEncodedGraph(ResolvedJavaMethod method, BytecodeProvider intrinsicBytecodeProvider);
+ protected abstract EncodedGraph lookupEncodedGraph(ResolvedJavaMethod method, ResolvedJavaMethod originalMethod, BytecodeProvider intrinsicBytecodeProvider, boolean trackNodeSourcePosition);
@Override
protected void handleFixedNode(MethodScope s, LoopScope loopScope, int nodeOrderId, FixedNode node) {
@@ -1124,20 +1182,6 @@
}
@Override
- protected Node addFloatingNode(MethodScope s, Node node) {
- Node addedNode = super.addFloatingNode(s, node);
- PEMethodScope methodScope = (PEMethodScope) s;
- NodeSourcePosition pos = node.getNodeSourcePosition();
- if (methodScope.isInlinedMethod()) {
- if (pos != null) {
- NodeSourcePosition bytecodePosition = methodScope.getCallerBytecodePosition();
- node.setNodeSourcePosition(pos.addCaller(bytecodePosition));
- }
- }
- return addedNode;
- }
-
- @Override
protected Node handleFloatingNodeAfterAdd(MethodScope s, LoopScope loopScope, Node node) {
PEMethodScope methodScope = (PEMethodScope) s;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/ReplacementsImpl.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/ReplacementsImpl.java Sat Mar 24 01:08:35 2018 +0100
@@ -55,6 +55,7 @@
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.debug.TimerKey;
import org.graalvm.compiler.graph.Node;
+import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.compiler.graph.Node.NodeIntrinsic;
import org.graalvm.compiler.java.GraphBuilderPhase;
import org.graalvm.compiler.java.GraphBuilderPhase.Instance;
@@ -148,7 +149,7 @@
if (subst != null) {
if (b.parsingIntrinsic() || InlineDuringParsing.getValue(b.getOptions()) || InlineIntrinsicsDuringParsing.getValue(b.getOptions())) {
// Forced inlining of intrinsics
- return createIntrinsicInlineInfo(subst.getMethod(), subst.getOrigin());
+ return createIntrinsicInlineInfo(subst.getMethod(), method, subst.getOrigin());
}
return null;
}
@@ -156,7 +157,7 @@
assert b.getDepth() < MAX_GRAPH_INLINING_DEPTH : "inlining limit exceeded";
// Force inlining when parsing replacements
- return createIntrinsicInlineInfo(method, defaultBytecodeProvider);
+ return createIntrinsicInlineInfo(method, null, defaultBytecodeProvider);
} else {
assert method.getAnnotation(NodeIntrinsic.class) == null : String.format("@%s method %s must only be called from within a replacement%n%s", NodeIntrinsic.class.getSimpleName(),
method.format("%h.%n"), b);
@@ -202,8 +203,8 @@
private static final TimerKey SnippetPreparationTime = DebugContext.timer("SnippetPreparationTime");
@Override
- public StructuredGraph getSnippet(ResolvedJavaMethod method, Object[] args) {
- return getSnippet(method, null, args);
+ public StructuredGraph getSnippet(ResolvedJavaMethod method, Object[] args, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition) {
+ return getSnippet(method, null, args, trackNodeSourcePosition, replaceePosition);
}
private static final AtomicInteger nextDebugContextId = new AtomicInteger();
@@ -217,29 +218,34 @@
@Override
@SuppressWarnings("try")
- public StructuredGraph getSnippet(ResolvedJavaMethod method, ResolvedJavaMethod recursiveEntry, Object[] args) {
+ public StructuredGraph getSnippet(ResolvedJavaMethod method, ResolvedJavaMethod recursiveEntry, Object[] args, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition) {
assert method.getAnnotation(Snippet.class) != null : "Snippet must be annotated with @" + Snippet.class.getSimpleName();
assert method.hasBytecodes() : "Snippet must not be abstract or native";
StructuredGraph graph = UseSnippetGraphCache.getValue(options) ? graphs.get(method) : null;
- if (graph == null) {
+ if (graph == null || (trackNodeSourcePosition && !graph.trackNodeSourcePosition())) {
try (DebugContext debug = openDebugContext("Snippet_", method);
DebugCloseable a = SnippetPreparationTime.start(debug)) {
- StructuredGraph newGraph = makeGraph(debug, defaultBytecodeProvider, method, args, recursiveEntry);
+ StructuredGraph newGraph = makeGraph(debug, defaultBytecodeProvider, method, args, recursiveEntry, trackNodeSourcePosition, replaceePosition);
DebugContext.counter("SnippetNodeCount[%#s]", method).add(newGraph.getDebug(), newGraph.getNodeCount());
if (!UseSnippetGraphCache.getValue(options) || args != null) {
return newGraph;
}
newGraph.freeze();
- graphs.putIfAbsent(method, newGraph);
+ if (graph != null) {
+ graphs.replace(method, graph, newGraph);
+ } else {
+ graphs.putIfAbsent(method, newGraph);
+ }
graph = graphs.get(method);
}
}
+ assert !trackNodeSourcePosition || graph.trackNodeSourcePosition();
return graph;
}
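Note the cache upgrade path: when a cached snippet graph was built without source positions and tracking is now requested, the graph is rebuilt and swapped in via graphs.replace(method, graph, newGraph) rather than putIfAbsent, so a graph cached concurrently by another thread is not silently clobbered.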
@Override
- public void registerSnippet(ResolvedJavaMethod method) {
+ public void registerSnippet(ResolvedJavaMethod method, boolean trackNodeSourcePosition) {
// No initialization needed as snippet graphs are created on demand in getSnippet
}
@@ -266,7 +272,7 @@
}
@Override
- public StructuredGraph getSubstitution(ResolvedJavaMethod method, int invokeBci) {
+ public StructuredGraph getSubstitution(ResolvedJavaMethod method, int invokeBci, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition) {
StructuredGraph result;
InvocationPlugin plugin = graphBuilderPlugins.getInvocationPlugins().lookupInvocation(method);
if (plugin != null && (!plugin.inlineOnly() || invokeBci >= 0)) {
@@ -275,9 +281,9 @@
MethodSubstitutionPlugin msPlugin = (MethodSubstitutionPlugin) plugin;
ResolvedJavaMethod substitute = msPlugin.getSubstitute(metaAccess);
StructuredGraph graph = UseSnippetGraphCache.getValue(options) ? graphs.get(substitute) : null;
- if (graph == null) {
+ if (graph == null || graph.trackNodeSourcePosition() != trackNodeSourcePosition) {
try (DebugContext debug = openDebugContext("Substitution_", method)) {
- graph = makeGraph(debug, msPlugin.getBytecodeProvider(), substitute, null, method);
+ graph = makeGraph(debug, msPlugin.getBytecodeProvider(), substitute, null, method, trackNodeSourcePosition, replaceePosition);
if (!UseSnippetGraphCache.getValue(options)) {
return graph;
}
@@ -311,9 +317,11 @@
* @param args
* @param original the original method if {@code method} is a {@linkplain MethodSubstitution
* substitution} otherwise null
+ * @param trackNodeSourcePosition
*/
- public StructuredGraph makeGraph(DebugContext debug, BytecodeProvider bytecodeProvider, ResolvedJavaMethod method, Object[] args, ResolvedJavaMethod original) {
- return createGraphMaker(method, original).makeGraph(debug, bytecodeProvider, args);
+ public StructuredGraph makeGraph(DebugContext debug, BytecodeProvider bytecodeProvider, ResolvedJavaMethod method, Object[] args, ResolvedJavaMethod original, boolean trackNodeSourcePosition,
+ NodeSourcePosition replaceePosition) {
+ return createGraphMaker(method, original).makeGraph(debug, bytecodeProvider, args, trackNodeSourcePosition, replaceePosition);
}
/**
@@ -350,10 +358,10 @@
}
@SuppressWarnings("try")
- public StructuredGraph makeGraph(DebugContext debug, BytecodeProvider bytecodeProvider, Object[] args) {
+ public StructuredGraph makeGraph(DebugContext debug, BytecodeProvider bytecodeProvider, Object[] args, boolean trackNodeSourcePosition, NodeSourcePosition replaceePosition) {
try (DebugContext.Scope s = debug.scope("BuildSnippetGraph", method)) {
assert method.hasBytecodes() : method;
- StructuredGraph graph = buildInitialGraph(debug, bytecodeProvider, method, args);
+ StructuredGraph graph = buildInitialGraph(debug, bytecodeProvider, method, args, trackNodeSourcePosition, replaceePosition);
finalizeGraph(graph);
@@ -417,10 +425,12 @@
* Builds the initial graph for a replacement.
*/
@SuppressWarnings("try")
- protected StructuredGraph buildInitialGraph(DebugContext debug, BytecodeProvider bytecodeProvider, final ResolvedJavaMethod methodToParse, Object[] args) {
+ protected StructuredGraph buildInitialGraph(DebugContext debug, BytecodeProvider bytecodeProvider, final ResolvedJavaMethod methodToParse, Object[] args, boolean trackNodeSourcePosition,
+ NodeSourcePosition replaceePosition) {
// Replacements cannot have optimistic assumptions since they have
// to be valid for the entire run of the VM.
- final StructuredGraph graph = new StructuredGraph.Builder(replacements.options, debug).method(methodToParse).build();
+ final StructuredGraph graph = new StructuredGraph.Builder(replacements.options, debug).method(methodToParse).trackNodeSourcePosition(trackNodeSourcePosition).callerContext(
+ replaceePosition).build();
// Replacements are not user code so they do not participate in unsafe access
// tracking
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/SnippetCounterNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/SnippetCounterNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -156,7 +156,7 @@
args.addConst("counter", counter.getCounter());
args.add("increment", counter.getIncrement());
- template(counter.getDebug(), args).instantiate(providers.getMetaAccess(), counter, DEFAULT_REPLACER, args);
+ template(counter, args).instantiate(providers.getMetaAccess(), counter, DEFAULT_REPLACER, args);
}
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/SnippetTemplate.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/SnippetTemplate.java Sat Mar 24 01:08:35 2018 +0100
@@ -47,27 +47,15 @@
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
-import jdk.vm.ci.code.TargetDescription;
-import jdk.vm.ci.meta.Constant;
-import jdk.vm.ci.meta.JavaConstant;
-import jdk.vm.ci.meta.JavaKind;
-import jdk.vm.ci.meta.Local;
-import jdk.vm.ci.meta.LocalVariableTable;
-import jdk.vm.ci.meta.MetaAccessProvider;
-import jdk.vm.ci.meta.ResolvedJavaMethod;
-import jdk.vm.ci.meta.ResolvedJavaType;
-import jdk.vm.ci.meta.Signature;
-import jdk.vm.ci.meta.ResolvedJavaMethod.Parameter;
-
import org.graalvm.collections.EconomicMap;
import org.graalvm.collections.EconomicSet;
import org.graalvm.collections.Equivalence;
import org.graalvm.collections.UnmodifiableEconomicMap;
import org.graalvm.compiler.api.replacements.Snippet;
-import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.api.replacements.Snippet.ConstantParameter;
import org.graalvm.compiler.api.replacements.Snippet.NonNullParameter;
import org.graalvm.compiler.api.replacements.Snippet.VarargsParameter;
+import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.core.common.GraalOptions;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.core.common.type.StampFactory;
@@ -76,14 +64,15 @@
import org.graalvm.compiler.debug.CounterKey;
import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.debug.DebugContext.Description;
import org.graalvm.compiler.debug.DebugHandlersFactory;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.debug.TimerKey;
-import org.graalvm.compiler.debug.DebugContext.Description;
+import org.graalvm.compiler.graph.Graph.Mark;
import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.graph.NodeClass;
+import org.graalvm.compiler.graph.NodeSourcePosition;
import org.graalvm.compiler.graph.Position;
-import org.graalvm.compiler.graph.Graph.Mark;
import org.graalvm.compiler.loop.LoopEx;
import org.graalvm.compiler.loop.LoopsData;
import org.graalvm.compiler.loop.phases.LoopTransformations;
@@ -97,20 +86,21 @@
import org.graalvm.compiler.nodes.FixedNode;
import org.graalvm.compiler.nodes.FixedWithNextNode;
import org.graalvm.compiler.nodes.FrameState;
+import org.graalvm.compiler.nodes.InliningLog;
import org.graalvm.compiler.nodes.LoopBeginNode;
import org.graalvm.compiler.nodes.MergeNode;
import org.graalvm.compiler.nodes.NodeView;
import org.graalvm.compiler.nodes.ParameterNode;
import org.graalvm.compiler.nodes.PhiNode;
+import org.graalvm.compiler.nodes.PiNode.Placeholder;
+import org.graalvm.compiler.nodes.PiNode.PlaceholderStamp;
import org.graalvm.compiler.nodes.ReturnNode;
import org.graalvm.compiler.nodes.StartNode;
import org.graalvm.compiler.nodes.StateSplit;
import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.StructuredGraph.GuardsStage;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.ValueNodeUtil;
-import org.graalvm.compiler.nodes.PiNode.Placeholder;
-import org.graalvm.compiler.nodes.PiNode.PlaceholderStamp;
-import org.graalvm.compiler.nodes.StructuredGraph.GuardsStage;
import org.graalvm.compiler.nodes.calc.FloatingNode;
import org.graalvm.compiler.nodes.java.LoadIndexedNode;
import org.graalvm.compiler.nodes.java.StoreIndexedNode;
@@ -131,10 +121,10 @@
import org.graalvm.compiler.phases.common.CanonicalizerPhase;
import org.graalvm.compiler.phases.common.DeadCodeEliminationPhase;
import org.graalvm.compiler.phases.common.FloatingReadPhase;
+import org.graalvm.compiler.phases.common.FloatingReadPhase.MemoryMapImpl;
import org.graalvm.compiler.phases.common.GuardLoweringPhase;
import org.graalvm.compiler.phases.common.LoweringPhase;
import org.graalvm.compiler.phases.common.RemoveValueProxyPhase;
-import org.graalvm.compiler.phases.common.FloatingReadPhase.MemoryMapImpl;
import org.graalvm.compiler.phases.common.inlining.InliningUtil;
import org.graalvm.compiler.phases.tiers.PhaseContext;
import org.graalvm.compiler.phases.util.Providers;
@@ -144,6 +134,18 @@
import org.graalvm.word.LocationIdentity;
import org.graalvm.word.WordBase;
+import jdk.vm.ci.code.TargetDescription;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.Local;
+import jdk.vm.ci.meta.LocalVariableTable;
+import jdk.vm.ci.meta.MetaAccessProvider;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import jdk.vm.ci.meta.ResolvedJavaMethod.Parameter;
+import jdk.vm.ci.meta.ResolvedJavaType;
+import jdk.vm.ci.meta.Signature;
+
/**
* A snippet template is a graph created by parsing a snippet method and then specialized by binding
* constants to the snippet's {@link ConstantParameter} parameters.
@@ -565,7 +567,7 @@
static class Options {
@Option(help = "Use a LRU cache for snippet templates.")//
- static final OptionKey<Boolean> UseSnippetTemplateCache = new OptionKey<>(true);
+ public static final OptionKey<Boolean> UseSnippetTemplateCache = new OptionKey<>(true);
@Option(help = "")//
static final OptionKey<Integer> MaxTemplatesPerSnippet = new OptionKey<>(50);
@@ -618,7 +620,7 @@
assert method.getAnnotation(Snippet.class) != null : method + " must be annotated with @" + Snippet.class.getSimpleName();
assert findMethod(declaringClass, methodName, method) == null : "found more than one method named " + methodName + " in " + declaringClass;
ResolvedJavaMethod javaMethod = providers.getMetaAccess().lookupJavaMethod(method);
- providers.getReplacements().registerSnippet(javaMethod);
+ providers.getReplacements().registerSnippet(javaMethod, GraalOptions.TrackNodeSourcePosition.getValue(options));
LocationIdentity[] privateLocations = GraalOptions.SnippetCounters.getValue(options) ? SnippetCounterNode.addSnippetCounters(initialPrivateLocations) : initialPrivateLocations;
if (GraalOptions.EagerSnippets.getValue(options)) {
return new EagerSnippetInfo(javaMethod, privateLocations);
@@ -641,13 +643,15 @@
* Gets a template for a given key, creating it first if necessary.
*/
@SuppressWarnings("try")
- protected SnippetTemplate template(DebugContext outer, final Arguments args) {
+ protected SnippetTemplate template(ValueNode replacee, final Arguments args) {
+ StructuredGraph graph = replacee.graph();
+ DebugContext outer = graph.getDebug();
SnippetTemplate template = Options.UseSnippetTemplateCache.getValue(options) && args.cacheable ? templates.get(args.cacheKey) : null;
- if (template == null) {
+ if (template == null || (graph.trackNodeSourcePosition() && !template.snippet.trackNodeSourcePosition())) {
try (DebugContext debug = openDebugContext(outer, args)) {
try (DebugCloseable a = SnippetTemplateCreationTime.start(debug); DebugContext.Scope s = debug.scope("SnippetSpecialization", args.info.method)) {
SnippetTemplates.increment(debug);
- template = new SnippetTemplate(options, debug, providers, snippetReflection, args);
+ template = new SnippetTemplate(options, debug, providers, snippetReflection, args, graph.trackNodeSourcePosition(), replacee);
if (Options.UseSnippetTemplateCache.getValue(options) && args.cacheable) {
templates.put(args.cacheKey, template);
}
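Because the cache key is unchanged, a template cached without source positions is transparently rebuilt the first time a position-tracking graph requests it (the trackNodeSourcePosition check above), and the rebuilt template then replaces the stale entry for subsequent lookups.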
@@ -697,12 +701,13 @@
* Creates a snippet template.
*/
@SuppressWarnings("try")
- protected SnippetTemplate(OptionValues options, DebugContext debug, final Providers providers, SnippetReflectionProvider snippetReflection, Arguments args) {
+ protected SnippetTemplate(OptionValues options, DebugContext debug, final Providers providers, SnippetReflectionProvider snippetReflection, Arguments args, boolean trackNodeSourcePosition,
+ Node replacee) {
this.snippetReflection = snippetReflection;
this.info = args.info;
Object[] constantArgs = getConstantArgs(args);
- StructuredGraph snippetGraph = providers.getReplacements().getSnippet(args.info.method, args.info.original, constantArgs);
+ StructuredGraph snippetGraph = providers.getReplacements().getSnippet(args.info.method, args.info.original, constantArgs, trackNodeSourcePosition, replacee.getNodeSourcePosition());
ResolvedJavaMethod method = snippetGraph.method();
Signature signature = method.getSignature();
@@ -710,8 +715,12 @@
PhaseContext phaseContext = new PhaseContext(providers);
// Copy snippet graph, replacing constant parameters with given arguments
- final StructuredGraph snippetCopy = new StructuredGraph.Builder(options, debug).name(snippetGraph.name).method(snippetGraph.method()).build();
-
+ final StructuredGraph snippetCopy = new StructuredGraph.Builder(options, debug).name(snippetGraph.name).method(snippetGraph.method()).trackNodeSourcePosition(
+ snippetGraph.trackNodeSourcePosition()).build();
+ assert !GraalOptions.TrackNodeSourcePosition.getValue(options) || snippetCopy.trackNodeSourcePosition();
+ if (providers.getCodeCache() != null && providers.getCodeCache().shouldDebugNonSafepoints()) {
+ snippetCopy.setTrackNodeSourcePosition();
+ }
try (DebugContext.Scope scope = debug.scope("SpecializeSnippet", snippetCopy)) {
if (!snippetGraph.isUnsafeAccessTrackingEnabled()) {
snippetCopy.disableUnsafeAccessTracking();
@@ -755,7 +764,12 @@
}
}
}
- snippetCopy.addDuplicates(snippetGraph.getNodes(), snippetGraph, snippetGraph.getNodeCount(), nodeReplacements);
+ try (InliningLog.UpdateScope updateScope = snippetCopy.getInliningLog().openDefaultUpdateScope()) {
+ UnmodifiableEconomicMap<Node, Node> duplicates = snippetCopy.addDuplicates(snippetGraph.getNodes(), snippetGraph, snippetGraph.getNodeCount(), nodeReplacements);
+ if (updateScope != null) {
+ snippetCopy.getInliningLog().replaceLog(duplicates, snippetGraph.getInliningLog());
+ }
+ }
debug.dump(DebugContext.INFO_LEVEL, snippetCopy, "Before specialization");
@@ -1396,8 +1410,7 @@
StructuredGraph replaceeGraph = replacee.graph();
EconomicMap<Node, Node> replacements = bind(replaceeGraph, metaAccess, args);
replacements.put(entryPointNode, AbstractBeginNode.prevBegin(replacee));
- UnmodifiableEconomicMap<Node, Node> duplicates = replaceeGraph.addDuplicates(nodes, snippet, snippet.getNodeCount(), replacements);
- debug.dump(DebugContext.DETAILED_LEVEL, replaceeGraph, "After inlining snippet %s", snippet.method());
+ UnmodifiableEconomicMap<Node, Node> duplicates = inlineSnippet(replacee, debug, replaceeGraph, replacements);
// Re-wire the control flow graph around the replacee
FixedNode firstCFGNodeDuplicate = (FixedNode) duplicates.get(firstCFGNode);
@@ -1490,6 +1503,27 @@
}
}
+ private UnmodifiableEconomicMap<Node, Node> inlineSnippet(Node replacee, DebugContext debug, StructuredGraph replaceeGraph, EconomicMap<Node, Node> replacements) {
+ Mark mark = replaceeGraph.getMark();
+ try (InliningLog.UpdateScope scope = replaceeGraph.getInliningLog().openUpdateScope((oldNode, newNode) -> {
+ InliningLog log = replaceeGraph.getInliningLog();
+ if (oldNode == null) {
+ log.trackNewCallsite(newNode);
+ }
+ })) {
+ UnmodifiableEconomicMap<Node, Node> duplicates = replaceeGraph.addDuplicates(nodes, snippet, snippet.getNodeCount(), replacements);
+ if (scope != null) {
+ replaceeGraph.getInliningLog().addLog(duplicates, snippet.getInliningLog());
+ }
+ NodeSourcePosition position = replacee.getNodeSourcePosition();
+ if (position != null) {
+ InliningUtil.updateSourcePosition(replaceeGraph, duplicates, mark, position, true);
+ }
+ debug.dump(DebugContext.DETAILED_LEVEL, replaceeGraph, "After inlining snippet %s", snippet.method());
+ return duplicates;
+ }
+ }
+
private void propagateStamp(Node node) {
if (node instanceof PhiNode) {
PhiNode phi = (PhiNode) node;
@@ -1549,8 +1583,7 @@
StructuredGraph replaceeGraph = replacee.graph();
EconomicMap<Node, Node> replacements = bind(replaceeGraph, metaAccess, args);
replacements.put(entryPointNode, tool.getCurrentGuardAnchor().asNode());
- UnmodifiableEconomicMap<Node, Node> duplicates = replaceeGraph.addDuplicates(nodes, snippet, snippet.getNodeCount(), replacements);
- debug.dump(DebugContext.DETAILED_LEVEL, replaceeGraph, "After inlining snippet %s", snippet.method());
+ UnmodifiableEconomicMap<Node, Node> duplicates = inlineSnippet(replacee, debug, replaceeGraph, replacements);
FixedWithNextNode lastFixedNode = tool.lastFixedNode();
assert lastFixedNode != null && lastFixedNode.isAlive() : replaceeGraph + " lastFixed=" + lastFixedNode;
@@ -1611,8 +1644,7 @@
floatingNodes.add(n);
}
}
- UnmodifiableEconomicMap<Node, Node> duplicates = replaceeGraph.addDuplicates(floatingNodes, snippet, floatingNodes.size(), replacements);
- debug.dump(DebugContext.DETAILED_LEVEL, replaceeGraph, "After inlining snippet %s", snippet.method());
+ UnmodifiableEconomicMap<Node, Node> duplicates = inlineSnippet(replacee, debug, replaceeGraph, replacements);
rewireFrameStates(replacee, duplicates);
updateStamps(replacee, duplicates);
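
Taken together, the SnippetTemplate changes route everything through the replacee: it supplies the DebugContext, the node-source-position policy, and the position used to re-tag inlined nodes. A minimal sketch of the new lookup flow, using only names from the hunks above (illustrative, not runnable in isolation):

    // Callers now pass the replacee instead of a DebugContext:
    SnippetTemplate t = template(replacee, args);
    // Inside template(), a cached entry is also discarded when
    //   replacee.graph().trackNodeSourcePosition()
    //   && !t.snippet.trackNodeSourcePosition()
    // so a position-tracking graph never reuses a position-free template.
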
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Sat Mar 24 01:08:35 2018 +0100
@@ -385,6 +385,23 @@
if (allowDeoptimization) {
for (JavaKind kind : new JavaKind[]{JavaKind.Int, JavaKind.Long}) {
Class<?> type = kind.toJavaClass();
+
+ r.register1("decrementExact", type, new InvocationPlugin() {
+ @Override
+ public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x) {
+ b.addPush(kind, new IntegerSubExactNode(x, ConstantNode.forIntegerKind(kind, 1)));
+ return true;
+ }
+ });
+
+ r.register1("incrementExact", type, new InvocationPlugin() {
+ @Override
+ public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x) {
+ b.addPush(kind, new IntegerAddExactNode(x, ConstantNode.forIntegerKind(kind, 1)));
+ return true;
+ }
+ });
+
r.register2("addExact", type, type, new InvocationPlugin() {
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x, ValueNode y) {
@@ -392,6 +409,7 @@
return true;
}
});
+
r.register2("subtractExact", type, type, new InvocationPlugin() {
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x, ValueNode y) {
@@ -399,6 +417,7 @@
return true;
}
});
+
r.register2("multiplyExact", type, type, new InvocationPlugin() {
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver receiver, ValueNode x, ValueNode y) {
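
The two plugins added above intrinsify Math.incrementExact and Math.decrementExact by reusing the overflow-checking add/sub nodes with a constant 1, mirroring the existing addExact/subtractExact registrations. A self-contained reminder of the Java semantics those nodes must preserve (plain JDK code, no Graal API):

    public class ExactMathDemo {
        public static void main(String[] args) {
            System.out.println(Math.incrementExact(41));   // 42
            System.out.println(Math.decrementExact(43L));  // 42
            try {
                Math.incrementExact(Integer.MAX_VALUE);    // must not wrap around
            } catch (ArithmeticException e) {
                System.out.println(e.getMessage());        // "integer overflow"
            }
        }
    }
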
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StringSubstitutions.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StringSubstitutions.java Sat Mar 24 01:08:35 2018 +0100
@@ -63,5 +63,5 @@
/**
* Will be intrinsified with an {@link InvocationPlugin} to a {@link LoadFieldNode}.
*/
- private static native char[] getValue(String s);
+ public static native char[] getValue(String s);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/classfile/ClassfileBytecodeProvider.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/classfile/ClassfileBytecodeProvider.java Sat Mar 24 01:08:35 2018 +0100
@@ -101,8 +101,8 @@
private static InputStream getClassfileAsStream(Class<?> c) {
String classfilePath = c.getName().replace('.', '/') + ".class";
if (JDK9Method.JAVA_SPECIFICATION_VERSION >= 9) {
- Object module = getModule.invoke(c);
- return getResourceAsStream.invoke(module, classfilePath);
+ Object module = getModule(c);
+ return getResourceAsStream(module, classfilePath);
} else {
ClassLoader cl = c.getClassLoader();
if (cl == null) {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/ArrayCompareToNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.replacements.nodes;
+
+import static org.graalvm.compiler.nodeinfo.InputType.Memory;
+import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1024;
+import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1024;
+
+import org.graalvm.compiler.core.common.type.StampFactory;
+import org.graalvm.compiler.graph.Node;
+import org.graalvm.compiler.graph.NodeClass;
+import org.graalvm.compiler.graph.spi.Canonicalizable;
+import org.graalvm.compiler.graph.spi.CanonicalizerTool;
+import org.graalvm.compiler.nodeinfo.NodeInfo;
+import org.graalvm.compiler.nodes.ConstantNode;
+import org.graalvm.compiler.nodes.FixedWithNextNode;
+import org.graalvm.compiler.nodes.NamedLocationIdentity;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.ValueNodeUtil;
+import org.graalvm.compiler.nodes.memory.MemoryAccess;
+import org.graalvm.compiler.nodes.memory.MemoryNode;
+import org.graalvm.compiler.nodes.spi.LIRLowerable;
+import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
+import org.graalvm.compiler.nodes.spi.Virtualizable;
+import org.graalvm.compiler.nodes.spi.VirtualizerTool;
+import org.graalvm.compiler.nodes.util.GraphUtil;
+import org.graalvm.word.LocationIdentity;
+
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.Value;
+
+// JaCoCo Exclude
+
+/**
+ * Compares two arrays lexicographically.
+ */
+@NodeInfo(cycles = CYCLES_1024, size = SIZE_1024)
+public final class ArrayCompareToNode extends FixedWithNextNode implements LIRLowerable, Canonicalizable, Virtualizable, MemoryAccess {
+
+ public static final NodeClass<ArrayCompareToNode> TYPE = NodeClass.create(ArrayCompareToNode.class);
+
+ /** {@link JavaKind} of one array to compare. */
+ protected final JavaKind kind1;
+
+ /** {@link JavaKind} of the other array to compare. */
+ protected final JavaKind kind2;
+
+ /** One array to compare. */
+ @Input ValueNode array1;
+
+ /** The other array to compare. */
+ @Input ValueNode array2;
+
+ /** Length of one array. */
+ @Input ValueNode length1;
+
+ /** Length of the other array. */
+ @Input ValueNode length2;
+
+ @OptionalInput(Memory) MemoryNode lastLocationAccess;
+
+ public ArrayCompareToNode(ValueNode array1, ValueNode array2, ValueNode length1, ValueNode length2, @ConstantNodeParameter JavaKind kind1, @ConstantNodeParameter JavaKind kind2) {
+ super(TYPE, StampFactory.forKind(JavaKind.Int));
+ this.kind1 = kind1;
+ this.kind2 = kind2;
+ this.array1 = array1;
+ this.array2 = array2;
+ this.length1 = length1;
+ this.length2 = length2;
+ }
+
+ @Override
+ public Node canonical(CanonicalizerTool tool) {
+ if (tool.allUsagesAvailable() && hasNoUsages()) {
+ return null;
+ }
+ ValueNode a1 = GraphUtil.unproxify(array1);
+ ValueNode a2 = GraphUtil.unproxify(array2);
+ if (a1 == a2) {
+ return ConstantNode.forInt(0);
+ }
+ return this;
+ }
+
+ @Override
+ public void virtualize(VirtualizerTool tool) {
+ ValueNode alias1 = tool.getAlias(array1);
+ ValueNode alias2 = tool.getAlias(array2);
+ if (alias1 == alias2) {
+ // the same virtual objects will always have the same contents
+ tool.replaceWithValue(ConstantNode.forInt(0, graph()));
+ }
+ }
+
+ @NodeIntrinsic
+ public static native int compareTo(Object array1, Object array2, int length1, int length2, @ConstantNodeParameter JavaKind kind1, @ConstantNodeParameter JavaKind kind2);
+
+ @Override
+ public void generate(NodeLIRBuilderTool gen) {
+ Value result = gen.getLIRGeneratorTool().emitArrayCompareTo(kind1, kind2, gen.operand(array1), gen.operand(array2), gen.operand(length1), gen.operand(length2));
+ gen.setResult(this, result);
+ }
+
+ @Override
+ public LocationIdentity getLocationIdentity() {
+ return NamedLocationIdentity.getArrayLocation(kind1);
+ }
+
+ @Override
+ public MemoryNode getLastLocationAccess() {
+ return lastLocationAccess;
+ }
+
+ @Override
+ public void setLastLocationAccess(MemoryNode lla) {
+ updateUsages(ValueNodeUtil.asNode(lastLocationAccess), ValueNodeUtil.asNode(lla));
+ lastLocationAccess = lla;
+ }
+}
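
ArrayCompareToNode is consumed through its @NodeIntrinsic entry point; the canonicalization above also folds a comparison of an array with itself to 0. A hypothetical substitution body (illustrative only, not part of this change) showing the intended use, e.g. comparing a Latin-1 byte[] against a UTF-16 char[]:

    static int compareToLatin1Utf16(byte[] value, char[] other, int len1, int len2) {
        // kind1/kind2 must be compile-time constants (@ConstantNodeParameter).
        return ArrayCompareToNode.compareTo(value, other, len1, len2,
                        JavaKind.Byte, JavaKind.Char);
    }
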
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/MacroNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/MacroNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -29,12 +29,16 @@
import org.graalvm.compiler.api.replacements.MethodSubstitution;
import org.graalvm.compiler.api.replacements.Snippet;
import org.graalvm.compiler.core.common.type.StampPair;
+import org.graalvm.compiler.debug.DebugCloseable;
import org.graalvm.compiler.debug.DebugContext;
import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.graph.Node;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.NodeInputList;
import org.graalvm.compiler.nodeinfo.NodeInfo;
import org.graalvm.compiler.nodes.CallTargetNode.InvokeKind;
+import org.graalvm.compiler.nodes.FixedNode;
+import org.graalvm.compiler.nodes.Invokable;
import org.graalvm.compiler.nodes.FixedWithNextNode;
import org.graalvm.compiler.nodes.FrameState;
import org.graalvm.compiler.nodes.InvokeNode;
@@ -75,7 +79,7 @@
size = SIZE_UNKNOWN,
sizeRationale = "If this node is not optimized away it will be lowered to a call, which we cannot estimate")
//@formatter:on
-public abstract class MacroNode extends FixedWithNextNode implements Lowerable {
+public abstract class MacroNode extends FixedWithNextNode implements Lowerable, Invokable {
public static final NodeClass<MacroNode> TYPE = NodeClass.create(MacroNode.class);
@Input protected NodeInputList<ValueNode> arguments;
@@ -108,10 +112,12 @@
return arguments.toArray(new ValueNode[0]);
}
- public int getBci() {
+ @Override
+ public int bci() {
return bci;
}
+ @Override
public ResolvedJavaMethod getTargetMethod() {
return targetMethod;
}
@@ -120,6 +126,16 @@
return null;
}
+ @Override
+ protected void afterClone(Node other) {
+ updateInliningLogAfterClone(other);
+ }
+
+ @Override
+ public FixedNode asFixedNode() {
+ return this;
+ }
+
/**
* Gets a snippet to be used for lowering this macro node. The returned graph (if non-null) must
* have been {@linkplain #lowerReplacement(StructuredGraph, LoweringTool) lowered}.
@@ -196,10 +212,13 @@
}
}
+ @SuppressWarnings("try")
public InvokeNode replaceWithInvoke() {
- InvokeNode invoke = createInvoke();
- graph().replaceFixedWithFixed(this, invoke);
- return invoke;
+ try (DebugCloseable context = withNodeSourcePosition()) {
+ InvokeNode invoke = createInvoke();
+ graph().replaceFixedWithFixed(this, invoke);
+ return invoke;
+ }
}
protected InvokeNode createInvoke() {
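
Making MacroNode an Invokable lets inlining bookkeeping treat macro nodes and real invokes uniformly, and afterClone() keeps the inlining log consistent when the node is duplicated. What the interface obliges, in isolation (macroNode is a placeholder; method names are from the hunks above):

    Invokable call = macroNode;                          // any MacroNode
    ResolvedJavaMethod callee = call.getTargetMethod();  // the replaced method
    int bci = call.bci();                                // renamed from getBci()
    FixedNode fixed = call.asFixedNode();                // the macro node itself
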
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/MacroStateSplitNode.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/nodes/MacroStateSplitNode.java Sat Mar 24 01:08:35 2018 +0100
@@ -85,7 +85,7 @@
}
assert invoke.stateAfter().bci == BytecodeFrame.AFTER_BCI;
// Here we need to fix the bci of the invoke
- InvokeNode newInvoke = snippetGraph.add(new InvokeNode(invoke.callTarget(), getBci()));
+ InvokeNode newInvoke = snippetGraph.add(new InvokeNode(invoke.callTarget(), bci()));
newInvoke.setStateAfter(invoke.stateAfter());
snippetGraph.replaceFixedWithFixed((InvokeNode) invoke.asNode(), newInvoke);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.serviceprovider/src/org/graalvm/compiler/serviceprovider/GraalServices.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.serviceprovider/src/org/graalvm/compiler/serviceprovider/GraalServices.java Sat Mar 24 01:08:35 2018 +0100
@@ -54,14 +54,18 @@
* @param other all JVMCI packages will be opened to the module defining this class
*/
public static void openJVMCITo(Class<?> other) {
- Object jvmci = getModule.invoke(Services.class);
- Object otherModule = getModule.invoke(other);
+ Object jvmci = getModule(Services.class);
+ Object otherModule = getModule(other);
if (jvmci != otherModule) {
- Set<String> packages = getPackages.invoke(jvmci);
+ Set<String> packages = getPackages(jvmci);
for (String pkg : packages) {
- boolean opened = isOpenTo.invoke(jvmci, pkg, otherModule);
+ boolean opened = isOpenTo(jvmci, pkg, otherModule);
if (!opened) {
- addOpens.invoke(jvmci, pkg, otherModule);
+ try {
+ addOpens.invoke(jvmci, pkg, otherModule);
+ } catch (Throwable throwable) {
+ throw new InternalError(throwable);
+ }
}
}
}
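
With the typed helpers, openJVMCITo reads directly off the Module API; only addOpens stays a reflective Method (see the comment in JDK9Method below) and therefore keeps an explicit try/catch. Typical use, with MyCompilerExtension as a placeholder for a class living in another module:

    // No-op if the JVMCI module already equals the target's module.
    GraalServices.openJVMCITo(MyCompilerExtension.class);
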
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.serviceprovider/src/org/graalvm/compiler/serviceprovider/JDK9Method.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.serviceprovider/src/org/graalvm/compiler/serviceprovider/JDK9Method.java Sat Mar 24 01:08:35 2018 +0100
@@ -22,9 +22,11 @@
*/
package org.graalvm.compiler.serviceprovider;
-import java.lang.reflect.InvocationTargetException;
+import java.io.InputStream;
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
import java.lang.reflect.Method;
-import java.lang.reflect.Modifier;
+import java.util.Set;
/**
* Reflection based access to API introduced by JDK 9. This allows the API to be used in code that
@@ -46,9 +48,17 @@
*/
public static final int JAVA_SPECIFICATION_VERSION = getJavaSpecificationVersion();
- public JDK9Method(Class<?> declaringClass, String name, Class<?>... parameterTypes) {
+ public static MethodHandle lookupMethodHandle(Class<?> declaringClass, String name, Class<?>... parameterTypes) {
try {
- this.method = declaringClass.getMethod(name, parameterTypes);
+ return MethodHandles.lookup().unreflect(declaringClass.getMethod(name, parameterTypes));
+ } catch (Exception e) {
+ throw new InternalError(e);
+ }
+ }
+
+ private static Method lookupMethod(Class<?> declaringClass, String name, Class<?>... parameterTypes) {
+ try {
+ return declaringClass.getMethod(name, parameterTypes);
} catch (Exception e) {
throw new InternalError(e);
}
@@ -59,90 +69,85 @@
*/
public static final boolean Java8OrEarlier = JAVA_SPECIFICATION_VERSION <= 8;
- public final Method method;
-
- public Class<?> getReturnType() {
- return method.getReturnType();
- }
-
/**
* {@code Class.getModule()}.
*/
- public static final JDK9Method getModule;
+ private static final MethodHandle getModuleHandle;
+
+ public static Object getModule(Class<?> clazz) {
+ try {
+ return getModuleHandle.invoke(clazz);
+ } catch (Throwable throwable) {
+ throw new InternalError(throwable);
+ }
+ }
/**
* {@code java.lang.Module.getPackages()}.
*/
- public static final JDK9Method getPackages;
+ private static final MethodHandle getPackages;
+
+ public static Set<String> getPackages(Object module) {
+ try {
+ return (Set<String>) getPackages.invoke(module);
+ } catch (Throwable throwable) {
+ throw new InternalError(throwable);
+ }
+ }
/**
* {@code java.lang.Module.getResourceAsStream(String)}.
*/
- public static final JDK9Method getResourceAsStream;
+ private static final MethodHandle getResourceAsStream;
+
+ public static InputStream getResourceAsStream(Object module, String resource) {
+ try {
+ return (InputStream) getResourceAsStream.invoke(module, resource);
+ } catch (Throwable throwable) {
+ throw new InternalError(throwable);
+ }
+ }
/**
- * {@code java.lang.Module.addOpens(String, Module)}.
+ * {@code java.lang.Module.addOpens(String, Module)}. This only seems to work correctly when
+ * invoked through reflection.
*/
- public static final JDK9Method addOpens;
+ public static final Method addOpens;
/**
* {@code java.lang.Module.isOpen(String, Module)}.
*/
- public static final JDK9Method isOpenTo;
+ private static final MethodHandle isOpenTo;
- /**
- * Invokes the static Module API method represented by this object.
- */
- @SuppressWarnings("unchecked")
- public <T> T invokeStatic(Object... args) {
- checkAvailability();
- assert Modifier.isStatic(method.getModifiers());
+ public static boolean isOpenTo(Object module1, String pkg, Object module2) {
try {
- return (T) method.invoke(null, args);
- } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
- throw new InternalError(e);
+ return (boolean) isOpenTo.invoke(module1, pkg, module2);
+ } catch (Throwable throwable) {
+ throw new InternalError(throwable);
}
}
- /**
- * Invokes the non-static Module API method represented by this object.
- */
- @SuppressWarnings("unchecked")
- public <T> T invoke(Object receiver, Object... args) {
- checkAvailability();
- assert !Modifier.isStatic(method.getModifiers());
- try {
- return (T) method.invoke(receiver, args);
- } catch (IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
- throw new InternalError(e);
- }
- }
-
- private void checkAvailability() throws InternalError {
- if (method == null) {
- throw new InternalError("Cannot use Module API on JDK " + JAVA_SPECIFICATION_VERSION);
- }
- }
+ public static final Class<?> MODULE_CLASS;
static {
if (JAVA_SPECIFICATION_VERSION >= 9) {
- getModule = new JDK9Method(Class.class, "getModule");
- Class<?> moduleClass = getModule.getReturnType();
- getPackages = new JDK9Method(moduleClass, "getPackages");
- addOpens = new JDK9Method(moduleClass, "addOpens", String.class, moduleClass);
- getResourceAsStream = new JDK9Method(moduleClass, "getResourceAsStream", String.class);
- isOpenTo = new JDK9Method(moduleClass, "isOpen", String.class, moduleClass);
+ try {
+ MODULE_CLASS = Class.class.getMethod("getModule").getReturnType();
+ getModuleHandle = lookupMethodHandle(Class.class, "getModule");
+ getPackages = lookupMethodHandle(MODULE_CLASS, "getPackages");
+ addOpens = lookupMethod(MODULE_CLASS, "addOpens", String.class, MODULE_CLASS);
+ getResourceAsStream = lookupMethodHandle(MODULE_CLASS, "getResourceAsStream", String.class);
+ isOpenTo = lookupMethodHandle(MODULE_CLASS, "isOpen", String.class, MODULE_CLASS);
+ } catch (NoSuchMethodException e) {
+ throw new InternalError(e);
+ }
} else {
- JDK9Method unavailable = new JDK9Method();
- getModule = unavailable;
- getPackages = unavailable;
- addOpens = unavailable;
- getResourceAsStream = unavailable;
- isOpenTo = unavailable;
+ MODULE_CLASS = null;
+ getModuleHandle = null;
+ getPackages = null;
+ addOpens = null;
+ getResourceAsStream = null;
+ isOpenTo = null;
}
}
-
- private JDK9Method() {
- method = null;
- }
}
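
Each new wrapper invokes a cached MethodHandle and funnels the handle's Throwable into an InternalError, so call sites lose the old untyped invoke(...) plumbing. Illustrative use of the wrappers on JDK 9+ (on JDK 8 the handles are null and the wrappers must not be called):

    Object module = JDK9Method.getModule(String.class);  // a java.lang.Module, typed as Object
    Set<String> packages = JDK9Method.getPackages(module);
    InputStream in = JDK9Method.getResourceAsStream(module, "java/lang/String.class");
    boolean open = JDK9Method.isOpenTo(module, "java.lang", module);
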
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.graphio/src/org/graalvm/graphio/ProtocolImpl.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.graphio/src/org/graalvm/graphio/ProtocolImpl.java Sat Mar 24 01:08:35 2018 +0100
@@ -128,7 +128,11 @@
@Override
protected Object findType(Port edges, int i) {
- return structure.edgeType(edges, i);
+ Object type = structure.edgeType(edges, i);
+ if (findEnumOrdinal(type) < 0) {
+ throw new IllegalStateException("edgeType method shall return an enum! Was: " + type);
+ }
+ return type;
}
@Override
@@ -138,7 +142,11 @@
@Override
protected Object findJavaClass(NodeClass clazz) {
- return structure.nodeClassType(clazz);
+ final Object type = structure.nodeClassType(clazz);
+ if (!(type instanceof Class<?>) && findJavaTypeName(type) == null) {
+ throw new IllegalStateException("nodeClassType method shall return a Java class (instance of Class)! Was: " + type);
+ }
+ return type;
}
@Override
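
Both checks fail fast at dump time instead of producing an unreadable graph file: edgeType must return an enum constant, and nodeClassType a Class (or something findJavaTypeName can name). A hypothetical structure fragment that satisfies them, where Port, NodeClass and MyNode stand in for the implementor's own types:

    enum EdgeKind { INPUT, SUCCESSOR }

    Object edgeType(Port port, int index) {
        return index == 0 ? EdgeKind.INPUT : EdgeKind.SUCCESSOR;
    }

    Object nodeClassType(NodeClass clazz) {
        return MyNode.class;  // an instance of Class, as the check demands
    }
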
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.micro.benchmarks/src/micro/benchmarks/StringBenchmark.java Thu Mar 29 20:12:02 2018 +0100
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.micro.benchmarks/src/micro/benchmarks/StringBenchmark.java Sat Mar 24 01:08:35 2018 +0100
@@ -45,6 +45,7 @@
// Checkstyle: stop
String lorem = "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.";
+ String loremLastChar = "Lorem ipsum dolor sit amet, consectetur adipisici elit, sed eiusmod tempor incidunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquid ex ea commodi consequat. Quis aute iure reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint obcaecat cupiditat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum?";
// Checkstyle: resume
}
@@ -71,4 +72,10 @@
public int indexOfStringNotFound(BenchState state) {
return state.lorem.indexOf(state.s2);
}
+
+ @Benchmark
+ @Warmup(iterations = 5)
+ public int compareTo(BenchState state) {
+ return state.lorem.compareTo(state.loremLastChar);
+ }
}
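
The new benchmark pairs lorem with a copy differing only in the final character ('.' vs '?'), forcing compareTo to scan the entire shared prefix before deciding. The expected result, as a one-line worked example:

    // '.' is 46 and '?' is 63, so after the common prefix compareTo
    // returns 46 - 63 = -17 (the benchmark returns a negative int).
    System.out.println("laborum.".compareTo("laborum?"));  // -17
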
--- a/test/hotspot/gtest/classfile/test_symbolTable.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/gtest/classfile/test_symbolTable.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -22,7 +22,7 @@
*/
#include "precompiled.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "unittest.hpp"
--- a/test/hotspot/gtest/code/test_vtableStub.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/gtest/code/test_vtableStub.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "code/vtableStubs.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "unittest.hpp"
TEST_VM(code, vtableStubs) {
--- a/test/hotspot/gtest/gc/shared/test_oopStorage.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/gtest/gc/shared/test_oopStorage.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "runtime/handles.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"
--- a/test/hotspot/gtest/logging/test_gcTraceTime.cpp Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/gtest/logging/test_gcTraceTime.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -26,7 +26,7 @@
#include "logTestFixture.hpp"
#include "logTestUtils.inline.hpp"
#include "logging/log.hpp"
-#include "runtime/interfaceSupport.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
#include "unittest.hpp"
class GCTraceTimeTest : public LogTestFixture {
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/gtest/memory/test_chunkManager.cpp Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+// The test function is only available in debug builds
+#ifdef ASSERT
+
+#include "unittest.hpp"
+
+void ChunkManager_test_list_index();
+
+TEST(ChunkManager, list_index) {
+ // The ChunkManager is only available in metaspace.cpp,
+ // so the test code is located in that file.
+ ChunkManager_test_list_index();
+}
+
+#endif // ASSERT
--- a/test/hotspot/jtreg/ProblemList-graal.txt Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/ProblemList-graal.txt Sat Mar 24 01:08:35 2018 +0100
@@ -55,7 +55,7 @@
compiler/unsafe/UnsafeGetStableArrayElement.java 8181833 generic-all
compiler/unsafe/UnsafeOffHeapBooleanTest.java 8181833 generic-all
compiler/unsafe/UnsafeOnHeapBooleanTest.java 8181833 generic-all
-:1
+
compiler/whitebox/ClearMethodStateTest.java 8181831 generic-all
compiler/whitebox/EnqueueMethodForCompilationTest.java 8181831 generic-all
compiler/whitebox/MakeMethodNotCompilableTest.java 8181831 generic-all
@@ -76,6 +76,6 @@
serviceability/jvmti/GetModulesInfo/JvmtiGetAllModulesTest.java 8195156 generic-all
-runtime/Metaspace/DefineClass.java 8197442 generic-all
+compiler/compilercontrol/directives/LogTest.java 8197446 generic-all
-compiler/compilercontrol/directives/LogTest.java 8197446 generic-all
+gc/g1/ihop/TestIHOPStatic.java 8199486 generic-all
--- a/test/hotspot/jtreg/TEST.groups Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/TEST.groups Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -31,6 +31,10 @@
hotspot_compiler = \
compiler
+hotspot_compiler_xcomp = \
+ :hotspot_compiler \
+ -:tier1_compiler_not_xcomp
+
hotspot_gc = \
gc
@@ -61,10 +65,14 @@
tier1_compiler = \
:tier1_compiler_1 \
:tier1_compiler_2 \
- :tier1_compiler_3
+ :tier1_compiler_3 \
+ :tier1_compiler_not_xcomp
+
+hotspot_not_fast_compiler = \
+ :hotspot_compiler \
+ -:tier1_compiler
tier1_compiler_1 = \
- compiler/aot/ \
compiler/arraycopy/ \
compiler/c1/ \
compiler/c2/ \
@@ -120,11 +128,8 @@
-compiler/loopopts/Test7052494.java \
-compiler/runtime/Test6826736.java
-hotspot_not_fast_compiler = \
- :hotspot_compiler \
- -:tier1_compiler_1 \
- -:tier1_compiler_2 \
- -:tier1_compiler_3 \
+tier1_compiler_not_xcomp = \
+ compiler/aot
ctw_1 = \
applications/ctw/modules/ \
@@ -140,6 +145,10 @@
:tier1_gc_gcold \
:tier1_gc_gcbasher
+hotspot_not_fast_gc = \
+ :hotspot_gc \
+ -:tier1_gc
+
tier1_gc_1 = \
gc/g1/
@@ -154,9 +163,9 @@
-gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java
tier1_gc_gcold = \
- gc/stress/gcold/TestGCOldWithG1.java
- gc/stress/gcold/TestGCOldWithCMS.java
- gc/stress/gcold/TestGCOldWithSerial.java
+ gc/stress/gcold/TestGCOldWithG1.java \
+ gc/stress/gcold/TestGCOldWithCMS.java \
+ gc/stress/gcold/TestGCOldWithSerial.java \
gc/stress/gcold/TestGCOldWithParallel.java
tier1_gc_gcbasher = \
@@ -271,3 +280,13 @@
runtime/ErrorHandling \
runtime/logging
+hotspot_nmt = \
+ runtime/NMT
+
+hotspot_rest_runtime = \
+ :hotspot_runtime \
+ -:tier1_runtime \
+ -:tier1_runtime_appcds_exclude \
+ -:hotspot_nmt \
+ -:hotspot_tier2_runtime_platform_agnostic
+
--- a/test/hotspot/jtreg/compiler/arguments/CheckCICompilerCount.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/arguments/CheckCICompilerCount.java Sat Mar 24 01:08:35 2018 +0100
@@ -84,11 +84,11 @@
private static final String[] NON_TIERED_EXPECTED_OUTPUTS = {
"CICompilerCount (0) must be at least 1",
- "intx CICompilerCount = 1 {product} {command line}",
- "intx CICompilerCount = 1 {product} {command line}",
+ "intx CICompilerCount = 1 {product} {command line}",
+ "intx CICompilerCount = 1 {product} {command line}",
"CICompilerCount (0) must be at least 1",
- "intx CICompilerCount = 1 {product} {command line}",
- "intx CICompilerCount = 1 {product} {command line}"
+ "intx CICompilerCount = 1 {product} {command line}",
+ "intx CICompilerCount = 1 {product} {command line}"
};
private static final int[] NON_TIERED_EXIT = {
@@ -165,13 +165,13 @@
private static final String[] TIERED_EXPECTED_OUTPUTS = {
"CICompilerCount (1) must be at least 2",
- "intx CICompilerCount = 1 {product} {command line}",
- "intx CICompilerCount = 1 {product} {command line}",
- "intx CICompilerCount = 2 {product} {command line}",
+ "intx CICompilerCount = 1 {product} {command line}",
+ "intx CICompilerCount = 1 {product} {command line}",
+ "intx CICompilerCount = 2 {product} {command line}",
"CICompilerCount (1) must be at least 2",
- "intx CICompilerCount = 1 {product} {command line}",
- "intx CICompilerCount = 1 {product} {command line}",
- "intx CICompilerCount = 2 {product} {command line}",
+ "intx CICompilerCount = 1 {product} {command line}",
+ "intx CICompilerCount = 1 {product} {command line}",
+ "intx CICompilerCount = 2 {product} {command line}",
};
private static final int[] TIERED_EXIT = {
--- a/test/hotspot/jtreg/compiler/arguments/CheckCompileThresholdScaling.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/arguments/CheckCompileThresholdScaling.java Sat Mar 24 01:08:35 2018 +0100
@@ -108,25 +108,25 @@
private static final String[][] NON_TIERED_EXPECTED_OUTPUTS = {
{
- "intx CompileThreshold = 1000 {pd product} {command line}",
- "double CompileThresholdScaling = 1.000000 {product} {default}"
+ "intx CompileThreshold = 1000 {pd product} {command line}",
+ "double CompileThresholdScaling = 1.000000 {product} {default}"
},
{
- "intx CompileThreshold = 1250 {pd product} {command line, ergonomic}",
- "double CompileThresholdScaling = 1.250000 {product} {command line}"
+ "intx CompileThreshold = 1250 {pd product} {command line, ergonomic}",
+ "double CompileThresholdScaling = 1.250000 {product} {command line}"
},
{
- "intx CompileThreshold = 750 {pd product} {command line, ergonomic}",
- "double CompileThresholdScaling = 0.750000 {product} {command line}"
+ "intx CompileThreshold = 750 {pd product} {command line, ergonomic}",
+ "double CompileThresholdScaling = 0.750000 {product} {command line}"
},
{
- "intx CompileThreshold = 1000 {pd product} {command line}",
- "double CompileThresholdScaling = 0.000000 {product} {command line}",
+ "intx CompileThreshold = 1000 {pd product} {command line}",
+ "double CompileThresholdScaling = 0.000000 {product} {command line}",
"interpreted mode"
},
{
- "intx CompileThreshold = 0 {pd product} {command line, ergonomic}",
- "double CompileThresholdScaling = 0.750000 {product} {command line}",
+ "intx CompileThreshold = 0 {pd product} {command line, ergonomic}",
+ "double CompileThresholdScaling = 0.750000 {product} {command line}",
"interpreted mode"
}
};
@@ -240,94 +240,94 @@
private static final String[][] TIERED_EXPECTED_OUTPUTS = {
{
- "intx Tier0BackedgeNotifyFreqLog = 10 {product} {command line}",
- "intx Tier0InvokeNotifyFreqLog = 7 {product} {command line}",
- "intx Tier23InlineeNotifyFreqLog = 20 {product} {command line}",
- "intx Tier2BackedgeNotifyFreqLog = 14 {product} {command line}",
- "intx Tier2InvokeNotifyFreqLog = 11 {product} {command line}",
- "intx Tier3BackEdgeThreshold = 60000 {product} {command line}",
- "intx Tier3BackedgeNotifyFreqLog = 13 {product} {command line}",
- "intx Tier3CompileThreshold = 2000 {product} {command line}",
- "intx Tier3InvocationThreshold = 200 {product} {command line}",
- "intx Tier3InvokeNotifyFreqLog = 10 {product} {command line}",
- "intx Tier3MinInvocationThreshold = 100 {product} {command line}",
- "intx Tier4BackEdgeThreshold = 40000 {product} {command line}",
- "intx Tier4CompileThreshold = 15000 {product} {command line}",
- "intx Tier4InvocationThreshold = 5000 {product} {command line}",
- "intx Tier4MinInvocationThreshold = 600 {product} {command line}",
- "double CompileThresholdScaling = 1.000000 {product} {default}"
+ "intx Tier0BackedgeNotifyFreqLog = 10 {product} {command line}",
+ "intx Tier0InvokeNotifyFreqLog = 7 {product} {command line}",
+ "intx Tier23InlineeNotifyFreqLog = 20 {product} {command line}",
+ "intx Tier2BackedgeNotifyFreqLog = 14 {product} {command line}",
+ "intx Tier2InvokeNotifyFreqLog = 11 {product} {command line}",
+ "intx Tier3BackEdgeThreshold = 60000 {product} {command line}",
+ "intx Tier3BackedgeNotifyFreqLog = 13 {product} {command line}",
+ "intx Tier3CompileThreshold = 2000 {product} {command line}",
+ "intx Tier3InvocationThreshold = 200 {product} {command line}",
+ "intx Tier3InvokeNotifyFreqLog = 10 {product} {command line}",
+ "intx Tier3MinInvocationThreshold = 100 {product} {command line}",
+ "intx Tier4BackEdgeThreshold = 40000 {product} {command line}",
+ "intx Tier4CompileThreshold = 15000 {product} {command line}",
+ "intx Tier4InvocationThreshold = 5000 {product} {command line}",
+ "intx Tier4MinInvocationThreshold = 600 {product} {command line}",
+ "double CompileThresholdScaling = 1.000000 {product} {default}"
},
{
- "intx Tier0BackedgeNotifyFreqLog = 9 {product} {command line, ergonomic}",
- "intx Tier0InvokeNotifyFreqLog = 6 {product} {command line, ergonomic}",
- "intx Tier23InlineeNotifyFreqLog = 19 {product} {command line, ergonomic}",
- "intx Tier2BackedgeNotifyFreqLog = 13 {product} {command line, ergonomic}",
- "intx Tier2InvokeNotifyFreqLog = 10 {product} {command line, ergonomic}",
- "intx Tier3BackEdgeThreshold = 45000 {product} {command line, ergonomic}",
- "intx Tier3BackedgeNotifyFreqLog = 12 {product} {command line, ergonomic}",
- "intx Tier3CompileThreshold = 1500 {product} {command line, ergonomic}",
- "intx Tier3InvocationThreshold = 150 {product} {command line, ergonomic}",
- "intx Tier3InvokeNotifyFreqLog = 9 {product} {command line, ergonomic}",
- "intx Tier3MinInvocationThreshold = 75 {product} {command line, ergonomic}",
- "intx Tier4BackEdgeThreshold = 30000 {product} {command line, ergonomic}",
- "intx Tier4CompileThreshold = 11250 {product} {command line, ergonomic}",
- "intx Tier4InvocationThreshold = 3750 {product} {command line, ergonomic}",
- "intx Tier4MinInvocationThreshold = 450 {product} {command line, ergonomic}",
- "double CompileThresholdScaling = 0.750000 {product} {command line}"
+ "intx Tier0BackedgeNotifyFreqLog = 9 {product} {command line, ergonomic}",
+ "intx Tier0InvokeNotifyFreqLog = 6 {product} {command line, ergonomic}",
+ "intx Tier23InlineeNotifyFreqLog = 19 {product} {command line, ergonomic}",
+ "intx Tier2BackedgeNotifyFreqLog = 13 {product} {command line, ergonomic}",
+ "intx Tier2InvokeNotifyFreqLog = 10 {product} {command line, ergonomic}",
+ "intx Tier3BackEdgeThreshold = 45000 {product} {command line, ergonomic}",
+ "intx Tier3BackedgeNotifyFreqLog = 12 {product} {command line, ergonomic}",
+ "intx Tier3CompileThreshold = 1500 {product} {command line, ergonomic}",
+ "intx Tier3InvocationThreshold = 150 {product} {command line, ergonomic}",
+ "intx Tier3InvokeNotifyFreqLog = 9 {product} {command line, ergonomic}",
+ "intx Tier3MinInvocationThreshold = 75 {product} {command line, ergonomic}",
+ "intx Tier4BackEdgeThreshold = 30000 {product} {command line, ergonomic}",
+ "intx Tier4CompileThreshold = 11250 {product} {command line, ergonomic}",
+ "intx Tier4InvocationThreshold = 3750 {product} {command line, ergonomic}",
+ "intx Tier4MinInvocationThreshold = 450 {product} {command line, ergonomic}",
+ "double CompileThresholdScaling = 0.750000 {product} {command line}"
},
{
- "intx Tier0BackedgeNotifyFreqLog = 10 {product} {command line, ergonomic}",
- "intx Tier0InvokeNotifyFreqLog = 7 {product} {command line, ergonomic}",
- "intx Tier23InlineeNotifyFreqLog = 20 {product} {command line, ergonomic}",
- "intx Tier2BackedgeNotifyFreqLog = 14 {product} {command line, ergonomic}",
- "intx Tier2InvokeNotifyFreqLog = 11 {product} {command line, ergonomic}",
- "intx Tier3BackEdgeThreshold = 75000 {product} {command line, ergonomic}",
- "intx Tier3BackedgeNotifyFreqLog = 13 {product} {command line, ergonomic}",
- "intx Tier3CompileThreshold = 2500 {product} {command line, ergonomic}",
- "intx Tier3InvocationThreshold = 250 {product} {command line, ergonomic}",
- "intx Tier3InvokeNotifyFreqLog = 10 {product} {command line, ergonomic}",
- "intx Tier3MinInvocationThreshold = 125 {product} {command line, ergonomic}",
- "intx Tier4BackEdgeThreshold = 50000 {product} {command line, ergonomic}",
- "intx Tier4CompileThreshold = 18750 {product} {command line, ergonomic}",
- "intx Tier4InvocationThreshold = 6250 {product} {command line, ergonomic}",
- "intx Tier4MinInvocationThreshold = 750 {product} {command line, ergonomic}",
- "double CompileThresholdScaling = 1.250000 {product} {command line}"
+ "intx Tier0BackedgeNotifyFreqLog = 10 {product} {command line, ergonomic}",
+ "intx Tier0InvokeNotifyFreqLog = 7 {product} {command line, ergonomic}",
+ "intx Tier23InlineeNotifyFreqLog = 20 {product} {command line, ergonomic}",
+ "intx Tier2BackedgeNotifyFreqLog = 14 {product} {command line, ergonomic}",
+ "intx Tier2InvokeNotifyFreqLog = 11 {product} {command line, ergonomic}",
+ "intx Tier3BackEdgeThreshold = 75000 {product} {command line, ergonomic}",
+ "intx Tier3BackedgeNotifyFreqLog = 13 {product} {command line, ergonomic}",
+ "intx Tier3CompileThreshold = 2500 {product} {command line, ergonomic}",
+ "intx Tier3InvocationThreshold = 250 {product} {command line, ergonomic}",
+ "intx Tier3InvokeNotifyFreqLog = 10 {product} {command line, ergonomic}",
+ "intx Tier3MinInvocationThreshold = 125 {product} {command line, ergonomic}",
+ "intx Tier4BackEdgeThreshold = 50000 {product} {command line, ergonomic}",
+ "intx Tier4CompileThreshold = 18750 {product} {command line, ergonomic}",
+ "intx Tier4InvocationThreshold = 6250 {product} {command line, ergonomic}",
+ "intx Tier4MinInvocationThreshold = 750 {product} {command line, ergonomic}",
+ "double CompileThresholdScaling = 1.250000 {product} {command line}"
},
{
- "intx Tier0BackedgeNotifyFreqLog = 11 {product} {command line, ergonomic}",
- "intx Tier0InvokeNotifyFreqLog = 8 {product} {command line, ergonomic}",
- "intx Tier23InlineeNotifyFreqLog = 21 {product} {command line, ergonomic}",
- "intx Tier2BackedgeNotifyFreqLog = 15 {product} {command line, ergonomic}",
- "intx Tier2InvokeNotifyFreqLog = 12 {product} {command line, ergonomic}",
- "intx Tier3BackEdgeThreshold = 120000 {product} {command line, ergonomic}",
- "intx Tier3BackedgeNotifyFreqLog = 14 {product} {command line, ergonomic}",
- "intx Tier3CompileThreshold = 4000 {product} {command line, ergonomic}",
- "intx Tier3InvocationThreshold = 400 {product} {command line, ergonomic}",
- "intx Tier3InvokeNotifyFreqLog = 11 {product} {command line, ergonomic}",
- "intx Tier3MinInvocationThreshold = 200 {product} {command line, ergonomic}",
- "intx Tier4BackEdgeThreshold = 80000 {product} {command line, ergonomic}",
- "intx Tier4CompileThreshold = 30000 {product} {command line, ergonomic}",
- "intx Tier4InvocationThreshold = 10000 {product} {command line, ergonomic}",
- "intx Tier4MinInvocationThreshold = 1200 {product} {command line, ergonomic}",
- "double CompileThresholdScaling = 2.000000 {product} {command line}"
+ "intx Tier0BackedgeNotifyFreqLog = 11 {product} {command line, ergonomic}",
+ "intx Tier0InvokeNotifyFreqLog = 8 {product} {command line, ergonomic}",
+ "intx Tier23InlineeNotifyFreqLog = 21 {product} {command line, ergonomic}",
+ "intx Tier2BackedgeNotifyFreqLog = 15 {product} {command line, ergonomic}",
+ "intx Tier2InvokeNotifyFreqLog = 12 {product} {command line, ergonomic}",
+ "intx Tier3BackEdgeThreshold = 120000 {product} {command line, ergonomic}",
+ "intx Tier3BackedgeNotifyFreqLog = 14 {product} {command line, ergonomic}",
+ "intx Tier3CompileThreshold = 4000 {product} {command line, ergonomic}",
+ "intx Tier3InvocationThreshold = 400 {product} {command line, ergonomic}",
+ "intx Tier3InvokeNotifyFreqLog = 11 {product} {command line, ergonomic}",
+ "intx Tier3MinInvocationThreshold = 200 {product} {command line, ergonomic}",
+ "intx Tier4BackEdgeThreshold = 80000 {product} {command line, ergonomic}",
+ "intx Tier4CompileThreshold = 30000 {product} {command line, ergonomic}",
+ "intx Tier4InvocationThreshold = 10000 {product} {command line, ergonomic}",
+ "intx Tier4MinInvocationThreshold = 1200 {product} {command line, ergonomic}",
+ "double CompileThresholdScaling = 2.000000 {product} {command line}"
},
{
- "intx Tier0BackedgeNotifyFreqLog = 10 {product} {command line}",
- "intx Tier0InvokeNotifyFreqLog = 7 {product} {command line}",
- "intx Tier23InlineeNotifyFreqLog = 20 {product} {command line}",
- "intx Tier2BackedgeNotifyFreqLog = 14 {product} {command line}",
- "intx Tier2InvokeNotifyFreqLog = 11 {product} {command line}",
- "intx Tier3BackEdgeThreshold = 60000 {product} {command line}",
- "intx Tier3BackedgeNotifyFreqLog = 13 {product} {command line}",
- "intx Tier3CompileThreshold = 2000 {product} {command line}",
- "intx Tier3InvocationThreshold = 200 {product} {command line}",
- "intx Tier3InvokeNotifyFreqLog = 10 {product} {command line}",
- "intx Tier3MinInvocationThreshold = 100 {product} {command line}",
- "intx Tier4BackEdgeThreshold = 40000 {product} {command line}",
- "intx Tier4CompileThreshold = 15000 {product} {command line}",
- "intx Tier4InvocationThreshold = 5000 {product} {command line}",
- "intx Tier4MinInvocationThreshold = 600 {product} {command line}",
- "double CompileThresholdScaling = 0.000000 {product} {command line}",
+ "intx Tier0BackedgeNotifyFreqLog = 10 {product} {command line}",
+ "intx Tier0InvokeNotifyFreqLog = 7 {product} {command line}",
+ "intx Tier23InlineeNotifyFreqLog = 20 {product} {command line}",
+ "intx Tier2BackedgeNotifyFreqLog = 14 {product} {command line}",
+ "intx Tier2InvokeNotifyFreqLog = 11 {product} {command line}",
+ "intx Tier3BackEdgeThreshold = 60000 {product} {command line}",
+ "intx Tier3BackedgeNotifyFreqLog = 13 {product} {command line}",
+ "intx Tier3CompileThreshold = 2000 {product} {command line}",
+ "intx Tier3InvocationThreshold = 200 {product} {command line}",
+ "intx Tier3InvokeNotifyFreqLog = 10 {product} {command line}",
+ "intx Tier3MinInvocationThreshold = 100 {product} {command line}",
+ "intx Tier4BackEdgeThreshold = 40000 {product} {command line}",
+ "intx Tier4CompileThreshold = 15000 {product} {command line}",
+ "intx Tier4InvocationThreshold = 5000 {product} {command line}",
+ "intx Tier4MinInvocationThreshold = 600 {product} {command line}",
+ "double CompileThresholdScaling = 0.000000 {product} {command line}",
"interpreted mode"
}
};
--- a/test/hotspot/jtreg/compiler/c2/Test6603011.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/c2/Test6603011.java Sat Mar 24 01:08:35 2018 +0100
@@ -28,7 +28,7 @@
* @modules java.base/jdk.internal.misc
* @library /test/lib
*
- * @run main/othervm -Xcomp -Xbatch -XX:-Inline compiler.c2.Test6603011
+ * @run main/othervm/timeout=480 -Xcomp -Xbatch -XX:-Inline compiler.c2.Test6603011
*/
//
--- a/test/hotspot/jtreg/compiler/c2/Test7009359.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/c2/Test7009359.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -24,11 +24,11 @@
/**
* @test
* @bug 7009359
- * @summary HS with -XX:+AggressiveOpts optimize new StringBuffer(null) so it does not throw NPE as expected
+ * @summary HS optimizes new StringBuffer(null) so it does not throw NPE as expected
*
* @run main/othervm -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeStringConcat
- * -XX:CompileCommand=dontinline,compiler.c2.Test7009359::stringmakerBUG
- * compiler.c2.Test7009359
+ * -XX:CompileCommand=dontinline,compiler.c2.Test7009359::stringmakerBUG
+ * compiler.c2.Test7009359
*/
package compiler.c2;
--- a/test/hotspot/jtreg/compiler/c2/cr6711117/Test.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/c2/cr6711117/Test.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,8 @@
* @bug 6711117
* @summary Assertion in 64bit server vm (flat != TypePtr::BOTTOM,"cannot alias-analyze an untyped ptr")
*
- * @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:+AggressiveOpts
- * -XX:+UseCompressedOops
- * compiler.c2.cr6711117.Test
+ * @run main/othervm -Xcomp -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000
+ * -XX:+UseCompressedOops compiler.c2.cr6711117.Test
*/
package compiler.c2.cr6711117;
--- a/test/hotspot/jtreg/compiler/calls/common/CallsBase.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/calls/common/CallsBase.java Sat Mar 24 01:08:35 2018 +0100
@@ -152,16 +152,18 @@
calleeVisited = false; // reset state
}
// compile with requested level if needed
- if (compileCallee > 0) {
- compileMethod(calleeMethod, compileCallee);
+ if (compileCallee > 0 && !compileMethod(calleeMethod, compileCallee)) {
+ System.out.println("WARNING: Blocking compilation failed for calleeMethod (timeout?). Skipping.");
+ return;
}
if (checkCalleeCompilationLevel) {
Asserts.assertEQ(expectedCalleeCompilationLevel,
wb.getMethodCompilationLevel(calleeMethod),
"Unexpected callee compilation level");
}
- if (compileCaller > 0) {
- compileMethod(callerMethod, compileCaller);
+ if (compileCaller > 0 && !compileMethod(callerMethod, compileCaller)) {
+ System.out.println("WARNING: Blocking compilation failed for callerMethod (timeout?). Skipping.");
+ return;
}
if (checkCallerCompilationLevel) {
Asserts.assertEQ(expectedCallerCompilationLevel,
@@ -185,11 +187,12 @@
* A method to compile another method, searching it by name in current class
* @param method a method to compile
* @param compLevel a compilation level
+ * @return true if method was enqueued for compilation
*/
- protected final void compileMethod(Method method, int compLevel) {
+ protected final boolean compileMethod(Method method, int compLevel) {
wb.deoptimizeMethod(method);
Asserts.assertTrue(wb.isMethodCompilable(method, compLevel));
- wb.enqueueMethodForCompilation(method, compLevel);
+ return wb.enqueueMethodForCompilation(method, compLevel);
}
/*
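
compileMethod now surfaces WhiteBox's enqueue result rather than discarding it: under -Xcomp a blocking compilation can time out, in which case enqueueMethodForCompilation returns false and the test skips instead of failing a bogus compilation-level check. The contract in isolation:

    boolean queued = wb.enqueueMethodForCompilation(method, compLevel);
    if (!queued) {
        // Queueing failed (e.g. blocking compile timed out); skip, don't assert.
        return;
    }
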
--- a/test/hotspot/jtreg/compiler/codegen/Test6909839.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/codegen/Test6909839.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
* @bug 6909839
* @summary missing unsigned compare cases for some cmoves in sparc.ad
*
- * @run main/othervm -XX:+AggressiveOpts -Xbatch compiler.codegen.Test6909839
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 -Xbatch compiler.codegen.Test6909839
*/
package compiler.codegen;
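
This and the following @run updates replace the removed -XX:+AggressiveOpts with the options it historically implied, -XX:+EliminateAutoBox and -XX:AutoBoxCacheMax=20000 (guarded by -XX:+IgnoreUnrecognizedVMOptions for VMs that lack the C2 flags). A small demo of what the cache size changes at the Java level:

    public class AutoBoxCacheDemo {
        public static void main(String[] args) {
            Integer a = 10_000, b = 10_000;
            // true when the Integer cache covers 10_000, e.g. with
            // -XX:AutoBoxCacheMax=20000; false with the default -128..127 cache.
            System.out.println(a == b);
        }
    }
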
--- a/test/hotspot/jtreg/compiler/compilercontrol/mixed/RandomCommandsTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/compilercontrol/mixed/RandomCommandsTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -31,7 +31,7 @@
* @build sun.hotspot.WhiteBox
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
- * @run driver/timeout=600 compiler.compilercontrol.mixed.RandomCommandsTest
+ * @run driver/timeout=1200 compiler.compilercontrol.mixed.RandomCommandsTest
*/
package compiler.compilercontrol.mixed;
--- a/test/hotspot/jtreg/compiler/escapeAnalysis/Test6689060.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/escapeAnalysis/Test6689060.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,9 @@
* @bug 6689060
* @summary Escape Analysis does not work with Compressed Oops
*
- * @run main/othervm -Xbatch -XX:+AggressiveOpts
- * -XX:CompileCommand=exclude,compiler.escapeAnalysis.Test6689060::dummy
- * compiler.escapeAnalysis.Test6689060
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000
+ * -XX:CompileCommand=exclude,compiler.escapeAnalysis.Test6689060::dummy
+ * compiler.escapeAnalysis.Test6689060
*/
package compiler.escapeAnalysis;
--- a/test/hotspot/jtreg/compiler/escapeAnalysis/Test6726999.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/escapeAnalysis/Test6726999.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,9 +26,9 @@
* @bug 6726999
* @summary nsk/stress/jck12a/jck12a010 assert(n != NULL,"Bad immediate dominator info.");
*
- * @run main/othervm -Xbatch -XX:+AggressiveOpts
- * -XX:CompileCommand=exclude,compiler.escapeAnalysis.Test6726999::dummy
- * compiler.escapeAnalysis.Test6726999
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000
+ * -XX:CompileCommand=exclude,compiler.escapeAnalysis.Test6726999::dummy
+ * compiler.escapeAnalysis.Test6726999
*/
package compiler.escapeAnalysis;
--- a/test/hotspot/jtreg/compiler/escapeAnalysis/cr6716441/Tester.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/compiler/escapeAnalysis/cr6716441/Tester.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,8 @@
* @bug 6716441
* @summary error in meet with +DoEscapeAnalysis
*
- * @run main/othervm -Xcomp -XX:+AggressiveOpts
- * compiler.escapeAnalysis.cr6716441.Tester
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -Xcomp -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000
+ * compiler.escapeAnalysis.cr6716441.Tester
*/
/* Complexity upper bound: 70070 ops */
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopopts/IterationSplitPredicateInconsistency.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/**
+ * @test
+ * @bug 8193130
+ * @summary Bad graph when unrolled loop bounds conflict with range checks
+ *
+ * @run main/othervm IterationSplitPredicateInconsistency
+ * @run main/othervm -XX:-UseLoopPredicate IterationSplitPredicateInconsistency
+ *
+ */
+
+public class IterationSplitPredicateInconsistency {
+ static volatile int barrier;
+
+ // Pollute the profile so the loop appears to run for a large number of iterations
+ static boolean test1_helper(int start, int stop, double[] array1, double[] array2, int exit) {
+ for (int i = start; i < stop; i++) {
+ array1[i] = array2[i];
+ if (i == exit) {
+ return true;
+ }
+ barrier = 0x42;
+ }
+ return false;
+ }
+
+ static double[] test1(int start, double[] array2, int exit) {
+ double[] array1 = new double[10];
+ // Predication moves the range checks out of the loop and
+ // pre/main/post loops are created. The main loop is unrolled
+ // several times, to the point where it's never executed, but
+ // the compiler can't tell that from the loop bounds alone. The
+ // lower bound of the loop is negative and would cause the range
+ // checks (that were removed from the loop body) to fail.
+ if (test1_helper(start, 5, array1, array2, exit)) {
+ return null;
+ }
+ return array1;
+ }
+
+ // Same as above with other combinations of increasing/decreasing
+ // loops, positive/negative stride
+ static boolean test2_helper(int start, int stop, double[] array1, double[] array2, int exit) {
+ for (int i = start-1; i >= stop; i--) {
+ array1[i] = array2[i];
+ if (i == exit) {
+ return true;
+ }
+ barrier = 0x42;
+ }
+ return false;
+ }
+
+ static double[] test2(int start, double[] array2, int exit) {
+ double[] array1 = new double[10];
+ if (test2_helper(start, 0, array1, array2, exit)) {
+ return null;
+ }
+ return array1;
+ }
+
+ static boolean test3_helper(int start, int stop, double[] array1, double[] array2, int exit) {
+ for (int i = start; i < stop; i++) {
+ array1[stop-i-1] = array2[stop-i-1];
+ if (i == exit) {
+ return true;
+ }
+ barrier = 0x42;
+ }
+ return false;
+ }
+
+ static double[] test3(int start, double[] array2, int exit) {
+ double[] array1 = new double[5];
+ if (test3_helper(start, 5, array1, array2, exit)) {
+ return null;
+ }
+ return array1;
+ }
+
+ static boolean test4_helper(int start, int stop, int from, double[] array1, double[] array2, int exit) {
+ for (int i = start-1; i >= stop; i--) {
+ array1[from-i-1] = array2[from-i-1];
+ if (i == exit) {
+ return true;
+ }
+ barrier = 0x42;
+ }
+ return false;
+ }
+
+ static double[] test4(int start, double[] array2, int exit) {
+ double[] array1 = new double[5];
+ if (test4_helper(start, 0, 5, array1, array2, exit)) {
+ return null;
+ }
+ return array1;
+ }
+
+ public static void main(String[] args) {
+ double[] array2 = new double[10];
+ double[] array3 = new double[1000];
+ for (int i = 0; i < 20_000; i++) {
+ test1_helper(0, 1000, array3, array3, 998);
+ test1(0, array2, 999);
+ test1(0, array2, 4);
+ test2_helper(1000, 0, array3, array3, 1);
+ test2(5, array2, 999);
+ test2(5, array2, 1);
+ test3_helper(0, 1000, array3, array3, 998);
+ test3(0, array2, 999);
+ test3(0, array2, 4);
+ test4_helper(1000, 0, 1000, array3, array3, 1);
+ test4(5, array2, 999);
+ test4(5, array2, 1);
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/compiler/loopstripmining/CheckLoopStripMiningIterShortLoop.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2018, Red Hat, Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8196294
+ * @summary when loop strip mining is enabled, LoopStripMiningIterShortLoop should not be zero
+ * @library /test/lib /
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @run driver CheckLoopStripMiningIterShortLoop
+ */
+
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.process.ProcessTools;
+
+public class CheckLoopStripMiningIterShortLoop {
+
+ public static void main(String[] args) throws Exception {
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+UseG1GC", "-XX:+PrintFlagsFinal", "-version");
+ OutputAnalyzer out = new OutputAnalyzer(pb.start());
+
+ long iter = Long.parseLong(out.firstMatch("uintx LoopStripMiningIter\\s+:?=\\s+(\\d+)", 1));
+ long iterShort = Long.parseLong(out.firstMatch("uintx LoopStripMiningIterShortLoop\\s+:?=\\s+(\\d+)", 1));
+
+ if (iter <= 0 || iterShort <= 0) {
+ throw new RuntimeException("Bad defaults for loop strip mining");
+ }
+ }
+}
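
The \\s+:?=\\s+ in the firstMatch patterns matters: -XX:+PrintFlagsFinal pads the flag table into columns and prints ':=' instead of '=' for values that differ from the default (e.g. ergonomically set ones), so a pattern with a single literal space and a bare '=' can fail to match. A self-contained sketch of the same extraction against one line of the padded shape (column widths illustrative):

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class FlagLineParseDemo {
        public static void main(String[] args) {
            // Illustrative -XX:+PrintFlagsFinal line; the real output pads
            // names and values with a variable number of spaces.
            String line = "   uintx LoopStripMiningIter                := 1000"
                        + "                            {C2 product} {ergonomic}";
            Matcher m = Pattern.compile(
                    "uintx LoopStripMiningIter\\s+:?=\\s+(\\d+)").matcher(line);
            if (m.find()) {
                System.out.println(Long.parseLong(m.group(1))); // prints 1000
            }
        }
    }

Note that the pattern cannot accidentally match the LoopStripMiningIterShortLoop line: after "LoopStripMiningIter" it requires whitespace, and that line continues with "ShortLoop".
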
--- a/test/hotspot/jtreg/gc/arguments/TestMinAndInitialSurvivorRatioFlags.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/gc/arguments/TestMinAndInitialSurvivorRatioFlags.java Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
* java.management
* @build sun.hotspot.WhiteBox
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run driver TestMinAndInitialSurvivorRatioFlags
+ * @run driver/timeout=240 TestMinAndInitialSurvivorRatioFlags
*/
import java.lang.management.MemoryUsage;
--- a/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/runtime/CommandLine/OptionsValidation/TestOptionsWithRanges.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
* java.management
* jdk.attach/sun.tools.attach
* jdk.internal.jvmstat/sun.jvmstat.monitor
- * @run main/othervm/timeout=900 TestOptionsWithRanges
+ * @run main/othervm/timeout=1800 TestOptionsWithRanges
*/
import java.util.ArrayList;
@@ -88,6 +88,11 @@
excludeTestMinRange("MallocMaxTestWords");
/*
+ * Exclude CMSSamplingGrain as it can cause intermittent failures on Windows
+ */
+ excludeTestRange("CMSSamplingGrain");
+
+ /*
* Exclude below options as their maximum value would consume too much memory
* and would affect other tests that run in parallel.
*/
--- a/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/runtime/CommandLine/VMDeprecatedOptions.java Sat Mar 24 01:08:35 2018 +0100
@@ -51,6 +51,7 @@
{"PrintSafepointStatistics", "false"},
{"PrintSafepointStatisticsCount", "3"},
{"PrintSafepointStatisticsTimeout", "3"},
+ {"AggressiveOpts", "true"},
// deprecated alias flags (see also aliased_jvm_flags):
{"DefaultMaxRAMFraction", "4"},
--- a/test/hotspot/jtreg/runtime/NMT/MallocStressTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/runtime/NMT/MallocStressTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -30,7 +30,7 @@
* java.management
* @build sun.hotspot.WhiteBox
* @run driver ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocStressTest
+ * @run main/othervm/timeout=1200 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocStressTest
*/
import java.util.concurrent.atomic.AtomicInteger;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/runtime/NMT/UnsafeAllocMemory.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary Unsafe.allocateMemory should be tagged as Other
+ * @key nmt jcmd
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ * java.management
+ * @run main/othervm -Xbootclasspath/a:. -XX:NativeMemoryTracking=summary UnsafeAllocMemory
+ */
+
+import jdk.test.lib.process.ProcessTools;
+import jdk.test.lib.process.OutputAnalyzer;
+import jdk.test.lib.JDKToolFinder;
+import jdk.internal.misc.Unsafe;
+
+public class UnsafeAllocMemory {
+ public static void main(String args[]) throws Exception {
+ OutputAnalyzer output;
+
+ // Grab my own PID
+ String pid = Long.toString(ProcessTools.getProcessId());
+ ProcessBuilder pb = new ProcessBuilder();
+
+ Unsafe unsafe = Unsafe.getUnsafe();
+ unsafe.allocateMemory(128 * 1024);
+
+ // Run 'jcmd <pid> VM.native_memory summary'
+ pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
+ output = new OutputAnalyzer(pb.start());
+
+ output.shouldContain("Other (reserved=");
+ }
+}
--- a/test/hotspot/jtreg/runtime/appcds/condy/CondyHelloTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/runtime/appcds/condy/CondyHelloTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -24,7 +24,7 @@
/*
* @test
* @summary Make sure CDS works with a minimal test case that uses a CONSTANT_Dynamic constant-pool entry
- * @requires os.arch != "sparcv9"
+ * @requires (os.arch != "sparcv9") & (vm.cds)
* @modules java.base/jdk.internal.misc
* @library /test/lib /test/hotspot/jtreg/runtime/appcds
* @build CondyHello
--- a/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AbstractMethodErrorTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/AbstractMethodError/AbstractMethodErrorTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -54,7 +54,23 @@
private static boolean enableChecks = true;
- public static void setup_test() {
+ private static boolean compile(Class<?> clazz, String name) {
+ try {
+ Method method = clazz.getMethod(name);
+ boolean enqueued = WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!enqueued) {
+ System.out.println("Warning: Blocking compilation failed for " + clazz.getName() + "." + name + " (timeout?)");
+ return false;
+ } else if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException(clazz.getName() + "." + name + " is not compiled");
+ }
+ } catch (NoSuchMethodException e) {
+ throw new RuntimeException(clazz.getName() + "." + name + " not found", e);
+ }
+ return true;
+ }
+
+ public static boolean setup_test() {
// Assure all exceptions are loaded.
new AbstractMethodError();
new IncompatibleClassChangeError();
@@ -67,48 +83,19 @@
enableChecks = true;
// Compile
- try {
- Method method = AbstractMethodErrorTest.class.getMethod("test_ame5_compiled_vtable_stub");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException(method.getName() + " is not compiled");
- }
- method = AbstractMethodErrorTest.class.getMethod("test_ame6_compiled_itable_stub");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException(method.getName() + " is not compiled");
- }
- method = AME5_C.class.getMethod("c");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("AME5_C." + method.getName() + " is not compiled");
- }
- method = AME5_D.class.getMethod("c");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("AME5_D." + method.getName() + " is not compiled");
- }
- method = AME5_E.class.getMethod("c");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("AME5_E." + method.getName() + " is not compiled");
- }
- method = AME6_C.class.getMethod("c");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("AME6_C." + method.getName() + " is not compiled");
- }
- method = AME6_D.class.getMethod("c");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("AME6_D." + method.getName() + " is not compiled");
- }
- method = AME6_E.class.getMethod("c");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("AME6_E." + method.getName() + " is not compiled");
- }
- } catch (NoSuchMethodException e) { }
+ if (!compile(AbstractMethodErrorTest.class, "test_ame5_compiled_vtable_stub") ||
+ !compile(AbstractMethodErrorTest.class, "test_ame6_compiled_itable_stub") ||
+ !compile(AME5_C.class, "mc") ||
+ !compile(AME5_D.class, "mc") ||
+ !compile(AME5_E.class, "mc") ||
+ !compile(AME6_C.class, "mc") ||
+ !compile(AME6_D.class, "mc") ||
+ !compile(AME6_E.class, "mc")) {
+ return false;
+ }
+
+ System.out.println("warmup done.");
+ return true;
}
private static String expectedErrorMessageAME1_1 =
@@ -493,7 +480,9 @@
public static void main(String[] args) throws Exception {
- setup_test();
+ if (!setup_test()) {
+ return;
+ }
test_ame1();
test_ame2();
test_ame3_1();
@@ -756,66 +745,66 @@
// - Call erroneous B.mc() at the end to raise the AbstractMethodError
abstract class AME5_A {
- abstract void ma();
- abstract void mb();
- abstract void mc();
+ public abstract void ma();
+ public abstract void mb();
+ public abstract void mc();
}
class AME5_B extends AME5_A {
- void ma() {
+ public void ma() {
System.out.print("B.ma() ");
}
- void mb() {
+ public void mb() {
System.out.print("B.mb() ");
}
// This method is missing in the .jasm implementation.
- void mc() {
+ public void mc() {
System.out.print("B.mc() ");
}
}
class AME5_C extends AME5_A {
- void ma() {
+ public void ma() {
System.out.print("C.ma() ");
}
- void mb() {
+ public void mb() {
System.out.print("C.mb() ");
}
- void mc() {
+ public void mc() {
System.out.print("C.mc() ");
}
}
class AME5_D extends AME5_A {
- void ma() {
+ public void ma() {
System.out.print("D.ma() ");
}
- void mb() {
+ public void mb() {
System.out.print("D.mb() ");
}
- void mc() {
+ public void mc() {
System.out.print("D.mc() ");
}
}
class AME5_E extends AME5_A {
- void ma() {
- System.out.print("E.ma() ");
- }
+ public void ma() {
+ System.out.print("E.ma() ");
+ }
- void mb() {
- System.out.print("E.mb() ");
- }
+ public void mb() {
+ System.out.print("E.mb() ");
+ }
- void mc() {
- System.out.print("E.mc() ");
- }
+ public void mc() {
+ System.out.print("E.mc() ");
+ }
}
//-------------------------------------------------------------------------
--- a/test/hotspot/jtreg/runtime/exceptionMsgs/IncompatibleClassChangeError/IncompatibleClassChangeErrorTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/runtime/exceptionMsgs/IncompatibleClassChangeError/IncompatibleClassChangeErrorTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -62,7 +62,24 @@
"Class ICC_B does not implement the requested interface ICC_iB";
// old message: "vtable stub"
- public static void setup_test() {
+
+ private static boolean compile(Class<?> clazz, String name) {
+ try {
+ Method method = clazz.getMethod(name);
+ boolean enqueued = WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+ if (!enqueued) {
+ System.out.println("Warning: Blocking compilation failed for " + clazz.getName() + "." + name + " (timeout?)");
+ return false;
+ } else if (!WHITE_BOX.isMethodCompiled(method)) {
+ throw new RuntimeException(clazz.getName() + "." + name + " is not compiled");
+ }
+ } catch (NoSuchMethodException e) {
+ throw new RuntimeException(clazz.getName() + "." + name + " not found", e);
+ }
+ return true;
+ }
+
+ public static boolean setup_test() {
// Assure all exceptions are loaded.
new AbstractMethodError();
new IncompatibleClassChangeError();
@@ -75,29 +92,15 @@
enableChecks = true;
// Compile
- try {
- Method method = IncompatibleClassChangeErrorTest.class.getMethod("test_icc_compiled_itable_stub");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException(method.getName() + " is not compiled");
- }
- method = ICC_C.class.getMethod("b");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("ICC_C." + method.getName() + " is not compiled");
- }
- method = ICC_D.class.getMethod("b");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("ICC_D." + method.getName() + " is not compiled");
- }
- method = ICC_E.class.getMethod("b");
- WHITE_BOX.enqueueMethodForCompilation(method, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
- if (!WHITE_BOX.isMethodCompiled(method)) {
- throw new RuntimeException("ICC_E." + method.getName() + " is not compiled");
- }
- } catch (NoSuchMethodException e) { }
+ if (!compile(IncompatibleClassChangeErrorTest.class, "test_icc_compiled_itable_stub") ||
+ !compile(ICC_C.class, "b") ||
+ !compile(ICC_D.class, "b") ||
+ !compile(ICC_E.class, "b")) {
+ return false;
+ }
+
System.out.println("warmup done.");
+ return true;
}
// Should never be compiled.
@@ -204,7 +207,9 @@
}
public static void main(String[] args) throws Exception {
- setup_test();
+ if (!setup_test()) {
+ return;
+ }
test_iccInt();
test_icc_compiled_itable_stub();
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/ClhsdbRegionDetailsScanOopsForG1.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import jdk.test.lib.apps.LingeredApp;
+
+/*
+ * @test
+ * @bug 8175312
+ * @summary Test clhsdb 'g1regiondetails' and 'scanoops' commands for G1GC
+ * @library /test/lib
+ * @requires (vm.bits == "64" & os.maxMemory > 8g)
+ * @run main/othervm/timeout=2400 ClhsdbRegionDetailsScanOopsForG1
+ */
+
+public class ClhsdbRegionDetailsScanOopsForG1 {
+
+ public static void main(String[] args) throws Exception {
+ System.out.println("Starting ClhsdbRegionDetailsScanOopsForG1 test");
+
+ LingeredAppWithLargeStringArray theApp = null;
+ try {
+ ClhsdbLauncher test = new ClhsdbLauncher();
+ List<String> vmArgs = new ArrayList<String>();
+ vmArgs.add("-XX:+UseG1GC");
+ vmArgs.add("-Xmx8g");
+ vmArgs.add("-XX:G1HeapRegionSize=2m");
+
+ theApp = new LingeredAppWithLargeStringArray();
+ LingeredApp.startApp(vmArgs, theApp);
+ System.out.println("Started LingeredAppWithLargeStringArray with pid " + theApp.getPid());
+
+ List<String> cmds = List.of("g1regiondetails");
+ Map<String, List<String>> expStrMap = new HashMap<>();
+ Map<String, List<String>> unExpStrMap = new HashMap<>();
+
+ // Test that the various types of regions are listed with the
+ // 'g1regiondetails' command
+ expStrMap.put("g1regiondetails", List.of(
+ "Region",
+ "Eden",
+ "Survivor",
+ "StartsHumongous",
+ "ContinuesHumongous",
+ "Free"));
+ unExpStrMap.put("g1regiondetails", List.of("Unknown Region Type"));
+ String regionDetailsOutput = test.run(theApp.getPid(), cmds,
+ expStrMap, unExpStrMap);
+ if (regionDetailsOutput == null) {
+ // Output could be null due to attach permission issues,
+ // in which case we skip the test.
+ LingeredApp.stopApp(theApp);
+ return;
+ }
+
+ // Test the output of 'scanoops' -- get the start and end addresses
+ // from the StartsHumongous region. Ensure that it contains an
+ // array of Strings.
+ String[] snippets = regionDetailsOutput.split(":StartsHumongous");
+ snippets = snippets[0].split("Region: ");
+ String[] words = snippets[snippets.length - 1].split(",");
+ // words[0] and words[1] represent the start and end addresses
+ String cmd = "scanoops " + words[0] + " " + words[1];
+ expStrMap = new HashMap<>();
+ expStrMap.put(cmd, List.of("[Ljava/lang/String"));
+ test.run(theApp.getPid(), List.of(cmd), expStrMap, null);
+ } catch (Exception ex) {
+ throw new RuntimeException("Test ERROR " + ex, ex);
+ } finally {
+ LingeredApp.stopApp(theApp);
+ }
+ System.out.println("Test PASSED");
+ }
+}
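
The address extraction in the test is easier to follow against a concrete line. The splitting sequence implies each region is printed as "Region: <start>,<end>,<top>:<type>"; the sample below is hypothetical, inferred from that logic rather than copied from actual SA output:

    public class RegionLineSplitDemo {
        public static void main(String[] args) {
            // Hypothetical 'g1regiondetails' line, shaped to match the
            // split(":StartsHumongous") / split("Region: ") / split(",")
            // sequence in the test; real SA output may differ.
            String out = "Region: 0x00000006c0000000,0x00000006c0200000,"
                       + "0x00000006c0200000:StartsHumongous";
            String[] snippets = out.split(":StartsHumongous");
            snippets = snippets[0].split("Region: ");
            String[] words = snippets[snippets.length - 1].split(",");
            // words[0] and words[1] are the start and end addresses
            System.out.println("scanoops " + words[0] + " " + words[1]);
        }
    }
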
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test/hotspot/jtreg/serviceability/sa/LingeredAppWithLargeStringArray.java Sat Mar 24 01:08:35 2018 +0100
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+import jdk.test.lib.apps.LingeredApp;
+
+public class LingeredAppWithLargeStringArray extends LingeredApp {
+ public static void main(String args[]) {
+ String[] hugeArray = new String[Integer.MAX_VALUE/8];
+ String[] smallArray = {"Just", "for", "testing"};
+ for (int i = 0; i < hugeArray.length/16; i++) {
+ hugeArray[i] = new String(smallArray[i%3]);
+ }
+ LingeredApp.main(args);
+ }
+}
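
The oversized array is what guarantees the StartsHumongous/ContinuesHumongous regions the G1 test expects: G1 allocates any object of at least half a region size directly as a humongous object. A back-of-the-envelope check under the test's -XX:G1HeapRegionSize=2m setting (the 16-byte array header and 4-byte compressed references are illustrative assumptions):

    public class HumongousThresholdSketch {
        public static void main(String[] args) {
            long regionSize = 2L * 1024 * 1024;     // -XX:G1HeapRegionSize=2m
            long threshold = regionSize / 2;        // G1's humongous cutoff
            long elements = Integer.MAX_VALUE / 8;  // ~268M array slots
            long arrayBytes = 16 + 4L * elements;   // assumed header + refs
            // ~1 GB of references vs a 1 MB cutoff: the array spans hundreds
            // of regions, one StartsHumongous plus many ContinuesHumongous.
            System.out.println(arrayBytes + " > " + threshold + " : "
                    + (arrayBytes > threshold));
        }
    }
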
--- a/test/hotspot/jtreg/serviceability/sa/TestUniverse.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/hotspot/jtreg/serviceability/sa/TestUniverse.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -87,6 +87,9 @@
output.shouldContain("Heap Parameters");
if (gc.contains("G1GC")) {
output.shouldContain("garbage-first heap");
+ output.shouldContain("region size");
+ output.shouldContain("G1 Young Generation:");
+ output.shouldContain("regions =");
}
if (gc.contains("UseConcMarkSweepGC")) {
output.shouldContain("Gen 1: concurrent mark-sweep generation");
--- a/test/jdk/java/lang/Integer/ValueOf.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/lang/Integer/ValueOf.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,8 @@
* @bug 6807702
* @summary Basic test for Integer.valueOf
* @run main ValueOf
- * @run main/othervm -esa -XX:+AggressiveOpts ValueOf
+ * @run main/othervm -esa -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox
+ * -XX:AutoBoxCacheMax=20000 ValueOf
*/
public class ValueOf {
--- a/test/jdk/java/math/BigDecimal/DivideMcTests.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/math/BigDecimal/DivideMcTests.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
* @bug 7036582
* @summary Some tests for the divide(..,MathContext) method.
* @run main DivideMcTests
- * @run main/othervm -XX:+AggressiveOpts DivideMcTests
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 DivideMcTests
* @author Sergey V. Kuksenko
*/
--- a/test/jdk/java/math/BigDecimal/FloatDoubleValueTests.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/math/BigDecimal/FloatDoubleValueTests.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
* @bug 6274390 7082971
* @summary Verify {float, double}Value methods work with condensed representation
* @run main FloatDoubleValueTests
- * @run main/othervm -XX:+AggressiveOpts FloatDoubleValueTests
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 FloatDoubleValueTests
*/
import java.math.*;
--- a/test/jdk/java/math/BigDecimal/RangeTests.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/math/BigDecimal/RangeTests.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
* @bug 7036582
* @summary Some new tests for the add method and constructor with MathContext.
* @run main RangeTests
- * @run main/othervm -XX:+AggressiveOpts RangeTests
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 RangeTests
* @author Sergey V. Kuksenko
*/
--- a/test/jdk/java/math/BigDecimal/StrippingZerosTest.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/math/BigDecimal/StrippingZerosTest.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
* @bug 4108852
* @summary A few tests of stripTrailingZeros
* @run main StrippingZerosTest
- * @run main/othervm -XX:+AggressiveOpts StrippingZerosTest
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 StrippingZerosTest
* @author Joseph D. Darcy
*/
--- a/test/jdk/java/math/BigDecimal/ToPlainStringTests.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/math/BigDecimal/ToPlainStringTests.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2004, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,7 @@
* @bug 4984872
* @summary Basic tests of toPlainString method
* @run main ToPlainStringTests
- * @run main/othervm -XX:+AggressiveOpts ToPlainStringTests
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 ToPlainStringTests
* @author Joseph D. Darcy
*/
--- a/test/jdk/java/util/HashMap/HashMapCloneLeak.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/util/HashMap/HashMapCloneLeak.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
* @summary Verify that we do not leak contents when we clone a HashMap
* @author david.buck@oracle.com
* @run main/othervm HashMapCloneLeak
- * @run main/othervm -XX:+AggressiveOpts HashMapCloneLeak
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 HashMapCloneLeak
*/
import java.util.HashMap;
--- a/test/jdk/java/util/NavigableMap/LockStep.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/java/util/NavigableMap/LockStep.java Sat Mar 24 01:08:35 2018 +0100
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,8 +26,8 @@
* @bug 6420753 6242436 6691185
* @summary Compare NavigableMap implementations for identical behavior
* @run main LockStep
- * @run main/othervm -XX:+AggressiveOpts LockStep
- * @run main/othervm -XX:+AggressiveOpts -Dthorough=true LockStep
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 LockStep
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+EliminateAutoBox -XX:AutoBoxCacheMax=20000 -Dthorough=true LockStep
* @author Martin Buchholz
* @key randomness
*/
--- a/test/jdk/sun/management/jdp/JdpOffTestCase.java Thu Mar 29 20:12:02 2018 +0100
+++ b/test/jdk/sun/management/jdp/JdpOffTestCase.java Sat Mar 24 01:08:35 2018 +0100
@@ -54,6 +54,14 @@
testPassed = true;
}
+ /**
+ * The socket timed out and no valid JDP packets were received.
+ */
+ @Override
+ protected void shutdown() throws Exception {
+ log.fine("Test timed out. Test passed!");
+ testPassed = true;
+ }
/**
* This method is executed after a correct Jdp packet coming from this VM has been received.