--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicMapImplTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicMapImplTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicMapLargeTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicMapLargeTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicMapTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicMapTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicSetTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EconomicSetTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EquivalenceTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/EquivalenceTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/PairTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections.test/src/jdk/internal/vm/compiler/collections/test/PairTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/EconomicMap.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/EconomicMap.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/EconomicMapImpl.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/EconomicMapImpl.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/EconomicSet.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/EconomicSet.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/Equivalence.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/Equivalence.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/MapCursor.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/MapCursor.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/Pair.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/Pair.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/UnmodifiableEconomicMap.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/UnmodifiableEconomicMap.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/UnmodifiableEconomicSet.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/UnmodifiableEconomicSet.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/UnmodifiableMapCursor.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/UnmodifiableMapCursor.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/package-info.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.collections/src/jdk/internal/vm/compiler/collections/package-info.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/ComparableWord.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/ComparableWord.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/LocationIdentity.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/LocationIdentity.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/Pointer.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/Pointer.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/PointerBase.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/PointerBase.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/SignedWord.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/SignedWord.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/UnsignedWord.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/UnsignedWord.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/WordBase.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/WordBase.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/WordFactory.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/WordFactory.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/impl/WordBoxFactory.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/impl/WordBoxFactory.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/impl/WordFactoryOpcode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/impl/WordFactoryOpcode.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/impl/WordFactoryOperation.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/impl/WordFactoryOperation.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/package-info.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/jdk.internal.vm.compiler.word/src/jdk/internal/vm/compiler/word/package-info.java Mon Jun 18 09:48:22 2018 -0700
@@ -4,9 +4,7 @@
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Oracle designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Oracle in the LICENSE file that accompanied this code.
+ * published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64Assembler.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64Assembler.java Mon Jun 18 09:48:22 2018 -0700
@@ -102,6 +102,7 @@
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.STXR;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.SUB;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.SUBS;
+import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.SWP;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.TBZ;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.TBNZ;
import static org.graalvm.compiler.asm.aarch64.AArch64Assembler.Instruction.UBFM;
@@ -519,6 +520,7 @@
CAS(0x08A07C00),
LDADD(0x38200000),
+ SWP(0x38208000),
ADR(0x00000000),
ADRP(0x80000000),
@@ -1391,6 +1393,30 @@
emitInt(transferSizeEncoding | instr.encoding | rs2(rs) | rn(rn) | rt(rt) | (acquire ? 1 : 0) << LDADDAcquireOffset | (release ? 1 : 0) << LDADDReleaseOffset);
}
+ /**
+     * Atomic swap. Atomically loads the value at address rn into rt and stores the value held in
+     * rs back to address rn.
+ *
+ * @param size size of operand to read from memory. Must be 8, 16, 32, or 64.
+ * @param rs general purpose register to be stored. May not be null.
+ * @param rt general purpose register to be loaded. May not be null.
+ * @param rn general purpose register or stack pointer holding an address from which to load.
+ * @param acquire boolean value signifying if the load should use acquire semantics.
+ * @param release boolean value signifying if the store should use release semantics.
+ */
+ public void swp(int size, Register rs, Register rt, Register rn, boolean acquire, boolean release) {
+ assert size == 8 || size == 16 || size == 32 || size == 64;
+ int transferSize = NumUtil.log2Ceil(size / 8);
+ swapInstruction(SWP, rs, rt, rn, transferSize, acquire, release);
+ }
+
+ private void swapInstruction(Instruction instr, Register rs, Register rt, Register rn, int log2TransferSize, boolean acquire, boolean release) {
+ assert log2TransferSize >= 0 && log2TransferSize < 4;
+ assert rt.getRegisterCategory().equals(CPU) && rs.getRegisterCategory().equals(CPU) && !rs.equals(rt);
+ int transferSizeEncoding = log2TransferSize << LoadStoreTransferSizeOffset;
+ emitInt(transferSizeEncoding | instr.encoding | rs2(rs) | rn(rn) | rt(rt) | (acquire ? 1 : 0) << LDADDAcquireOffset | (release ? 1 : 0) << LDADDReleaseOffset);
+ }
+
/* PC-relative Address Calculation (5.4.4) */
/**
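[Note] For reference, a standalone sketch (not part of the patch) of how the SWP fields combine, assuming the architectural ARMv8.1 LSE layout: Rt in bits 0..4, Rn in bits 5..9, Rs in bits 16..20, the release bit at 22, the acquire bit at 23, and the transfer size in bits 30..31. Register numbers below are illustrative.

    // Encodes SWP/SWPA/SWPL/SWPAL rs, rt, [rn] for an 8/16/32/64-bit operand.
    static int encodeSwp(int size, int rs, int rt, int rn, boolean acquire, boolean release) {
        int log2TransferSize = Integer.numberOfTrailingZeros(size / 8);   // 0..3
        return (log2TransferSize << 30) | 0x38208000
                        | ((acquire ? 1 : 0) << 23) | ((release ? 1 : 0) << 22)
                        | (rs << 16) | (rn << 5) | rt;
    }

    // encodeSwp(64, 2, 3, 1, true, true) == 0xf8e28023, i.e. swpal x2, x3, [x1]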
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64MacroAssembler.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.aarch64/src/org/graalvm/compiler/asm/aarch64/AArch64MacroAssembler.java Mon Jun 18 09:48:22 2018 -0700
@@ -641,6 +641,26 @@
}
/**
+ * dst = src + immediate.
+ *
+ * @param size register size. Has to be 32 or 64.
+ * @param dst general purpose register. May not be null or zero-register.
+ * @param src general purpose register. May not be null or zero-register.
+ * @param immediate 64-bit signed int
+ */
+ public void add(int size, Register dst, Register src, long immediate) {
+ if (NumUtil.isInt(immediate)) {
+ add(size, dst, src, (int) immediate);
+ } else {
+ assert (!dst.equals(zr) && !src.equals(zr));
+ assert !dst.equals(src);
+ assert size == 64;
+ mov(dst, immediate);
+            add(size, dst, src, dst);
+ }
+ }
+
+ /**
* dst = src + aimm and sets condition flags.
*
* @param size register size. Has to be 32 or 64.
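[Note] A minimal model of the dispatch above (illustrative; NumUtil.isInt amounts to the range test shown here):

    static boolean fitsInInt(long immediate) {
        return immediate == (int) immediate;   // fits the existing 32-bit immediate overload
    }

    // fitsInInt(0x7fff_ffffL)   -> true : delegate to add(size, dst, src, (int) immediate)
    // fitsInInt(0x1_0000_0000L) -> false: mov(dst, immediate), then dst = src + dst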
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64Assembler.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64Assembler.java Mon Jun 18 09:48:22 2018 -0700
@@ -1859,7 +1859,7 @@
* values were equal, and cleared otherwise.
*/
public final void cmpxchgb(Register reg, AMD64Address adr) { // cmpxchg
- prefix(adr, reg);
+ prefixb(adr, reg);
emitByte(0x0F);
emitByte(0xB0);
emitOperandHelper(reg, adr, 0);
@@ -2136,7 +2136,7 @@
public final void movb(AMD64Address dst, Register src) {
assert src.getRegisterCategory().equals(AMD64.CPU) : "must have byte register";
- prefix(dst, src, true);
+ prefixb(dst, src);
emitByte(0x88);
emitOperandHelper(src, dst, 0);
}
@@ -3282,6 +3282,10 @@
}
}
+ private void prefixb(AMD64Address adr, Register reg) {
+ prefix(adr, reg, true);
+ }
+
private void prefix(AMD64Address adr, Register reg) {
prefix(adr, reg, false);
}
@@ -3705,7 +3709,7 @@
}
public final void xaddb(AMD64Address dst, Register src) {
- prefix(dst, src);
+ prefixb(dst, src);
emitByte(0x0F);
emitByte(0xC0);
emitOperandHelper(src, dst, 0);
@@ -3734,7 +3738,7 @@
}
public final void xchgb(Register dst, AMD64Address src) {
- prefix(src, dst);
+ prefixb(src, dst);
emitByte(0x86);
emitOperandHelper(dst, src, 0);
}
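[Note] The switch to prefixb for the byte-sized forms matters because the low bytes of RSP/RBP/RSI/RDI are only reachable with a REX prefix; without one, ModRM reg values 4..7 select AH/CH/DH/BH. An illustrative byte-level comparison (not taken from the patch):

    byte[] movDhToMem  = { (byte) 0x88, (byte) 0x30 };                 // mov byte ptr [rax], dh
    byte[] movSilToMem = { (byte) 0x40, (byte) 0x88, (byte) 0x30 };    // rex; mov byte ptr [rax], sil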
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AMD64VectorAssembler.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,851 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.asm.amd64;
+
+import java.util.EnumSet;
+
+import org.graalvm.compiler.core.common.calc.Condition;
+import org.graalvm.compiler.debug.GraalError;
+
+import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
+
+import jdk.vm.ci.amd64.AMD64;
+import jdk.vm.ci.amd64.AMD64.CPUFeature;
+import jdk.vm.ci.amd64.AMD64Kind;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.Register.RegisterCategory;
+import jdk.vm.ci.code.TargetDescription;
+
+/**
+ * This class extends the AMD64 assembler with functions that emit instructions from the AVX
+ * extension.
+ */
+public class AMD64VectorAssembler extends AMD64MacroAssembler {
+
+ public AMD64VectorAssembler(TargetDescription target) {
+ super(target);
+ assert ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX);
+ }
+
+ private static final int L128 = 0;
+ private static final int L256 = 1;
+ private static final int LIG = 0;
+
+ private static final int W0 = 0;
+ private static final int W1 = 1;
+ private static final int WIG = 0;
+
+ private static final int P_ = 0x0;
+ private static final int P_66 = 0x1;
+ private static final int P_F3 = 0x2;
+ private static final int P_F2 = 0x3;
+
+ private static final int M_0F = 0x1;
+ private static final int M_0F38 = 0x2;
+ private static final int M_0F3A = 0x3;
+
+ /**
+ * Low-level function to encode and emit the VEX prefix.
+ * <p>
+ * 2 byte form: [1100 0101] [R vvvv L pp]<br>
+ * 3 byte form: [1100 0100] [RXB m-mmmm] [W vvvv L pp]
+ * <p>
+     * The RXB and vvvv fields are stored in 1's complement in the prefix encoding. This function
+     * performs the 1's complement conversion; the caller is expected to pass plain, unencoded
+     * arguments.
+ * <p>
+ * The pp field encodes an extension to the opcode:<br>
+ * 00: no extension<br>
+ * 01: 66<br>
+ * 10: F3<br>
+ * 11: F2
+ * <p>
+ * The m-mmmm field encodes the leading bytes of the opcode:<br>
+ * 00001: implied 0F leading opcode byte (default in 2-byte encoding)<br>
+ * 00010: implied 0F 38 leading opcode bytes<br>
+ * 00011: implied 0F 3A leading opcode bytes
+ * <p>
+     * This function automatically chooses the 2-byte or 3-byte encoding, based on the X, B and W
+     * flags and the m-mmmm field.
+ */
+ private void emitVEX(int l, int pp, int mmmmm, int w, int rxb, int vvvv) {
+ assert ((AMD64) target.arch).getFeatures().contains(CPUFeature.AVX) : "emitting VEX prefix on a CPU without AVX support";
+
+ assert l == L128 || l == L256 || l == LIG : "invalid value for VEX.L";
+ assert pp == P_ || pp == P_66 || pp == P_F3 || pp == P_F2 : "invalid value for VEX.pp";
+ assert mmmmm == M_0F || mmmmm == M_0F38 || mmmmm == M_0F3A : "invalid value for VEX.m-mmmm";
+ assert w == W0 || w == W1 || w == WIG : "invalid value for VEX.W";
+
+ assert (rxb & 0x07) == rxb : "invalid value for VEX.RXB";
+ assert (vvvv & 0x0F) == vvvv : "invalid value for VEX.vvvv";
+
+ int rxb1s = rxb ^ 0x07;
+ int vvvv1s = vvvv ^ 0x0F;
+ if ((rxb & 0x03) == 0 && w == WIG && mmmmm == M_0F) {
+ // 2 byte encoding
+ int byte2 = 0;
+ byte2 |= (rxb1s & 0x04) << 5;
+ byte2 |= vvvv1s << 3;
+ byte2 |= l << 2;
+ byte2 |= pp;
+
+ emitByte(0xC5);
+ emitByte(byte2);
+ } else {
+ // 3 byte encoding
+ int byte2 = 0;
+            byte2 |= (rxb1s & 0x07) << 5;
+ byte2 |= mmmmm;
+
+ int byte3 = 0;
+ byte3 |= w << 7;
+ byte3 |= vvvv1s << 3;
+ byte3 |= l << 2;
+ byte3 |= pp;
+
+ emitByte(0xC4);
+ emitByte(byte2);
+ emitByte(byte3);
+ }
+ }
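[Note] A quick worked example of the 2-byte form described above (a standalone sketch, not part of the patch; the helper below only models the second prefix byte):

    // 2-byte VEX: [C5] [R' vvvv' L pp], with R and vvvv stored in 1's complement.
    static int vex2Byte2(int r, int vvvv, int l, int pp) {
        int rInv = (~r) & 0x1;          // R', 1's complement of REX.R
        int vInv = (~vvvv) & 0xF;       // vvvv', 1's complement of the extra source register
        return (rInv << 7) | (vInv << 3) | (l << 2) | pp;
    }

    // vaddps ymm1, ymm2, ymm3  ->  C5 EC 58 CB
    // vex2Byte2(0, 2, 1, 0) == 0xEC; opcode byte 0x58; ModRM 0xCB (mod=11, reg=1, rm=3)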
+
+ private static int getLFlag(AVXSize size) {
+ switch (size) {
+ case XMM:
+ return L128;
+ case YMM:
+ return L256;
+ default:
+ return LIG;
+ }
+ }
+
+ /**
+ * Emit instruction with VEX prefix and two register operands.
+ * <p>
+ * Format: [VEX] [Opcode] [ModR/M]
+ */
+ private void emitVexOp(int l, int pp, int mmmmm, int w, int op, Register reg, Register rm) {
+ emitVEX(l, pp, mmmmm, w, getRXB(reg, rm), 0);
+ emitByte(op);
+ emitModRM(reg, rm);
+ }
+
+ /**
+ * Emit instruction with VEX prefix and three register operands.
+ * <p>
+ * Format: [VEX] [Opcode] [ModR/M]
+ */
+ private void emitVexOp(int l, int pp, int mmmmm, int w, int op, Register reg, Register vvvv, Register rm) {
+ emitVEX(l, pp, mmmmm, w, getRXB(reg, rm), vvvv.encoding());
+ emitByte(op);
+ emitModRM(reg, rm);
+ }
+
+ /**
+ * Emit instruction with VEX prefix and four register operands.
+ * <p>
+ * Format: [VEX] [Opcode] [ModR/M] [Imm8[7:4]]
+ */
+ private void emitVexOp(int l, int pp, int mmmmm, int w, int op, Register reg, Register vvvv, Register rm, Register imm8) {
+ emitVEX(l, pp, mmmmm, w, getRXB(reg, rm), vvvv.encoding());
+ emitByte(op);
+ emitModRM(reg, rm);
+ emitByte(imm8.encoding() << 4);
+ }
+
+ /**
+     * Emit instruction with VEX prefix, three register operands and one memory operand.
+ * <p>
+ * Format: [VEX] [Opcode] [ModR/M] [Imm8[7:4]]
+ */
+ private void emitVexOp(int l, int pp, int mmmmm, int w, int op, Register reg, Register vvvv, AMD64Address rm, Register imm8, int additionalInstructionSize) {
+ emitVEX(l, pp, mmmmm, w, getRXB(reg, rm), vvvv.encoding());
+ emitByte(op);
+ emitOperandHelper(reg, rm, additionalInstructionSize);
+ emitByte(imm8.encoding() << 4);
+ }
+
+ /**
+     * Emit instruction with VEX prefix, two register operands and an opcode extension in the r
+     * field.
+ * <p>
+ * Format: [VEX] [Opcode] [ModR/M]
+ */
+ private void emitVexOp(int l, int pp, int mmmmm, int w, int op, int r, Register vvvv, Register rm) {
+ emitVEX(l, pp, mmmmm, w, getRXB(null, rm), vvvv.encoding());
+ emitByte(op);
+ emitModRM(r, rm);
+ }
+
+ /**
+ * Emit instruction with VEX prefix, one register operand and one memory operand.
+ * <p>
+ * Format: [VEX] [Opcode] [ModR/M] [SIB] [Disp]
+ */
+ private void emitVexOp(int l, int pp, int mmmmm, int w, int op, Register reg, AMD64Address rm, int additionalInstructionSize) {
+ emitVEX(l, pp, mmmmm, w, getRXB(reg, rm), 0);
+ emitByte(op);
+ emitOperandHelper(reg, rm, additionalInstructionSize);
+ }
+
+ /**
+ * Emit instruction with VEX prefix, two register operands and one memory operand.
+ * <p>
+ * Format: [VEX] [Opcode] [ModR/M] [SIB] [Disp]
+ */
+ private void emitVexOp(int l, int pp, int mmmmm, int w, int op, Register reg, Register vvvv, AMD64Address rm, int additionalInstructionSize) {
+ emitVEX(l, pp, mmmmm, w, getRXB(reg, rm), vvvv.encoding());
+ emitByte(op);
+ emitOperandHelper(reg, rm, additionalInstructionSize);
+ }
+
+ private static final OpAssertion AVX1 = new OpAssertion(CPUFeature.AVX, CPUFeature.AVX);
+ private static final OpAssertion AVX1_2 = new OpAssertion(CPUFeature.AVX, CPUFeature.AVX2);
+ private static final OpAssertion AVX2 = new OpAssertion(CPUFeature.AVX2, CPUFeature.AVX2);
+
+ private static final OpAssertion AVX1_128ONLY = new OpAssertion(CPUFeature.AVX, null);
+ private static final OpAssertion AVX1_256ONLY = new OpAssertion(null, CPUFeature.AVX);
+ private static final OpAssertion AVX2_256ONLY = new OpAssertion(null, CPUFeature.AVX2);
+
+ private static final OpAssertion XMM_CPU = new OpAssertion(CPUFeature.AVX, null, AMD64.XMM, null, AMD64.CPU, null);
+ private static final OpAssertion XMM_XMM_CPU = new OpAssertion(CPUFeature.AVX, null, AMD64.XMM, AMD64.XMM, AMD64.CPU, null);
+ private static final OpAssertion CPU_XMM = new OpAssertion(CPUFeature.AVX, null, AMD64.CPU, null, AMD64.XMM, null);
+
+ private static final class OpAssertion {
+ private final CPUFeature avx128feature;
+ private final CPUFeature avx256feature;
+
+ private final RegisterCategory rCategory;
+ private final RegisterCategory vCategory;
+ private final RegisterCategory mCategory;
+ private final RegisterCategory imm8Category;
+
+ private OpAssertion(CPUFeature avx128feature, CPUFeature avx256feature) {
+ this(avx128feature, avx256feature, AMD64.XMM, AMD64.XMM, AMD64.XMM, AMD64.XMM);
+ }
+
+ private OpAssertion(CPUFeature avx128feature, CPUFeature avx256feature, RegisterCategory rCategory, RegisterCategory vCategory, RegisterCategory mCategory, RegisterCategory imm8Category) {
+ this.avx128feature = avx128feature;
+ this.avx256feature = avx256feature;
+ this.rCategory = rCategory;
+ this.vCategory = vCategory;
+ this.mCategory = mCategory;
+ this.imm8Category = imm8Category;
+ }
+
+ public boolean check(AMD64 arch, AVXSize size, Register r, Register v, Register m) {
+ return check(arch, size, r, v, m, null);
+ }
+
+ public boolean check(AMD64 arch, AVXSize size, Register r, Register v, Register m, Register imm8) {
+ switch (size) {
+ case XMM:
+ assert avx128feature != null && arch.getFeatures().contains(avx128feature) : "emitting illegal 128 bit instruction";
+ break;
+ case YMM:
+ assert avx256feature != null && arch.getFeatures().contains(avx256feature) : "emitting illegal 256 bit instruction";
+ break;
+ }
+ if (r != null) {
+ assert r.getRegisterCategory().equals(rCategory);
+ }
+ if (v != null) {
+ assert v.getRegisterCategory().equals(vCategory);
+ }
+ if (m != null) {
+ assert m.getRegisterCategory().equals(mCategory);
+ }
+ if (imm8 != null) {
+ assert imm8.getRegisterCategory().equals(imm8Category);
+ }
+ return true;
+ }
+
+ public boolean supports(EnumSet<CPUFeature> features, AVXSize avxSize) {
+ switch (avxSize) {
+ case XMM:
+ return features.contains(avx128feature);
+ case YMM:
+ return features.contains(avx256feature);
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+ }
+
+ /**
+ * Base class for VEX-encoded instructions.
+ */
+ private static class VexOp {
+ protected final int pp;
+ protected final int mmmmm;
+ protected final int w;
+ protected final int op;
+
+ private final String opcode;
+ protected final OpAssertion assertion;
+
+ protected VexOp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ this.pp = pp;
+ this.mmmmm = mmmmm;
+ this.w = w;
+ this.op = op;
+ this.opcode = opcode;
+ this.assertion = assertion;
+ }
+
+ public boolean isSupported(AMD64VectorAssembler vasm, AMD64Kind kind) {
+ return assertion.supports(((AMD64) vasm.target.arch).getFeatures(), AVXKind.getRegisterSize(kind));
+ }
+
+ @Override
+ public String toString() {
+ return opcode;
+ }
+ }
+
+ /**
+ * VEX-encoded instructions with an operand order of RM, but the M operand must be a register.
+ */
+ public static class VexRROp extends VexOp {
+ // @formatter:off
+ public static final VexRROp VMASKMOVDQU = new VexRROp("VMASKMOVDQU", P_66, M_0F, WIG, 0xF7, AVX1_128ONLY);
+ // @formatter:on
+
+ protected VexRROp(String opcode, int pp, int mmmmm, int w, int op) {
+ this(opcode, pp, mmmmm, w, op, AVX1);
+ }
+
+ protected VexRROp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, null, src);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src);
+ }
+ }
+
+ /**
+ * VEX-encoded instructions with an operand order of RM.
+ */
+ public static class VexRMOp extends VexRROp {
+ // @formatter:off
+ public static final VexRMOp VCVTTSS2SI = new VexRMOp("VCVTTSS2SI", P_F3, M_0F, W0, 0x2C, CPU_XMM);
+ public static final VexRMOp VCVTTSS2SQ = new VexRMOp("VCVTTSS2SQ", P_F3, M_0F, W1, 0x2C, CPU_XMM);
+ public static final VexRMOp VCVTTSD2SI = new VexRMOp("VCVTTSD2SI", P_F2, M_0F, W0, 0x2C, CPU_XMM);
+ public static final VexRMOp VCVTTSD2SQ = new VexRMOp("VCVTTSD2SQ", P_F2, M_0F, W1, 0x2C, CPU_XMM);
+ public static final VexRMOp VCVTPS2PD = new VexRMOp("VCVTPS2PD", P_, M_0F, WIG, 0x5A);
+ public static final VexRMOp VCVTPD2PS = new VexRMOp("VCVTPD2PS", P_66, M_0F, WIG, 0x5A);
+ public static final VexRMOp VCVTDQ2PS = new VexRMOp("VCVTDQ2PS", P_, M_0F, WIG, 0x5B);
+ public static final VexRMOp VCVTTPS2DQ = new VexRMOp("VCVTTPS2DQ", P_F3, M_0F, WIG, 0x5B);
+ public static final VexRMOp VCVTTPD2DQ = new VexRMOp("VCVTTPD2DQ", P_66, M_0F, WIG, 0xE6);
+ public static final VexRMOp VCVTDQ2PD = new VexRMOp("VCVTDQ2PD", P_F3, M_0F, WIG, 0xE6);
+ public static final VexRMOp VBROADCASTSS = new VexRMOp("VBROADCASTSS", P_66, M_0F38, W0, 0x18);
+ public static final VexRMOp VBROADCASTSD = new VexRMOp("VBROADCASTSD", P_66, M_0F38, W0, 0x19, AVX1_256ONLY);
+ public static final VexRMOp VBROADCASTF128 = new VexRMOp("VBROADCASTF128", P_66, M_0F38, W0, 0x1A, AVX1_256ONLY);
+ public static final VexRMOp VBROADCASTI128 = new VexRMOp("VBROADCASTI128", P_66, M_0F38, W0, 0x5A, AVX2_256ONLY);
+ public static final VexRMOp VPBROADCASTB = new VexRMOp("VPBROADCASTB", P_66, M_0F38, W0, 0x78, AVX2);
+ public static final VexRMOp VPBROADCASTW = new VexRMOp("VPBROADCASTW", P_66, M_0F38, W0, 0x79, AVX2);
+ public static final VexRMOp VPBROADCASTD = new VexRMOp("VPBROADCASTD", P_66, M_0F38, W0, 0x58, AVX2);
+ public static final VexRMOp VPBROADCASTQ = new VexRMOp("VPBROADCASTQ", P_66, M_0F38, W0, 0x59, AVX2);
+ public static final VexRMOp VPMOVSXBW = new VexRMOp("VPMOVSXBW", P_66, M_0F38, WIG, 0x20);
+ public static final VexRMOp VPMOVSXBD = new VexRMOp("VPMOVSXBD", P_66, M_0F38, WIG, 0x21);
+ public static final VexRMOp VPMOVSXBQ = new VexRMOp("VPMOVSXBQ", P_66, M_0F38, WIG, 0x22);
+ public static final VexRMOp VPMOVSXWD = new VexRMOp("VPMOVSXWD", P_66, M_0F38, WIG, 0x23);
+ public static final VexRMOp VPMOVSXWQ = new VexRMOp("VPMOVSXWQ", P_66, M_0F38, WIG, 0x24);
+ public static final VexRMOp VPMOVSXDQ = new VexRMOp("VPMOVSXDQ", P_66, M_0F38, WIG, 0x25);
+ public static final VexRMOp VPMOVZXBW = new VexRMOp("VPMOVZXBW", P_66, M_0F38, WIG, 0x30);
+ public static final VexRMOp VPMOVZXBD = new VexRMOp("VPMOVZXBD", P_66, M_0F38, WIG, 0x31);
+ public static final VexRMOp VPMOVZXBQ = new VexRMOp("VPMOVZXBQ", P_66, M_0F38, WIG, 0x32);
+ public static final VexRMOp VPMOVZXWD = new VexRMOp("VPMOVZXWD", P_66, M_0F38, WIG, 0x33);
+ public static final VexRMOp VPMOVZXWQ = new VexRMOp("VPMOVZXWQ", P_66, M_0F38, WIG, 0x34);
+ public static final VexRMOp VPMOVZXDQ = new VexRMOp("VPMOVZXDQ", P_66, M_0F38, WIG, 0x35);
+ public static final VexRMOp VSQRTPD = new VexRMOp("VSQRTPD", P_66, M_0F, WIG, 0x51);
+ public static final VexRMOp VSQRTPS = new VexRMOp("VSQRTPS", P_, M_0F, WIG, 0x51);
+ public static final VexRMOp VSQRTSD = new VexRMOp("VSQRTSD", P_F2, M_0F, WIG, 0x51);
+ public static final VexRMOp VSQRTSS = new VexRMOp("VSQRTSS", P_F3, M_0F, WIG, 0x51);
+ public static final VexRMOp VUCOMISS = new VexRMOp("VUCOMISS", P_, M_0F, WIG, 0x2E);
+ public static final VexRMOp VUCOMISD = new VexRMOp("VUCOMISD", P_66, M_0F, WIG, 0x2E);
+ // @formatter:on
+
+ protected VexRMOp(String opcode, int pp, int mmmmm, int w, int op) {
+ this(opcode, pp, mmmmm, w, op, AVX1);
+ }
+
+ protected VexRMOp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, AMD64Address src) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, null, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src, 0);
+ }
+ }
+
+ /**
+ * VEX-encoded move instructions.
+ * <p>
+ * These instructions have two opcodes: op is the forward move instruction with an operand order
+ * of RM, and opReverse is the reverse move instruction with an operand order of MR.
+ */
+ public static final class VexMoveOp extends VexRMOp {
+ // @formatter:off
+ public static final VexMoveOp VMOVDQA = new VexMoveOp("VMOVDQA", P_66, M_0F, WIG, 0x6F, 0x7F);
+ public static final VexMoveOp VMOVDQU = new VexMoveOp("VMOVDQU", P_F3, M_0F, WIG, 0x6F, 0x7F);
+ public static final VexMoveOp VMOVAPS = new VexMoveOp("VMOVAPS", P_, M_0F, WIG, 0x28, 0x29);
+ public static final VexMoveOp VMOVAPD = new VexMoveOp("VMOVAPD", P_66, M_0F, WIG, 0x28, 0x29);
+ public static final VexMoveOp VMOVUPS = new VexMoveOp("VMOVUPS", P_, M_0F, WIG, 0x10, 0x11);
+ public static final VexMoveOp VMOVUPD = new VexMoveOp("VMOVUPD", P_66, M_0F, WIG, 0x10, 0x11);
+ public static final VexMoveOp VMOVSS = new VexMoveOp("VMOVSS", P_F3, M_0F, WIG, 0x10, 0x11);
+ public static final VexMoveOp VMOVSD = new VexMoveOp("VMOVSD", P_F2, M_0F, WIG, 0x10, 0x11);
+ public static final VexMoveOp VMOVD = new VexMoveOp("VMOVD", P_66, M_0F, W0, 0x6E, 0x7E, XMM_CPU);
+ public static final VexMoveOp VMOVQ = new VexMoveOp("VMOVQ", P_66, M_0F, W1, 0x6E, 0x7E, XMM_CPU);
+ // @formatter:on
+
+ private final int opReverse;
+
+ private VexMoveOp(String opcode, int pp, int mmmmm, int w, int op, int opReverse) {
+ this(opcode, pp, mmmmm, w, op, opReverse, AVX1);
+ }
+
+ private VexMoveOp(String opcode, int pp, int mmmmm, int w, int op, int opReverse, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ this.opReverse = opReverse;
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, AMD64Address dst, Register src) {
+ assert assertion.check((AMD64) asm.target.arch, size, src, null, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, opReverse, src, dst, 0);
+ }
+
+ public void emitReverse(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src) {
+ assert assertion.check((AMD64) asm.target.arch, size, src, null, dst);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, opReverse, src, dst);
+ }
+ }
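[Note] A hypothetical usage sketch of the forward (RM) and reverse (MR) forms, assuming the imports of this file; the helper name, register and address choices are illustrative, not from the patch:

    static void spillAndReloadYmm0(AMD64VectorAssembler vasm) {
        AMD64Address slot = new AMD64Address(AMD64.rsp, 16);
        VexMoveOp.VMOVDQU.emit(vasm, AVXSize.YMM, slot, AMD64.xmm0);   // store: reverse (MR) opcode 0x7F
        VexMoveOp.VMOVDQU.emit(vasm, AVXSize.YMM, AMD64.xmm0, slot);   // load : forward (RM) opcode 0x6F
    }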
+
+ public interface VexRRIOp {
+ void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src, int imm8);
+ }
+
+ /**
+ * VEX-encoded instructions with an operand order of RMI.
+ */
+ public static final class VexRMIOp extends VexOp implements VexRRIOp {
+ // @formatter:off
+ public static final VexRMIOp VPERMQ = new VexRMIOp("VPERMQ", P_66, M_0F3A, W1, 0x00, AVX2_256ONLY);
+ public static final VexRMIOp VPSHUFLW = new VexRMIOp("VPSHUFLW", P_F2, M_0F, WIG, 0x70, AVX1_2);
+ public static final VexRMIOp VPSHUFHW = new VexRMIOp("VPSHUFHW", P_F3, M_0F, WIG, 0x70, AVX1_2);
+ public static final VexRMIOp VPSHUFD = new VexRMIOp("VPSHUFD", P_66, M_0F, WIG, 0x70, AVX1_2);
+ // @formatter:on
+
+ private VexRMIOp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ }
+
+ @Override
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src, int imm8) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, null, src);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src);
+ asm.emitByte(imm8);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, AMD64Address src, int imm8) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, null, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src, 1);
+ asm.emitByte(imm8);
+ }
+ }
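[Note] For example, given an AMD64VectorAssembler vasm (registers illustrative), imm8 0x1B selects element order 3,2,1,0 and makes VPSHUFD reverse the four dwords of an XMM register:

    // xmm0 = xmm1 with its 32-bit lanes reversed (0x1B = 0b00_01_10_11)
    VexRMIOp.VPSHUFD.emit(vasm, AVXSize.XMM, AMD64.xmm0, AMD64.xmm1, 0x1B);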
+
+ /**
+ * VEX-encoded instructions with an operand order of MRI.
+ */
+ public static final class VexMRIOp extends VexOp implements VexRRIOp {
+ // @formatter:off
+ public static final VexMRIOp VEXTRACTF128 = new VexMRIOp("VEXTRACTF128", P_66, M_0F3A, W0, 0x19, AVX1_256ONLY);
+ public static final VexMRIOp VEXTRACTI128 = new VexMRIOp("VEXTRACTI128", P_66, M_0F3A, W0, 0x39, AVX2_256ONLY);
+ public static final VexMRIOp VPEXTRB = new VexMRIOp("VPEXTRB", P_66, M_0F3A, W0, 0x14, XMM_CPU);
+ public static final VexMRIOp VPEXTRW = new VexMRIOp("VPEXTRW", P_66, M_0F3A, W0, 0x15, XMM_CPU);
+ public static final VexMRIOp VPEXTRD = new VexMRIOp("VPEXTRD", P_66, M_0F3A, W0, 0x16, XMM_CPU);
+ public static final VexMRIOp VPEXTRQ = new VexMRIOp("VPEXTRQ", P_66, M_0F3A, W1, 0x16, XMM_CPU);
+ // @formatter:on
+
+ private VexMRIOp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ }
+
+ @Override
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src, int imm8) {
+ assert assertion.check((AMD64) asm.target.arch, size, src, null, dst);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, src, dst);
+ asm.emitByte(imm8);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, AMD64Address dst, Register src, int imm8) {
+ assert assertion.check((AMD64) asm.target.arch, size, src, null, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, src, dst, 1);
+ asm.emitByte(imm8);
+ }
+ }
+
+ /**
+ * VEX-encoded instructions with an operand order of RVMR.
+ */
+ public static class VexRVMROp extends VexOp {
+ // @formatter:off
+ public static final VexRVMROp VPBLENDVB = new VexRVMROp("VPBLENDVB", P_66, M_0F3A, W0, 0x4C, AVX1_2);
+ public static final VexRVMROp VPBLENDVPS = new VexRVMROp("VPBLENDVPS", P_66, M_0F3A, W0, 0x4A, AVX1);
+ public static final VexRVMROp VPBLENDVPD = new VexRVMROp("VPBLENDVPD", P_66, M_0F3A, W0, 0x4B, AVX1);
+ // @formatter:on
+
+ protected VexRVMROp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register mask, Register src1, Register src2) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, mask, src1, src2);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2, mask);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register mask, Register src1, AMD64Address src2) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, mask, src1, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2, mask, 0);
+ }
+ }
+
+ /**
+ * VEX-encoded instructions with an operand order of RVM.
+ */
+ public static class VexRVMOp extends VexOp {
+ // @formatter:off
+ public static final VexRVMOp VANDPS = new VexRVMOp("VANDPS", P_, M_0F, WIG, 0x54);
+ public static final VexRVMOp VANDPD = new VexRVMOp("VANDPD", P_66, M_0F, WIG, 0x54);
+ public static final VexRVMOp VORPS = new VexRVMOp("VORPS", P_, M_0F, WIG, 0x56);
+ public static final VexRVMOp VORPD = new VexRVMOp("VORPD", P_66, M_0F, WIG, 0x56);
+ public static final VexRVMOp VADDPS = new VexRVMOp("VADDPS", P_, M_0F, WIG, 0x58);
+ public static final VexRVMOp VADDPD = new VexRVMOp("VADDPD", P_66, M_0F, WIG, 0x58);
+ public static final VexRVMOp VADDSS = new VexRVMOp("VADDSS", P_F3, M_0F, WIG, 0x58);
+ public static final VexRVMOp VADDSD = new VexRVMOp("VADDSD", P_F2, M_0F, WIG, 0x58);
+ public static final VexRVMOp VXORPS = new VexRVMOp("VXORPS", P_, M_0F, WIG, 0x57);
+ public static final VexRVMOp VXORPD = new VexRVMOp("VXORPD", P_66, M_0F, WIG, 0x57);
+ public static final VexRVMOp VMULPS = new VexRVMOp("VMULPS", P_, M_0F, WIG, 0x59);
+ public static final VexRVMOp VMULPD = new VexRVMOp("VMULPD", P_66, M_0F, WIG, 0x59);
+ public static final VexRVMOp VMULSS = new VexRVMOp("VMULSS", P_F3, M_0F, WIG, 0x59);
+ public static final VexRVMOp VMULSD = new VexRVMOp("VMULSD", P_F2, M_0F, WIG, 0x59);
+ public static final VexRVMOp VSUBPS = new VexRVMOp("VSUBPS", P_, M_0F, WIG, 0x5C);
+ public static final VexRVMOp VSUBPD = new VexRVMOp("VSUBPD", P_66, M_0F, WIG, 0x5C);
+ public static final VexRVMOp VSUBSS = new VexRVMOp("VSUBSS", P_F3, M_0F, WIG, 0x5C);
+ public static final VexRVMOp VSUBSD = new VexRVMOp("VSUBSD", P_F2, M_0F, WIG, 0x5C);
+ public static final VexRVMOp VDIVPS = new VexRVMOp("VDIVPS", P_, M_0F, WIG, 0x5E);
+ public static final VexRVMOp VDIVPD = new VexRVMOp("VDIVPD", P_66, M_0F, WIG, 0x5E);
+        public static final VexRVMOp VDIVSS = new VexRVMOp("VDIVSS", P_F3, M_0F, WIG, 0x5E);
+        public static final VexRVMOp VDIVSD = new VexRVMOp("VDIVSD", P_F2, M_0F, WIG, 0x5E);
+ public static final VexRVMOp VADDSUBPS = new VexRVMOp("VADDSUBPS", P_F2, M_0F, WIG, 0xD0);
+ public static final VexRVMOp VADDSUBPD = new VexRVMOp("VADDSUBPD", P_66, M_0F, WIG, 0xD0);
+ public static final VexRVMOp VPAND = new VexRVMOp("VPAND", P_66, M_0F, WIG, 0xDB, AVX1_2);
+ public static final VexRVMOp VPOR = new VexRVMOp("VPOR", P_66, M_0F, WIG, 0xEB, AVX1_2);
+ public static final VexRVMOp VPXOR = new VexRVMOp("VPXOR", P_66, M_0F, WIG, 0xEF, AVX1_2);
+ public static final VexRVMOp VPADDB = new VexRVMOp("VPADDB", P_66, M_0F, WIG, 0xFC, AVX1_2);
+ public static final VexRVMOp VPADDW = new VexRVMOp("VPADDW", P_66, M_0F, WIG, 0xFD, AVX1_2);
+ public static final VexRVMOp VPADDD = new VexRVMOp("VPADDD", P_66, M_0F, WIG, 0xFE, AVX1_2);
+ public static final VexRVMOp VPADDQ = new VexRVMOp("VPADDQ", P_66, M_0F, WIG, 0xD4, AVX1_2);
+ public static final VexRVMOp VPMULHUW = new VexRVMOp("VPMULHUW", P_66, M_0F, WIG, 0xE4, AVX1_2);
+ public static final VexRVMOp VPMULHW = new VexRVMOp("VPMULHW", P_66, M_0F, WIG, 0xE5, AVX1_2);
+ public static final VexRVMOp VPMULLW = new VexRVMOp("VPMULLW", P_66, M_0F, WIG, 0xD5, AVX1_2);
+ public static final VexRVMOp VPMULLD = new VexRVMOp("VPMULLD", P_66, M_0F38, WIG, 0x40, AVX1_2);
+ public static final VexRVMOp VPSUBB = new VexRVMOp("VPSUBB", P_66, M_0F, WIG, 0xF8, AVX1_2);
+ public static final VexRVMOp VPSUBW = new VexRVMOp("VPSUBW", P_66, M_0F, WIG, 0xF9, AVX1_2);
+ public static final VexRVMOp VPSUBD = new VexRVMOp("VPSUBD", P_66, M_0F, WIG, 0xFA, AVX1_2);
+ public static final VexRVMOp VPSUBQ = new VexRVMOp("VPSUBQ", P_66, M_0F, WIG, 0xFB, AVX1_2);
+ public static final VexRVMOp VPSHUFB = new VexRVMOp("VPSHUFB", P_66, M_0F38, WIG, 0x00, AVX1_2);
+ public static final VexRVMOp VCVTSD2SS = new VexRVMOp("VCVTSD2SS", P_F2, M_0F, WIG, 0x5A);
+ public static final VexRVMOp VCVTSS2SD = new VexRVMOp("VCVTSS2SD", P_F3, M_0F, WIG, 0x5A);
+ public static final VexRVMOp VCVTSI2SD = new VexRVMOp("VCVTSI2SD", P_F2, M_0F, W0, 0x2A, XMM_XMM_CPU);
+ public static final VexRVMOp VCVTSQ2SD = new VexRVMOp("VCVTSQ2SD", P_F2, M_0F, W1, 0x2A, XMM_XMM_CPU);
+ public static final VexRVMOp VCVTSI2SS = new VexRVMOp("VCVTSI2SS", P_F3, M_0F, W0, 0x2A, XMM_XMM_CPU);
+ public static final VexRVMOp VCVTSQ2SS = new VexRVMOp("VCVTSQ2SS", P_F3, M_0F, W1, 0x2A, XMM_XMM_CPU);
+ public static final VexRVMOp VPCMPEQB = new VexRVMOp("VPCMPEQB", P_66, M_0F, WIG, 0x74, AVX1_2);
+ public static final VexRVMOp VPCMPEQW = new VexRVMOp("VPCMPEQW", P_66, M_0F, WIG, 0x75, AVX1_2);
+ public static final VexRVMOp VPCMPEQD = new VexRVMOp("VPCMPEQD", P_66, M_0F, WIG, 0x76, AVX1_2);
+ public static final VexRVMOp VPCMPEQQ = new VexRVMOp("VPCMPEQQ", P_66, M_0F38, WIG, 0x29, AVX1_2);
+ public static final VexRVMOp VPCMPGTB = new VexRVMOp("VPCMPGTB", P_66, M_0F, WIG, 0x64, AVX1_2);
+ public static final VexRVMOp VPCMPGTW = new VexRVMOp("VPCMPGTW", P_66, M_0F, WIG, 0x65, AVX1_2);
+ public static final VexRVMOp VPCMPGTD = new VexRVMOp("VPCMPGTD", P_66, M_0F, WIG, 0x66, AVX1_2);
+ public static final VexRVMOp VPCMPGTQ = new VexRVMOp("VPCMPGTQ", P_66, M_0F38, WIG, 0x37, AVX1_2);
+ // @formatter:on
+
+ private VexRVMOp(String opcode, int pp, int mmmmm, int w, int op) {
+ this(opcode, pp, mmmmm, w, op, AVX1);
+ }
+
+ protected VexRVMOp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src1, Register src2) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, src1, src2);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src1, AMD64Address src2) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, src1, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2, 0);
+ }
+ }
+
+ /**
+ * VEX-encoded shift instructions with an operand order of either RVM or VMI.
+ */
+ public static final class VexShiftOp extends VexRVMOp implements VexRRIOp {
+ // @formatter:off
+ public static final VexShiftOp VPSRLW = new VexShiftOp("VPSRLW", P_66, M_0F, WIG, 0xD1, 0x71, 2);
+ public static final VexShiftOp VPSRLD = new VexShiftOp("VPSRLD", P_66, M_0F, WIG, 0xD2, 0x72, 2);
+ public static final VexShiftOp VPSRLQ = new VexShiftOp("VPSRLQ", P_66, M_0F, WIG, 0xD3, 0x73, 2);
+ public static final VexShiftOp VPSRAW = new VexShiftOp("VPSRAW", P_66, M_0F, WIG, 0xE1, 0x71, 4);
+ public static final VexShiftOp VPSRAD = new VexShiftOp("VPSRAD", P_66, M_0F, WIG, 0xE2, 0x72, 4);
+ public static final VexShiftOp VPSLLW = new VexShiftOp("VPSLLW", P_66, M_0F, WIG, 0xF1, 0x71, 6);
+ public static final VexShiftOp VPSLLD = new VexShiftOp("VPSLLD", P_66, M_0F, WIG, 0xF2, 0x72, 6);
+ public static final VexShiftOp VPSLLQ = new VexShiftOp("VPSLLQ", P_66, M_0F, WIG, 0xF3, 0x73, 6);
+ // @formatter:on
+
+ private final int immOp;
+ private final int r;
+
+ private VexShiftOp(String opcode, int pp, int mmmmm, int w, int op, int immOp, int r) {
+ super(opcode, pp, mmmmm, w, op, AVX1_2);
+ this.immOp = immOp;
+ this.r = r;
+ }
+
+ @Override
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src, int imm8) {
+ assert assertion.check((AMD64) asm.target.arch, size, null, dst, src);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, immOp, r, dst, src);
+ asm.emitByte(imm8);
+ }
+ }
+
+ public static final class VexMaskMoveOp extends VexOp {
+ // @formatter:off
+ public static final VexMaskMoveOp VMASKMOVPS = new VexMaskMoveOp("VMASKMOVPS", P_66, M_0F38, W0, 0x2C, 0x2E);
+ public static final VexMaskMoveOp VMASKMOVPD = new VexMaskMoveOp("VMASKMOVPD", P_66, M_0F38, W0, 0x2D, 0x2F);
+ public static final VexMaskMoveOp VPMASKMOVD = new VexMaskMoveOp("VPMASKMOVD", P_66, M_0F38, W0, 0x8C, 0x8E, AVX2);
+ public static final VexMaskMoveOp VPMASKMOVQ = new VexMaskMoveOp("VPMASKMOVQ", P_66, M_0F38, W1, 0x8C, 0x8E, AVX2);
+ // @formatter:on
+
+ private final int opReverse;
+
+ private VexMaskMoveOp(String opcode, int pp, int mmmmm, int w, int op, int opReverse) {
+ this(opcode, pp, mmmmm, w, op, opReverse, AVX1);
+ }
+
+ private VexMaskMoveOp(String opcode, int pp, int mmmmm, int w, int op, int opReverse, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ this.opReverse = opReverse;
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register mask, AMD64Address src) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, mask, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, mask, src, 0);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, AMD64Address dst, Register mask, Register src) {
+ assert assertion.check((AMD64) asm.target.arch, size, src, mask, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, opReverse, src, mask, dst, 0);
+ }
+ }
+
+ /**
+ * VEX-encoded instructions with an operand order of RVMI.
+ */
+ public static final class VexRVMIOp extends VexOp {
+ // @formatter:off
+ public static final VexRVMIOp VSHUFPS = new VexRVMIOp("VSHUFPS", P_, M_0F, WIG, 0xC6);
+ public static final VexRVMIOp VSHUFPD = new VexRVMIOp("VSHUFPD", P_66, M_0F, WIG, 0xC6);
+ public static final VexRVMIOp VINSERTF128 = new VexRVMIOp("VINSERTF128", P_66, M_0F3A, W0, 0x18, AVX1_256ONLY);
+ public static final VexRVMIOp VINSERTI128 = new VexRVMIOp("VINSERTI128", P_66, M_0F3A, W0, 0x38, AVX2_256ONLY);
+ // @formatter:on
+
+ private VexRVMIOp(String opcode, int pp, int mmmmm, int w, int op) {
+ this(opcode, pp, mmmmm, w, op, AVX1);
+ }
+
+ private VexRVMIOp(String opcode, int pp, int mmmmm, int w, int op, OpAssertion assertion) {
+ super(opcode, pp, mmmmm, w, op, assertion);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src1, Register src2, int imm8) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, src1, src2);
+ assert (imm8 & 0xFF) == imm8;
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2);
+ asm.emitByte(imm8);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src1, AMD64Address src2, int imm8) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, src1, null);
+ assert (imm8 & 0xFF) == imm8;
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2, 1);
+ asm.emitByte(imm8);
+ }
+ }
+
+ /**
+ * VEX-encoded comparison operation with an operand order of RVMI. The immediate operand is a
+ * comparison operator.
+ */
+ public static final class VexFloatCompareOp extends VexOp {
+ // @formatter:off
+ public static final VexFloatCompareOp VCMPPS = new VexFloatCompareOp("VCMPPS", P_, M_0F, WIG, 0xC2);
+ public static final VexFloatCompareOp VCMPPD = new VexFloatCompareOp("VCMPPD", P_66, M_0F, WIG, 0xC2);
+ public static final VexFloatCompareOp VCMPSS = new VexFloatCompareOp("VCMPSS", P_F3, M_0F, WIG, 0xC2);
+ public static final VexFloatCompareOp VCMPSD = new VexFloatCompareOp("VCMPSD", P_F2, M_0F, WIG, 0xC2);
+ // @formatter:on
+
+ public enum Predicate {
+ EQ_OQ(0x00),
+ LT_OS(0x01),
+ LE_OS(0x02),
+ UNORD_Q(0x03),
+ NEQ_UQ(0x04),
+ NLT_US(0x05),
+ NLE_US(0x06),
+ ORD_Q(0x07),
+ EQ_UQ(0x08),
+ NGE_US(0x09),
+ NGT_US(0x0a),
+ FALSE_OQ(0x0b),
+ NEQ_OQ(0x0c),
+ GE_OS(0x0d),
+ GT_OS(0x0e),
+ TRUE_UQ(0x0f),
+ EQ_OS(0x10),
+ LT_OQ(0x11),
+ LE_OQ(0x12),
+ UNORD_S(0x13),
+ NEQ_US(0x14),
+ NLT_UQ(0x15),
+ NLE_UQ(0x16),
+ ORD_S(0x17),
+ EQ_US(0x18),
+ NGE_UQ(0x19),
+ NGT_UQ(0x1a),
+ FALSE_OS(0x1b),
+ NEQ_OS(0x1c),
+ GE_OQ(0x1d),
+ GT_OQ(0x1e),
+ TRUE_US(0x1f);
+
+ private int imm8;
+
+ Predicate(int imm8) {
+ this.imm8 = imm8;
+ }
+
+ public static Predicate getPredicate(Condition condition, boolean unorderedIsTrue) {
+ if (unorderedIsTrue) {
+ switch (condition) {
+ case EQ:
+ return EQ_UQ;
+ case NE:
+ return NEQ_UQ;
+ case LT:
+ return NGE_UQ;
+ case LE:
+ return NGT_UQ;
+ case GT:
+ return NLE_UQ;
+ case GE:
+ return NLT_UQ;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ } else {
+ switch (condition) {
+ case EQ:
+ return EQ_OQ;
+ case NE:
+ return NEQ_OQ;
+ case LT:
+ return LT_OQ;
+ case LE:
+ return LE_OQ;
+ case GT:
+ return GT_OQ;
+ case GE:
+ return GE_OQ;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+ }
+ }
+
+ private VexFloatCompareOp(String opcode, int pp, int mmmmm, int w, int op) {
+ super(opcode, pp, mmmmm, w, op, AVX1);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src1, Register src2, Predicate p) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, src1, src2);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2);
+ asm.emitByte(p.imm8);
+ }
+
+ public void emit(AMD64VectorAssembler asm, AVXSize size, Register dst, Register src1, AMD64Address src2, Predicate p) {
+ assert assertion.check((AMD64) asm.target.arch, size, dst, src1, null);
+ asm.emitVexOp(getLFlag(size), pp, mmmmm, w, op, dst, src1, src2, 1);
+ asm.emitByte(p.imm8);
+ }
+ }
+
+ @Override
+ public void movflt(Register dst, Register src) {
+ VexMoveOp.VMOVAPS.emit(this, AVXSize.XMM, dst, src);
+ }
+
+ @Override
+ public void movflt(Register dst, AMD64Address src) {
+ VexMoveOp.VMOVSS.emit(this, AVXSize.XMM, dst, src);
+ }
+
+ @Override
+ public void movflt(AMD64Address dst, Register src) {
+ VexMoveOp.VMOVSS.emit(this, AVXSize.XMM, dst, src);
+ }
+
+ @Override
+ public void movdbl(Register dst, Register src) {
+ VexMoveOp.VMOVAPD.emit(this, AVXSize.XMM, dst, src);
+ }
+
+ @Override
+ public void movdbl(Register dst, AMD64Address src) {
+ VexMoveOp.VMOVSD.emit(this, AVXSize.XMM, dst, src);
+ }
+
+ @Override
+ public void movdbl(AMD64Address dst, Register src) {
+ VexMoveOp.VMOVSD.emit(this, AVXSize.XMM, dst, src);
+ }
+}
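
For orientation, a minimal usage sketch of the op tables added above (not part of the changeset). It assumes an AMD64VectorAssembler `asm` created for an AVX-capable target and already-allocated XMM registers `xmm0`..`xmm2` (e.g. AMD64.xmm0):

    // Emit vaddpd ymm0, ymm1, ymm2 via the RVM table entry.
    VexRVMOp.VADDPD.emit(asm, AVXSize.YMM, xmm0, xmm1, xmm2);
    // Emit an immediate shift, vpslld xmm0, xmm1, 7, via the VMI form.
    VexShiftOp.VPSLLD.emit(asm, AVXSize.XMM, xmm0, xmm1, 7);
    // Emit a float compare with an explicit predicate byte: vcmppd xmm0, xmm1, xmm2, 0x11.
    VexFloatCompareOp.VCMPPD.emit(asm, AVXSize.XMM, xmm0, xmm1, xmm2, VexFloatCompareOp.Predicate.LT_OQ);
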
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.amd64/src/org/graalvm/compiler/asm/amd64/AVXKind.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.asm.amd64;
+
+import static org.graalvm.compiler.asm.amd64.AVXKind.AVXSize.DWORD;
+import static org.graalvm.compiler.asm.amd64.AVXKind.AVXSize.QWORD;
+import static org.graalvm.compiler.asm.amd64.AVXKind.AVXSize.XMM;
+import static org.graalvm.compiler.asm.amd64.AVXKind.AVXSize.YMM;
+
+import jdk.vm.ci.meta.Value;
+import org.graalvm.compiler.debug.GraalError;
+
+import jdk.vm.ci.amd64.AMD64Kind;
+
+/**
+ * Helper methods for dealing with AVX and SSE {@link AMD64Kind AMD64Kinds}.
+ */
+public final class AVXKind {
+
+ public enum AVXSize {
+ DWORD,
+ QWORD,
+ XMM,
+ YMM;
+
+ public int getBytes() {
+ switch (this) {
+ case DWORD:
+ return 4;
+ case QWORD:
+ return 8;
+ case XMM:
+ return 16;
+ case YMM:
+ return 32;
+ default:
+ return 0;
+ }
+ }
+ }
+
+ private AVXKind() {
+ }
+
+ public static AVXSize getRegisterSize(Value a) {
+ AMD64Kind kind = (AMD64Kind) a.getPlatformKind();
+ if (kind.isXMM()) {
+ return getRegisterSize(kind);
+ } else {
+ return XMM;
+ }
+ }
+
+ public static AVXSize getDataSize(AMD64Kind kind) {
+ assert kind.isXMM() : "unexpected kind " + kind;
+ switch (kind.getSizeInBytes()) {
+ case 4:
+ return DWORD;
+ case 8:
+ return QWORD;
+ case 16:
+ return XMM;
+ case 32:
+ return YMM;
+ default:
+ throw GraalError.shouldNotReachHere("unsupported kind: " + kind);
+ }
+ }
+
+ public static AVXSize getRegisterSize(AMD64Kind kind) {
+ assert kind.isXMM() : "unexpected kind " + kind;
+ if (kind.getSizeInBytes() > 16) {
+ return YMM;
+ } else {
+ return XMM;
+ }
+ }
+
+ public static AMD64Kind changeSize(AMD64Kind kind, AVXSize newSize) {
+ return getAVXKind(kind.getScalar(), newSize);
+ }
+
+ public static AMD64Kind getAVXKind(AMD64Kind base, AVXSize size) {
+ for (AMD64Kind ret : AMD64Kind.values()) {
+ if (ret.getScalar() == base && ret.getSizeInBytes() == size.getBytes()) {
+ return ret;
+ }
+ }
+ throw GraalError.shouldNotReachHere(String.format("unsupported vector kind: %s x %s", size, base));
+ }
+
+ public static AMD64Kind getAVXKind(AMD64Kind base, int length) {
+ for (AMD64Kind ret : AMD64Kind.values()) {
+ if (ret.getScalar() == base && ret.getVectorLength() == length) {
+ return ret;
+ }
+ }
+ throw GraalError.shouldNotReachHere(String.format("unsupported vector kind: %d x %s", length, base));
+ }
+}
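
A short sketch of how these helpers compose (illustrative only; it assumes the standard JVMCI AMD64Kind constants such as SINGLE, DOUBLE and V256_SINGLE):

    AMD64Kind vector = AVXKind.getAVXKind(AMD64Kind.SINGLE, AVXSize.YMM); // V256_SINGLE, 8 floats
    AVXSize regSize  = AVXKind.getRegisterSize(vector);                   // YMM (32 bytes)
    AVXSize dataSize = AVXKind.getDataSize(AMD64Kind.DOUBLE);             // QWORD (8 bytes)
    AMD64Kind half   = AVXKind.changeSize(vector, AVXSize.XMM);           // V128_SINGLE
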
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64AddressLoweringByUse.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64AddressLoweringByUse.java Mon Jun 18 09:48:22 2018 -0700
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2017, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2017, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64LIRGenerator.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.aarch64/src/org/graalvm/compiler/core/aarch64/AArch64LIRGenerator.java Mon Jun 18 09:48:22 2018 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -51,9 +51,12 @@
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.CondMoveOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.StrategySwitchOp;
import org.graalvm.compiler.lir.aarch64.AArch64ControlFlow.TableSwitchOp;
+import org.graalvm.compiler.lir.aarch64.AArch64LIRFlagsVersioned;
import org.graalvm.compiler.lir.aarch64.AArch64Move;
import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddOp;
+import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndAddLSEOp;
import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.CompareAndSwapOp;
+import org.graalvm.compiler.lir.aarch64.AArch64AtomicMove.AtomicReadAndWriteOp;
import org.graalvm.compiler.lir.aarch64.AArch64Move.MembarOp;
import org.graalvm.compiler.lir.aarch64.AArch64PauseOp;
import org.graalvm.compiler.lir.gen.LIRGenerationResult;
@@ -147,12 +150,21 @@
}
@Override
+ public Value emitAtomicReadAndWrite(Value address, ValueKind<?> kind, Value newValue) {
+ Variable result = newVariable(kind);
+ Variable scratch = newVariable(kind);
+ append(new AtomicReadAndWriteOp((AArch64Kind) kind.getPlatformKind(), asAllocatable(result), asAllocatable(address), asAllocatable(newValue), asAllocatable(scratch)));
+ return result;
+ }
+
+ @Override
public Value emitAtomicReadAndAdd(Value address, ValueKind<?> kind, Value delta) {
Variable result = newVariable(kind);
- Variable scratch1 = newVariable(kind);
- Variable scratch2 = newVariable(kind);
-
- append(new AtomicReadAndAddOp((AArch64Kind) kind.getPlatformKind(), asAllocatable(result), asAllocatable(address), asAllocatable(delta), asAllocatable(scratch1), asAllocatable(scratch2)));
+ if (AArch64LIRFlagsVersioned.useLSE(target().arch)) {
+ append(new AtomicReadAndAddLSEOp((AArch64Kind) kind.getPlatformKind(), asAllocatable(result), asAllocatable(address), asAllocatable(delta)));
+ } else {
+ append(new AtomicReadAndAddOp((AArch64Kind) kind.getPlatformKind(), asAllocatable(result), asAllocatable(address), delta));
+ }
return result;
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CompareCanonicalizerTest3.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CompareCanonicalizerTest3.java Mon Jun 18 09:48:22 2018 -0700
@@ -27,7 +27,11 @@
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
import org.graalvm.compiler.nodes.graphbuilderconf.InlineInvokePlugin;
+import org.graalvm.compiler.phases.OptimisticOptimizations;
import org.graalvm.compiler.phases.common.CanonicalizerPhase;
+import org.graalvm.compiler.phases.common.FrameStateAssignmentPhase;
+import org.graalvm.compiler.phases.common.GuardLoweringPhase;
+import org.graalvm.compiler.phases.tiers.MidTierContext;
import org.graalvm.compiler.phases.tiers.PhaseContext;
import org.junit.Ignore;
import org.junit.Test;
@@ -39,6 +43,7 @@
@SuppressWarnings("unused") private static int sink0;
@SuppressWarnings("unused") private static int sink1;
+ @Ignore("Subword input cannot be trusted.")
@Test
public void test00() {
assertCanonicallyEqual("integerTestCanonicalization00", "referenceSnippet00");
@@ -232,6 +237,8 @@
PhaseContext context = new PhaseContext(getProviders());
CanonicalizerPhase canonicalizer = new CanonicalizerPhase();
canonicalizer.apply(graph, context);
+ new GuardLoweringPhase().apply(graph, new MidTierContext(getProviders(), getTargetProvider(), OptimisticOptimizations.ALL, graph.getProfilingInfo()));
+ new FrameStateAssignmentPhase().apply(graph);
canonicalizer.apply(graph, context);
StructuredGraph referenceGraph = parseEager(reference, AllowAssumptions.YES);
canonicalizer.apply(referenceGraph, context);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/CustomizedBytecodePatternTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import org.objectweb.asm.Opcodes;
+
+public abstract class CustomizedBytecodePatternTest extends GraalCompilerTest implements Opcodes {
+
+ protected Class<?> getClass(String className) throws ClassNotFoundException {
+ return new CachedLoader(CustomizedBytecodePatternTest.class.getClassLoader(), className).findClass(className);
+ }
+
+ private class CachedLoader extends ClassLoader {
+
+ final String className;
+ Class<?> loaded;
+
+ CachedLoader(ClassLoader parent, String className) {
+ super(parent);
+ this.className = className;
+ }
+
+ @Override
+ protected Class<?> findClass(String name) throws ClassNotFoundException {
+ if (name.equals(className)) {
+ if (loaded == null) {
+ byte[] gen = generateClass(name.replace('.', '/'));
+ loaded = defineClass(name, gen, 0, gen.length);
+ }
+ return loaded;
+ } else {
+ return super.findClass(name);
+ }
+ }
+
+ }
+
+ protected abstract byte[] generateClass(String internalClassName);
+
+}
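
A hypothetical subclass sketch showing how this harness is meant to be used. The class and generated-class names are illustrative, and ASM's ClassWriter/MethodVisitor plus JUnit's @Test are assumed to be imported as in the tests below:

    public class AnswerPatternTest extends CustomizedBytecodePatternTest {

        @Test
        public void testAnswer() throws ClassNotFoundException {
            Class<?> c = getClass("com.example.GeneratedAnswer");
            test(getResolvedJavaMethod(c, "answer"), null);
        }

        @Override
        protected byte[] generateClass(String internalClassName) {
            ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
            cw.visit(52, ACC_SUPER | ACC_PUBLIC, internalClassName, null, "java/lang/Object", null);
            MethodVisitor mv = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "answer", "()I", null, null);
            mv.visitCode();
            mv.visitIntInsn(BIPUSH, 42); // push the constant the snippet returns
            mv.visitInsn(IRETURN);
            mv.visitMaxs(1, 0);
            mv.visitEnd();
            cw.visitEnd();
            return cw.toByteArray();
        }
    }
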
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -41,14 +41,13 @@
import java.util.Collection;
import java.util.Collections;
import java.util.EnumMap;
-import java.util.HashMap;
import java.util.List;
import java.util.ListIterator;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;
-import jdk.vm.ci.meta.JavaConstant;
import org.graalvm.compiler.api.directives.GraalDirectives;
import org.graalvm.compiler.api.replacements.SnippetReflectionProvider;
import org.graalvm.compiler.api.test.Graal;
@@ -135,6 +134,7 @@
import jdk.vm.ci.meta.Assumptions.Assumption;
import jdk.vm.ci.meta.ConstantReflectionProvider;
import jdk.vm.ci.meta.DeoptimizationReason;
+import jdk.vm.ci.meta.JavaConstant;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.JavaType;
import jdk.vm.ci.meta.MetaAccessProvider;
@@ -882,7 +882,7 @@
}
}
- private Map<ResolvedJavaMethod, InstalledCode> cache = new HashMap<>();
+ private Map<ResolvedJavaMethod, InstalledCode> cache = new ConcurrentHashMap<>();
/**
* Gets installed code for a given method, compiling it first if necessary. The graph is parsed
@@ -1075,7 +1075,7 @@
return backend.createDefaultInstalledCode(debug, method, compilationResult);
}
- private final Map<ResolvedJavaMethod, Executable> methodMap = new HashMap<>();
+ private final Map<ResolvedJavaMethod, Executable> methodMap = new ConcurrentHashMap<>();
/**
* Converts a reflection {@link Method} to a {@link ResolvedJavaMethod}.
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/NestedArithmeticTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import org.junit.Test;
+
+public class NestedArithmeticTest extends GraalCompilerTest {
+ public static int runNestedLoopTry() {
+ int checksum = 0;
+ int i3 = 240;
+ int i5 = 13485;
+ for (int i4 = 303; i4 > 15; i4 -= 2) {
+ int f = 1;
+ do {
+ try {
+ i3 = (38726 / i5);
+ i3 = (i4 % -21500);
+ i5 = (i3 % 787);
+ } catch (ArithmeticException a_e) {
+ checksum += f + i3 + i5;
+ return checksum;
+ }
+ i3 <<= i4;
+ i5 <<= i5;
+ i3 += (8 + (f * f));
+ i5 >>= i5;
+ checksum += f;
+ } while (++f < 11);
+ }
+ return checksum;
+ }
+
+ @Test
+ public void nestedLoopTryTest() {
+ test("runNestedLoopTry");
+ }
+
+ private interface FloatSupplier {
+ float get();
+ }
+
+ private static volatile FloatSupplier problematicFloatValue = new FloatSupplier() {
+ @Override
+ public float get() {
+ return Float.intBitsToFloat(1585051832);
+ }
+ };
+
+ @SuppressWarnings("unused") private static volatile FloatSupplier normalFloatValue = new FloatSupplier() {
+ @Override
+ public float get() {
+ return 0;
+ }
+ };
+
+ public static int absConvert() {
+ int i2 = -51498;
+ int i16 = -12;
+ int i17 = -121;
+ int i18 = 1;
+ int i19 = 11;
+ long l1 = -275151857L;
+ for (int i1 = 21; 22 > i1; ++i1) {
+ float f = problematicFloatValue.get();
+ float absolute = Math.abs(f);
+ i2 = (int) absolute;
+ i2 += i2;
+ }
+ long result = i2 + l1 + i16 + i17 + i18 + i19;
+ return (int) result;
+ }
+
+ @Test
+ public void absConvertTest() {
+ test("absConvert");
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SubWordArrayStoreTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.graalvm.compiler.debug.GraalError;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.FieldVisitor;
+import org.objectweb.asm.MethodVisitor;
+
+import jdk.vm.ci.meta.JavaKind;
+
+@RunWith(Parameterized.class)
+public class SubWordArrayStoreTest extends CustomizedBytecodePatternTest {
+
+ @Parameterized.Parameters(name = "{0}, {1}, {2}, {3}")
+ public static List<Object[]> data() {
+ ArrayList<Object[]> ret = new ArrayList<>();
+ for (int i : new int[]{0xFFFF0000, 0xFFFF0001, 0x0000FFFF, 0x01020304}) {
+ for (boolean unsafeStore : new boolean[]{false, true}) {
+ for (boolean unsafeLoad : new boolean[]{false, true}) {
+ ret.add(new Object[]{JavaKind.Boolean, i, unsafeStore, unsafeLoad});
+ ret.add(new Object[]{JavaKind.Byte, i, unsafeStore, unsafeLoad});
+ ret.add(new Object[]{JavaKind.Short, i, unsafeStore, unsafeLoad});
+ ret.add(new Object[]{JavaKind.Char, i, unsafeStore, unsafeLoad});
+ }
+ }
+ }
+ return ret;
+ }
+
+ private static final String SNIPPET = "snippet";
+
+ private final JavaKind kind;
+ private final int value;
+ private final boolean unsafeStore;
+ private final boolean unsafeLoad;
+
+ public SubWordArrayStoreTest(JavaKind kind, int value, boolean unsafeStore, boolean unsafeLoad) {
+ this.kind = kind;
+ this.value = value;
+ this.unsafeStore = unsafeStore;
+ this.unsafeLoad = unsafeLoad;
+ }
+
+ @Test
+ public void testArrayStore() throws ClassNotFoundException {
+ Class<?> testClass = getClass(SubWordArrayStoreTest.class.getName() + "$" + kind.toString() + "Getter");
+ test(getResolvedJavaMethod(testClass, SNIPPET), null);
+ }
+
+ private static long arrayBaseOffset(JavaKind kind) {
+ switch (kind) {
+ case Boolean:
+ return UNSAFE.arrayBaseOffset(boolean[].class);
+ case Byte:
+ return UNSAFE.arrayBaseOffset(byte[].class);
+ case Short:
+ return UNSAFE.arrayBaseOffset(short[].class);
+ case Char:
+ return UNSAFE.arrayBaseOffset(char[].class);
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+
+ static int toASMType(JavaKind kind) {
+ switch (kind) {
+ case Boolean:
+ return T_BOOLEAN;
+ case Byte:
+ return T_BYTE;
+ case Short:
+ return T_SHORT;
+ case Char:
+ return T_CHAR;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+
+ private static int toArrayStoreOpcode(JavaKind kind) {
+ switch (kind) {
+ case Boolean:
+ case Byte:
+ return BASTORE;
+ case Short:
+ return SASTORE;
+ case Char:
+ return CASTORE;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+
+ private static int toArrayLoadOpcode(JavaKind kind) {
+ switch (kind) {
+ case Boolean:
+ case Byte:
+ return BALOAD;
+ case Short:
+ return SALOAD;
+ case Char:
+ return CALOAD;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+
+ @Override
+ protected byte[] generateClass(String internalClassName) {
+ ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, internalClassName, null, "java/lang/Object", null);
+
+ final String fieldName = "array";
+ final String fieldDescriptor = "[" + kind.getTypeChar();
+
+ FieldVisitor field = cw.visitField(ACC_PUBLIC | ACC_STATIC, fieldName, fieldDescriptor, null, null);
+ field.visitEnd();
+
+ MethodVisitor clinit = cw.visitMethod(ACC_STATIC, "<clinit>", "()V", null, null);
+ clinit.visitCode();
+ clinit.visitIntInsn(BIPUSH, 16);
+ clinit.visitIntInsn(NEWARRAY, toASMType(kind));
+ clinit.visitFieldInsn(PUTSTATIC, internalClassName, fieldName, fieldDescriptor);
+ clinit.visitInsn(RETURN);
+ clinit.visitMaxs(1, 0);
+ clinit.visitEnd();
+
+ MethodVisitor snippet = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, SNIPPET, "()Z", null, null);
+ snippet.visitCode();
+
+ if (unsafeStore) {
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitFieldInsn(GETSTATIC, internalClassName, fieldName, fieldDescriptor);
+ snippet.visitLdcInsn(arrayBaseOffset(kind));
+ snippet.visitLdcInsn(value);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "put" + SubWordTestUtil.getUnsafePutMethodName(kind), "(Ljava/lang/Object;J" + kind.getTypeChar() + ")V", false);
+ } else {
+ snippet.visitFieldInsn(GETSTATIC, internalClassName, fieldName, fieldDescriptor);
+ snippet.visitInsn(ICONST_0);
+ snippet.visitLdcInsn(value);
+ snippet.visitInsn(toArrayStoreOpcode(kind));
+ }
+
+ if (unsafeLoad) {
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitFieldInsn(GETSTATIC, internalClassName, fieldName, fieldDescriptor);
+ snippet.visitLdcInsn(arrayBaseOffset(kind));
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "get" + SubWordTestUtil.getUnsafePutMethodName(kind), "(Ljava/lang/Object;J)" + kind.getTypeChar(), false);
+ } else {
+ snippet.visitFieldInsn(GETSTATIC, internalClassName, fieldName, fieldDescriptor);
+ snippet.visitInsn(ICONST_0);
+ snippet.visitInsn(toArrayLoadOpcode(kind));
+ }
+
+ snippet.visitLdcInsn(value);
+ SubWordTestUtil.convertToKind(snippet, kind);
+ SubWordTestUtil.testEqual(snippet);
+
+ snippet.visitMaxs(5, 0);
+ snippet.visitEnd();
+
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+}
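
In Java terms, the generated snippet for, say, JavaKind.Short with both unsafe flags false roughly corresponds to the following (VALUE stands for the parameterized constant; illustrative only):

    static short[] array = new short[16];

    static boolean snippet() {
        array[0] = (short) VALUE;          // sub-word store truncates the int constant
        return array[0] == (short) VALUE;  // sign-extended reload compared against the masked constant
    }
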
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SubWordFieldStoreTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.FieldVisitor;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Type;
+
+import jdk.vm.ci.meta.JavaKind;
+
+@RunWith(Parameterized.class)
+public class SubWordFieldStoreTest extends CustomizedBytecodePatternTest {
+
+ @Parameterized.Parameters(name = "{0}, {1}, {2}, {3}")
+ public static List<Object[]> data() {
+ ArrayList<Object[]> ret = new ArrayList<>();
+ for (int i : new int[]{0xFFFF0000, 0xFFFF0001, 0x0000FFFF, 0x01020304}) {
+ for (boolean unsafeStore : new boolean[]{false, true}) {
+ for (boolean unsafeLoad : new boolean[]{false, true}) {
+ ret.add(new Object[]{JavaKind.Boolean, i, unsafeStore, unsafeLoad});
+ ret.add(new Object[]{JavaKind.Byte, i, unsafeStore, unsafeLoad});
+ ret.add(new Object[]{JavaKind.Short, i, unsafeStore, unsafeLoad});
+ ret.add(new Object[]{JavaKind.Char, i, unsafeStore, unsafeLoad});
+ }
+ }
+ }
+ return ret;
+ }
+
+ private static final String SNIPPET = "snippet";
+
+ private final JavaKind kind;
+ private final int value;
+ private final boolean unsafeStore;
+ private final boolean unsafeLoad;
+
+ public SubWordFieldStoreTest(JavaKind kind, int value, boolean unsafeStore, boolean unsafeLoad) {
+ this.kind = kind;
+ this.value = value;
+ this.unsafeStore = unsafeStore;
+ this.unsafeLoad = unsafeLoad;
+ }
+
+ @Test
+ public void testFieldStore() throws ClassNotFoundException {
+ Class<?> testClass = getClass(SubWordFieldStoreTest.class.getName() + "$" + kind.toString() + "Getter");
+ test(getResolvedJavaMethod(testClass, SNIPPET), null);
+ }
+
+ @Override
+ protected byte[] generateClass(String internalClassName) {
+ ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, internalClassName, null, "java/lang/Object", null);
+
+ final String fieldName = "field";
+ final String fieldDescriptor = Character.toString(kind.getTypeChar());
+
+ FieldVisitor field = cw.visitField(ACC_PUBLIC | ACC_STATIC, fieldName, fieldDescriptor, null, value);
+ field.visitEnd();
+
+ MethodVisitor snippet = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, SNIPPET, "()Z", null, new String[]{"java/lang/NoSuchFieldException"});
+ snippet.visitCode();
+
+ if (unsafeStore) {
+ snippet.visitLdcInsn(Type.getObjectType(internalClassName));
+ snippet.visitLdcInsn(fieldName);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Class", "getField", "(Ljava/lang/String;)Ljava/lang/reflect/Field;", false);
+ snippet.visitVarInsn(ASTORE, 0);
+
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitVarInsn(ALOAD, 0);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "staticFieldBase", "(Ljava/lang/reflect/Field;)Ljava/lang/Object;", false);
+ snippet.visitVarInsn(ASTORE, 1);
+
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitVarInsn(ALOAD, 0);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "staticFieldOffset", "(Ljava/lang/reflect/Field;)J", false);
+ snippet.visitVarInsn(LSTORE, 2);
+
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitVarInsn(ALOAD, 1);
+ snippet.visitVarInsn(LLOAD, 2);
+ snippet.visitLdcInsn(value);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "put" + SubWordTestUtil.getUnsafePutMethodName(kind), "(Ljava/lang/Object;J" + kind.getTypeChar() + ")V", false);
+ } else {
+ snippet.visitLdcInsn(value);
+ snippet.visitFieldInsn(PUTSTATIC, internalClassName, fieldName, fieldDescriptor);
+ }
+
+ if (unsafeLoad) {
+ if (!unsafeStore) {
+ snippet.visitLdcInsn(Type.getObjectType(internalClassName));
+ snippet.visitLdcInsn(fieldName);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "java/lang/Class", "getField", "(Ljava/lang/String;)Ljava/lang/reflect/Field;", false);
+ snippet.visitVarInsn(ASTORE, 0);
+
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitVarInsn(ALOAD, 0);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "staticFieldBase", "(Ljava/lang/reflect/Field;)Ljava/lang/Object;", false);
+ snippet.visitVarInsn(ASTORE, 1);
+
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitVarInsn(ALOAD, 0);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "staticFieldOffset", "(Ljava/lang/reflect/Field;)J", false);
+ snippet.visitVarInsn(LSTORE, 2);
+ }
+ SubWordTestUtil.getUnsafe(snippet);
+ snippet.visitVarInsn(ALOAD, 1);
+ snippet.visitVarInsn(LLOAD, 2);
+ snippet.visitMethodInsn(INVOKEVIRTUAL, "sun/misc/Unsafe", "get" + SubWordTestUtil.getUnsafePutMethodName(kind), "(Ljava/lang/Object;J)" + kind.getTypeChar(), false);
+ } else {
+ snippet.visitFieldInsn(GETSTATIC, internalClassName, fieldName, fieldDescriptor);
+ }
+
+ snippet.visitLdcInsn(value);
+ SubWordTestUtil.convertToKind(snippet, kind);
+ SubWordTestUtil.testEqual(snippet);
+
+ snippet.visitMaxs(5, 4);
+ snippet.visitEnd();
+
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SubWordInputTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.MethodVisitor;
+
+import jdk.vm.ci.meta.DeoptimizationReason;
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+@RunWith(Parameterized.class)
+public class SubWordInputTest extends CustomizedBytecodePatternTest {
+
+ @Parameterized.Parameters(name = "{0}, {1}")
+ public static List<Object[]> data() {
+ ArrayList<Object[]> ret = new ArrayList<>();
+ for (int i : new int[]{0xFFFF0000, 0xFFFF0001, 0x0000FFFF}) {
+ ret.add(new Object[]{JavaKind.Boolean, i});
+ ret.add(new Object[]{JavaKind.Byte, i});
+ ret.add(new Object[]{JavaKind.Short, i});
+ ret.add(new Object[]{JavaKind.Char, i});
+ }
+ return ret;
+ }
+
+ private static final String GET = "get";
+ private static final String WRAPPER = "wrapper";
+
+ private final JavaKind kind;
+ private final int value;
+
+ public SubWordInputTest(JavaKind kind, int value) {
+ this.kind = kind;
+ this.value = value;
+ }
+
+ @Test
+ public void testSubWordInput() throws ClassNotFoundException {
+ Class<?> testClass = getClass(SubWordInputTest.class.getName() + "$" + kind.toString() + "Getter");
+ ResolvedJavaMethod wrapper = getResolvedJavaMethod(testClass, WRAPPER);
+ Result expected = executeExpected(wrapper, null, value);
+ // test standalone callee
+ getCode(getResolvedJavaMethod(testClass, GET), null, false, true, getInitialOptions());
+ assertEquals(executeExpected(wrapper, null, value), expected);
+ // test with inlining
+ testAgainstExpected(wrapper, expected, Collections.<DeoptimizationReason> emptySet(), null, value);
+ }
+
+ @Override
+ protected byte[] generateClass(String internalClassName) {
+ ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, internalClassName, null, "java/lang/Object", null);
+
+ final char typeChar = kind.getTypeChar();
+
+ // Generates a method that returns the input subword immediately.
+ String getDescriptor = "(" + typeChar + ")" + typeChar;
+ MethodVisitor get = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, GET, getDescriptor, null, null);
+ get.visitCode();
+ get.visitVarInsn(ILOAD, 0);
+ get.visitInsn(IRETURN);
+ get.visitMaxs(1, 1);
+ get.visitEnd();
+
+ // Generates a method that compares the return value of the preceding method, invoked with the
+ // raw input value, against a manually masked copy of the input value.
+ MethodVisitor snippet = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, WRAPPER, "(I)Z", null, null);
+ snippet.visitCode();
+ snippet.visitVarInsn(ILOAD, 0);
+ snippet.visitMethodInsn(INVOKESTATIC, internalClassName, GET, getDescriptor, false);
+
+ snippet.visitVarInsn(ILOAD, 0);
+ SubWordTestUtil.convertToKind(snippet, kind);
+ SubWordTestUtil.testEqual(snippet);
+
+ snippet.visitMaxs(2, 1);
+ snippet.visitEnd();
+
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+}
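
The interesting property of the generated wrapper is that it forwards the raw, untruncated int straight into get, something javac would never emit for a (C)C descriptor. Approximately, and with the narrowing on the comparison side made explicit (illustrative only):

    static char get(char c) {
        return c; // callee trusts that the incoming sub-word value is already masked
    }

    static boolean wrapper(int value) {
        // The real bytecode passes `value` unmasked into get; the cast on the right is
        // what the comparison side does explicitly via convertToKind.
        return get((char) value) == (char) value;
    }
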
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SubWordInputTest2.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.objectweb.asm.ClassWriter;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+
+import jdk.vm.ci.meta.DeoptimizationReason;
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+@RunWith(Parameterized.class)
+public class SubWordInputTest2 extends CustomizedBytecodePatternTest {
+
+ @Parameterized.Parameters(name = "{0}, {1}")
+ public static List<Object[]> data() {
+ ArrayList<Object[]> ret = new ArrayList<>();
+ for (int i : new int[]{0xFFFF0000, 0xFFFF0001, 0x0000FFFF}) {
+ ret.add(new Object[]{JavaKind.Byte, i});
+ ret.add(new Object[]{JavaKind.Short, i});
+ ret.add(new Object[]{JavaKind.Char, i});
+ }
+ return ret;
+ }
+
+ private static final String GET = "get";
+ private static final String WRAPPER = "wrapper";
+
+ private final JavaKind kind;
+ private final int value;
+
+ public SubWordInputTest2(JavaKind kind, int value) {
+ this.kind = kind;
+ this.value = value;
+ }
+
+ @Test
+ public void testSubWordInput() throws ClassNotFoundException {
+ Class<?> testClass = getClass(SubWordInputTest2.class.getName() + "$" + kind.toString() + "Getter");
+ ResolvedJavaMethod wrapper = getResolvedJavaMethod(testClass, WRAPPER);
+ Result expected = executeExpected(wrapper, null, value);
+ // test standalone callee
+ getCode(getResolvedJavaMethod(testClass, GET), null, false, true, getInitialOptions());
+ assertEquals(executeExpected(wrapper, null, value), expected);
+ // test with inlining
+ testAgainstExpected(wrapper, expected, Collections.<DeoptimizationReason> emptySet(), null, value);
+ }
+
+ @Override
+ protected byte[] generateClass(String internalClassName) {
+ ClassWriter cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES);
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, internalClassName, null, "java/lang/Object", null);
+
+ final char typeChar = kind.getTypeChar();
+ String getDescriptor = "(" + typeChar + ")" + "Z";
+ MethodVisitor get = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, GET, getDescriptor, null, null);
+ get.visitCode();
+ get.visitVarInsn(ILOAD, 0);
+ Label label = new Label();
+ get.visitJumpInsn(IFGE, label);
+ get.visitInsn(ICONST_0);
+ get.visitInsn(IRETURN);
+ get.visitLabel(label);
+ get.visitInsn(ICONST_1);
+ get.visitInsn(IRETURN);
+ get.visitMaxs(1, 1);
+ get.visitEnd();
+
+ MethodVisitor snippet = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, WRAPPER, "(I)Z", null, null);
+ snippet.visitCode();
+ snippet.visitVarInsn(ILOAD, 0);
+ snippet.visitMethodInsn(INVOKESTATIC, internalClassName, GET, getDescriptor, false);
+ snippet.visitInsn(IRETURN);
+ snippet.visitMaxs(1, 1);
+ snippet.visitEnd();
+
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SubWordReturnTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SubWordReturnTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -24,8 +24,7 @@
import java.util.ArrayList;
import java.util.List;
-import jdk.vm.ci.meta.JavaKind;
-import jdk.vm.ci.meta.ResolvedJavaMethod;
+
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
@@ -33,38 +32,16 @@
import org.objectweb.asm.ClassWriter;
import org.objectweb.asm.FieldVisitor;
import org.objectweb.asm.MethodVisitor;
-import org.objectweb.asm.Opcodes;
+
+import jdk.vm.ci.meta.JavaKind;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
@RunWith(Parameterized.class)
-public class SubWordReturnTest extends GraalCompilerTest {
+public class SubWordReturnTest extends CustomizedBytecodePatternTest {
private final JavaKind kind;
private final int value;
- private final String generatedClassName;
- private final String generatedClassNameInternal;
-
- private final String testMethodName;
-
- /**
- * The {@link AsmLoader} generates a class looking like this for the types byte, short, int and
- * char.
- */
- static class ByteGetter {
-
- // private static int intField = 1000000;
-
- private static byte get() {
- // GETSTATIC intField
- // IRETURN
- return 0;
- }
-
- public static int testByteSnippet() {
- return get();
- }
- }
-
@Parameters(name = "{0}, {1}")
public static List<Object[]> data() {
ArrayList<Object[]> ret = new ArrayList<>();
@@ -80,64 +57,58 @@
public SubWordReturnTest(JavaKind kind, int value) {
this.kind = kind;
this.value = value;
-
- this.generatedClassName = SubWordReturnTest.class.getName() + "$" + kind.toString() + "Getter";
- this.generatedClassNameInternal = generatedClassName.replace('.', '/');
- this.testMethodName = "test" + kind.name() + "Snippet";
}
@Test
- public void test() throws ClassNotFoundException {
- Class<?> testClass = new AsmLoader(SubWordReturnTest.class.getClassLoader()).findClass(generatedClassName);
- ResolvedJavaMethod method = getResolvedJavaMethod(testClass, testMethodName);
+ public void testSubWordReturn() throws ClassNotFoundException {
+ Class<?> testClass = getClass(SubWordReturnTest.class.getName() + "$" + kind.toString() + "Getter");
+ ResolvedJavaMethod method = getResolvedJavaMethod(testClass, "testSnippet");
test(method, null);
}
- class AsmLoader extends ClassLoader implements Opcodes {
-
- Class<?> loaded;
-
- AsmLoader(ClassLoader parent) {
- super(parent);
- }
+ /**
+ * {@link #generateClass} generates a class looking like this for the types boolean, byte,
+ * short, and char.
+ */
+ static class ByteGetter {
- @Override
- protected Class<?> findClass(String name) throws ClassNotFoundException {
- if (name.equals(generatedClassName)) {
- if (loaded == null) {
- byte[] gen = generateClass();
- loaded = defineClass(name, gen, 0, gen.length);
- }
- return loaded;
- } else {
- return super.findClass(name);
- }
+ // private static int intField = 1000000;
+
+ private static byte get() {
+ // GETSTATIC intField
+ // IRETURN
+ return 0;
}
- private byte[] generateClass() {
- ClassWriter cw = new ClassWriter(0);
- cw.visit(52, ACC_SUPER | ACC_PUBLIC, generatedClassNameInternal, null, "java/lang/Object", null);
-
- FieldVisitor intField = cw.visitField(ACC_PRIVATE | ACC_STATIC, "intField", "I", null, value);
- intField.visitEnd();
-
- MethodVisitor get = cw.visitMethod(ACC_PRIVATE | ACC_STATIC, "get", "()" + kind.getTypeChar(), null, null);
- get.visitCode();
- get.visitFieldInsn(GETSTATIC, generatedClassNameInternal, "intField", "I");
- get.visitInsn(IRETURN);
- get.visitMaxs(1, 0);
- get.visitEnd();
-
- MethodVisitor snippet = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, testMethodName, "()I", null, null);
- snippet.visitCode();
- snippet.visitMethodInsn(INVOKESTATIC, generatedClassNameInternal, "get", "()" + kind.getTypeChar(), false);
- snippet.visitInsn(IRETURN);
- snippet.visitMaxs(1, 0);
- snippet.visitEnd();
-
- cw.visitEnd();
- return cw.toByteArray();
+ public static int testByteSnippet() {
+ return get();
}
}
+ @Override
+ protected byte[] generateClass(String internalClassName) {
+ ClassWriter cw = new ClassWriter(0);
+ cw.visit(52, ACC_SUPER | ACC_PUBLIC, internalClassName, null, "java/lang/Object", null);
+
+ FieldVisitor intField = cw.visitField(ACC_PRIVATE | ACC_STATIC, "intField", "I", null, value);
+ intField.visitEnd();
+
+ MethodVisitor get = cw.visitMethod(ACC_PRIVATE | ACC_STATIC, "get", "()" + kind.getTypeChar(), null, null);
+ get.visitCode();
+ get.visitFieldInsn(GETSTATIC, internalClassName, "intField", "I");
+ get.visitInsn(IRETURN);
+ get.visitMaxs(1, 0);
+ get.visitEnd();
+
+ MethodVisitor snippet = cw.visitMethod(ACC_PUBLIC | ACC_STATIC, "testSnippet", "()I", null, null);
+ snippet.visitCode();
+ snippet.visitMethodInsn(INVOKESTATIC, internalClassName, "get", "()" + kind.getTypeChar(), false);
+ snippet.visitInsn(IRETURN);
+ snippet.visitMaxs(1, 0);
+ snippet.visitEnd();
+
+ cw.visitEnd();
+ return cw.toByteArray();
+ }
+
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/SubWordTestUtil.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.core.test;
+
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.test.GraalTest;
+import org.objectweb.asm.Label;
+import org.objectweb.asm.MethodVisitor;
+import org.objectweb.asm.Opcodes;
+
+import jdk.vm.ci.meta.JavaKind;
+
+public final class SubWordTestUtil implements Opcodes {
+
+ private SubWordTestUtil() {
+ }
+
+ static void convertToKind(MethodVisitor snippet, JavaKind kind) {
+ switch (kind) {
+ case Boolean:
+ snippet.visitInsn(ICONST_1);
+ snippet.visitInsn(IAND);
+ break;
+ case Byte:
+ snippet.visitInsn(I2B);
+ break;
+ case Short:
+ snippet.visitInsn(I2S);
+ break;
+ case Char:
+ snippet.visitInsn(I2C);
+ break;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+
+ static void testEqual(MethodVisitor snippet) {
+ Label label = new Label();
+ snippet.visitJumpInsn(IF_ICMPNE, label);
+ snippet.visitInsn(ICONST_1);
+ snippet.visitInsn(IRETURN);
+ snippet.visitLabel(label);
+ snippet.visitInsn(ICONST_0);
+ snippet.visitInsn(IRETURN);
+ }
+
+ static void getUnsafe(MethodVisitor snippet) {
+ snippet.visitFieldInsn(GETSTATIC, GraalTest.class.getName().replace('.', '/'), "UNSAFE", "Lsun/misc/Unsafe;");
+ }
+
+ static String getUnsafePutMethodName(JavaKind kind) {
+ String name = kind.getJavaName();
+ return name.substring(0, 1).toUpperCase() + name.substring(1);
+ }
+
+}
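
A minimal usage sketch of these helpers (assumes an ASM MethodVisitor mv positioned inside a generated static method with descriptor (II)Z):

    mv.visitCode();
    mv.visitVarInsn(ILOAD, 0);                        // first int argument
    mv.visitVarInsn(ILOAD, 1);                        // second int argument
    SubWordTestUtil.convertToKind(mv, JavaKind.Byte); // narrow the top of stack via I2B
    SubWordTestUtil.testEqual(mv);                    // compare and return a boolean
    mv.visitMaxs(2, 2);
    mv.visitEnd();
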
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DiagnosticsOutputDirectory.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.debug/src/org/graalvm/compiler/debug/DiagnosticsOutputDirectory.java Mon Jun 18 09:48:22 2018 -0700
@@ -135,7 +135,7 @@
String name = dir.relativize(file).toString();
ZipEntry ze = new ZipEntry(name);
zos.putNextEntry(ze);
- zos.write(Files.readAllBytes(file));
+ Files.copy(file, zos);
zos.closeEntry();
}
toDelete.add(file);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotBackend.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64HotSpotBackend.java Mon Jun 18 09:48:22 2018 -0700
@@ -30,12 +30,14 @@
import static org.graalvm.compiler.core.common.GraalOptions.GeneratePIC;
import static org.graalvm.compiler.core.common.GraalOptions.ZapStackOnMethodEntry;
+import jdk.vm.ci.amd64.AMD64.CPUFeature;
import jdk.internal.vm.compiler.collections.EconomicSet;
import org.graalvm.compiler.asm.Assembler;
import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
import org.graalvm.compiler.asm.amd64.AMD64Assembler.ConditionFlag;
import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
import org.graalvm.compiler.code.CompilationResult;
import org.graalvm.compiler.core.amd64.AMD64NodeMatchRules;
import org.graalvm.compiler.core.common.CompilationIdentifier;
@@ -195,7 +197,11 @@
@Override
protected Assembler createAssembler(FrameMap frameMap) {
- return new AMD64MacroAssembler(getTarget());
+ if (((AMD64) getTarget().arch).getFeatures().contains(CPUFeature.AVX)) {
+ return new AMD64VectorAssembler(getTarget());
+ } else {
+ return new AMD64MacroAssembler(getTarget());
+ }
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java Mon Jun 18 09:48:22 2018 -0700
@@ -27,7 +27,9 @@
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
+import java.util.Formatter;
import java.util.List;
+import java.util.ServiceLoader;
import java.util.Set;
import java.util.TreeSet;
import java.util.stream.Collectors;
@@ -47,6 +49,8 @@
import org.graalvm.compiler.test.GraalTest;
import org.junit.Test;
+import jdk.vm.ci.amd64.AMD64;
+import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.hotspot.HotSpotVMConfigStore;
import jdk.vm.ci.hotspot.VMIntrinsicMethod;
import jdk.vm.ci.meta.MetaAccessProvider;
@@ -57,8 +61,8 @@
/**
* Checks the intrinsics implemented by Graal against the set of intrinsics declared by HotSpot. The
* purpose of this test is to detect when new intrinsics are added to HotSpot and process them
- * appropriately in Graal. This will be achieved by working through {@link #TO_BE_INVESTIGATED} and
- * either implementing the intrinsic or moving it to {@link #IGNORE} .
+ * appropriately in Graal. This will be achieved by working through {@link #toBeInvestigated} and
+ * either implementing the intrinsic or moving it to {@link #ignore}.
*/
public class CheckGraalIntrinsics extends GraalTest {
@@ -118,16 +122,33 @@
}
/**
- * The HotSpot intrinsics implemented without {@link InvocationPlugin}s or whose
- * {@link InvocationPlugin} registration is guarded by a condition that is false in the current
- * VM context.
+ * The HotSpot intrinsics that:
+ * <ul>
+     * <li>will never be implemented by Graal (comments must explain why)</li>
+ * <li>are implemented without {@link InvocationPlugin}s, or</li>
+ * <li>whose {@link InvocationPlugin} registration is guarded by a condition that is false in
+ * the current VM context.</li>
+ * </ul>
*/
- private static final Set<String> IGNORE = new TreeSet<>();
+ public final Set<String> ignore = new TreeSet<>();
/**
- * The HotSpot intrinsics yet to be implemented or moved to {@link #IGNORE}.
+ * The HotSpot intrinsics whose {@link InvocationPlugin} registration is guarded by a condition
+ * too complex to duplicate here.
*/
- private static final Set<String> TO_BE_INVESTIGATED = new TreeSet<>();
+ public final Set<String> complexGuard = new TreeSet<>();
+
+ /**
+ * The HotSpot intrinsics implemented downstream.
+ */
+ public final Set<String> downstream = new TreeSet<>();
+
+ /**
+ * The HotSpot intrinsics yet to be implemented or moved to {@link #ignore}.
+ */
+ public final Set<String> toBeInvestigated = new TreeSet<>();
private static Collection<String> add(Collection<String> c, String... elements) {
String[] sorted = elements.clone();
@@ -142,9 +163,13 @@
return c;
}
- static {
+ public final HotSpotGraalRuntimeProvider rt = (HotSpotGraalRuntimeProvider) Graal.getRequiredCapability(RuntimeProvider.class);
+ public final Architecture arch = rt.getHostBackend().getTarget().arch;
+ public final GraalHotSpotVMConfig config = rt.getVMConfig();
+
+ public CheckGraalIntrinsics() {
// These are dead
- add(IGNORE,
+ add(ignore,
"java/lang/Math.atan2(DD)D",
"jdk/internal/misc/Unsafe.park(ZJ)V",
"jdk/internal/misc/Unsafe.unpark(Ljava/lang/Object;)V",
@@ -156,34 +181,34 @@
"sun/misc/Unsafe.unpark(Ljava/lang/Object;)V");
// These only exist to assist escape analysis in C2
- add(IGNORE,
+ add(ignore,
"java/lang/Throwable.fillInStackTrace()Ljava/lang/Throwable;");
// These are only used for the security handling during stack walking
- add(IGNORE,
+ add(ignore,
"java/lang/reflect/Method.invoke(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object;");
// These are marker intrinsic ids only
- add(IGNORE,
+ add(ignore,
"java/lang/invoke/MethodHandle.<compiledLambdaForm>*",
"java/lang/invoke/MethodHandle.invoke*");
// These are implemented through lowering
- add(IGNORE,
+ add(ignore,
"java/lang/ref/Reference.get()Ljava/lang/Object;");
// These are only used by C1
- add(IGNORE,
+ add(ignore,
"java/nio/Buffer.checkIndex(I)I");
// These do general compiler optimizations and convert min/max to cmov instructions. We are
// ignoring them as cmovs are not necessarily beneficial.
- add(IGNORE,
+ add(ignore,
"java/lang/Math.max(II)I",
"java/lang/Math.min(II)I");
// These are known to be implemented down stream
- add(IGNORE,
+ add(downstream,
"java/lang/Integer.toString(I)Ljava/lang/String;",
"java/lang/String.<init>(Ljava/lang/String;)V",
"java/lang/StringBuffer.<init>()V",
@@ -203,9 +228,7 @@
"java/util/Arrays.copyOf([Ljava/lang/Object;ILjava/lang/Class;)[Ljava/lang/Object;",
"java/util/Arrays.copyOfRange([Ljava/lang/Object;IILjava/lang/Class;)[Ljava/lang/Object;");
- // These are known to be implemented but the platform dependent conditions
- // for when they are enabled are complex so just ignore them all the time.
- add(IGNORE,
+ add(complexGuard,
"java/lang/Integer.bitCount(I)I",
"java/lang/Integer.numberOfLeadingZeros(I)I",
"java/lang/Integer.numberOfTrailingZeros(I)I",
@@ -214,30 +237,17 @@
"java/lang/Long.numberOfTrailingZeros(J)I");
// Relevant for Java flight recorder
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
"oracle/jrockit/jfr/Timing.counterTime()J",
"oracle/jrockit/jfr/VMJFR.classID0(Ljava/lang/Class;)J",
"oracle/jrockit/jfr/VMJFR.threadID()I");
- add(TO_BE_INVESTIGATED,
- // Should be fairly easy to implement - C2 intrinsifies these to use "v !=
- // v" to check for NaN instead of looking at the bit pattern.
- "java/lang/Double.doubleToLongBits(D)J",
- "java/lang/Float.floatToIntBits(F)I",
-
- // Should be trivial to implement because we already have existing nodes
- "java/lang/Math.decrementExact(I)I",
- "java/lang/Math.decrementExact(J)J",
- "java/lang/Math.incrementExact(I)I",
- "java/lang/Math.incrementExact(J)J",
-
+ add(toBeInvestigated,
// Similar to addExact
"java/lang/Math.negateExact(I)I",
// Similar to addExact
"java/lang/Math.negateExact(J)J",
// HotSpot MacroAssembler-based intrinsic
- "java/lang/String.compareTo(Ljava/lang/String;)I",
- // HotSpot MacroAssembler-based intrinsic
"java/lang/String.indexOf(Ljava/lang/String;)I",
                            // Can share most implementation parts with
                            // Unsafe.allocateUninitializedArray0
@@ -249,12 +259,12 @@
if (isJDK9OrHigher()) {
// Relevant for Java flight recorder
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
"jdk/jfr/internal/JVM.counterTime()J",
"jdk/jfr/internal/JVM.getBufferWriter()Ljava/lang/Object;",
"jdk/jfr/internal/JVM.getClassId(Ljava/lang/Class;)J");
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
// Some logic and a stub call
"com/sun/crypto/provider/CounterMode.implCrypt([BII[BI)I",
// Stub and very little logic
@@ -263,10 +273,6 @@
"java/lang/Math.fma(DDD)D",
// HotSpot MacroAssembler-based intrinsic
"java/lang/Math.fma(FFF)F",
- // Just a runtime call (the called C code has a better fast path)
- "java/lang/Object.notify()V",
- // Just a runtime call (the called C code has a better fast path)
- "java/lang/Object.notifyAll()V",
// Emit pause instruction if os::is_MP()
"java/lang/Thread.onSpinWait()V",
// Just check if the argument is a compile time constant
@@ -279,51 +285,23 @@
"java/util/zip/Adler32.updateByteBuffer(IJII)I",
// Only implemented on non-AMD64 platforms (some logic and runtime call)
"java/util/zip/Adler32.updateBytes(I[BII)I",
- // similar to CRC32.updateBytes
- "java/util/zip/CRC32C.updateBytes(I[BII)I",
- // similar to CRC32.updateDirectByteBuffer
- "java/util/zip/CRC32C.updateDirectByteBuffer(IJII)I",
// Emits a slow and a fast path and some dispatching logic
"jdk/internal/misc/Unsafe.allocateUninitializedArray0(Ljava/lang/Class;I)Ljava/lang/Object;",
- // Should be easy to implement as it seems to match the logic that is
- // already implemented in ValueCompareAndSwapNode. On the high-level, we
- // would need something similar to UnsafeCompareAndSwapNode but with a
- // different result type.
- "jdk/internal/misc/Unsafe.compareAndExchangeByte(Ljava/lang/Object;JBB)B",
- "jdk/internal/misc/Unsafe.compareAndExchangeInt(Ljava/lang/Object;JII)I",
- "jdk/internal/misc/Unsafe.compareAndExchangeLong(Ljava/lang/Object;JJJ)J",
- "jdk/internal/misc/Unsafe.compareAndExchangeObject(Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
- "jdk/internal/misc/Unsafe.compareAndExchangeShort(Ljava/lang/Object;JSS)S",
-
- // Should be easy to implement as we already have an implementation for
- // int, long, and Object.
- "jdk/internal/misc/Unsafe.compareAndSetByte(Ljava/lang/Object;JBB)Z",
- "jdk/internal/misc/Unsafe.compareAndSetShort(Ljava/lang/Object;JSS)Z",
-
- // Should be easy to implement as we already have an implementation for
- // int and long.
- "jdk/internal/misc/Unsafe.getAndAddByte(Ljava/lang/Object;JB)B",
- "jdk/internal/misc/Unsafe.getAndAddShort(Ljava/lang/Object;JS)S",
-
- // Should be easy to implement as we already have an implementation for
- // int, long, and Object.
- "jdk/internal/misc/Unsafe.getAndSetByte(Ljava/lang/Object;JB)B",
- "jdk/internal/misc/Unsafe.getAndSetShort(Ljava/lang/Object;JS)S",
-
// Control flow, deopts, and a cast
"jdk/internal/util/Preconditions.checkIndex(IILjava/util/function/BiFunction;)I",
// HotSpot MacroAssembler-based intrinsic
"sun/nio/cs/ISO_8859_1$Encoder.implEncodeISOArray([CI[BII)I",
// Runtime call and some complex compiler logic
"sun/security/provider/DigestBase.implCompressMultiBlock0([BII)I");
+
/*
             * By default, all these operations are mapped to some generic method for which we
* already have compiler intrinsics. Performance-wise it would be better to support them
* explicitly as the more generic method might be more restrictive and therefore slower
* than necessary.
*/
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
// Mapped to compareAndExchange*
"jdk/internal/misc/Unsafe.compareAndExchangeByteAcquire(Ljava/lang/Object;JBB)B",
"jdk/internal/misc/Unsafe.compareAndExchangeByteRelease(Ljava/lang/Object;JBB)B",
@@ -336,37 +314,6 @@
"jdk/internal/misc/Unsafe.compareAndExchangeShortAcquire(Ljava/lang/Object;JSS)S",
"jdk/internal/misc/Unsafe.compareAndExchangeShortRelease(Ljava/lang/Object;JSS)S",
- // Mapped to get*Volatile
- "jdk/internal/misc/Unsafe.getBooleanAcquire(Ljava/lang/Object;J)Z",
- "jdk/internal/misc/Unsafe.getBooleanOpaque(Ljava/lang/Object;J)Z",
- "jdk/internal/misc/Unsafe.getByteAcquire(Ljava/lang/Object;J)B",
- "jdk/internal/misc/Unsafe.getByteOpaque(Ljava/lang/Object;J)B",
- "jdk/internal/misc/Unsafe.getCharAcquire(Ljava/lang/Object;J)C",
- "jdk/internal/misc/Unsafe.getCharOpaque(Ljava/lang/Object;J)C",
- "jdk/internal/misc/Unsafe.getDoubleAcquire(Ljava/lang/Object;J)D",
- "jdk/internal/misc/Unsafe.getDoubleOpaque(Ljava/lang/Object;J)D",
- "jdk/internal/misc/Unsafe.getFloatAcquire(Ljava/lang/Object;J)F",
- "jdk/internal/misc/Unsafe.getFloatOpaque(Ljava/lang/Object;J)F",
- "jdk/internal/misc/Unsafe.getIntAcquire(Ljava/lang/Object;J)I",
- "jdk/internal/misc/Unsafe.getIntOpaque(Ljava/lang/Object;J)I",
- "jdk/internal/misc/Unsafe.getLongAcquire(Ljava/lang/Object;J)J",
- "jdk/internal/misc/Unsafe.getLongOpaque(Ljava/lang/Object;J)J",
- "jdk/internal/misc/Unsafe.getObjectAcquire(Ljava/lang/Object;J)Ljava/lang/Object;",
- "jdk/internal/misc/Unsafe.getObjectOpaque(Ljava/lang/Object;J)Ljava/lang/Object;",
- "jdk/internal/misc/Unsafe.getShortAcquire(Ljava/lang/Object;J)S",
- "jdk/internal/misc/Unsafe.getShortOpaque(Ljava/lang/Object;J)S",
-
- // Mapped to put*Volatile
- "jdk/internal/misc/Unsafe.putBooleanOpaque(Ljava/lang/Object;JZ)V",
- "jdk/internal/misc/Unsafe.putByteOpaque(Ljava/lang/Object;JB)V",
- "jdk/internal/misc/Unsafe.putCharOpaque(Ljava/lang/Object;JC)V",
- "jdk/internal/misc/Unsafe.putDoubleOpaque(Ljava/lang/Object;JD)V",
- "jdk/internal/misc/Unsafe.putFloatOpaque(Ljava/lang/Object;JF)V",
- "jdk/internal/misc/Unsafe.putIntOpaque(Ljava/lang/Object;JI)V",
- "jdk/internal/misc/Unsafe.putLongOpaque(Ljava/lang/Object;JJ)V",
- "jdk/internal/misc/Unsafe.putObjectOpaque(Ljava/lang/Object;JLjava/lang/Object;)V",
- "jdk/internal/misc/Unsafe.putShortOpaque(Ljava/lang/Object;JS)V",
-
// Mapped to compareAndSet*
"jdk/internal/misc/Unsafe.weakCompareAndSetByte(Ljava/lang/Object;JBB)Z",
"jdk/internal/misc/Unsafe.weakCompareAndSetByteAcquire(Ljava/lang/Object;JBB)Z",
@@ -390,18 +337,14 @@
"jdk/internal/misc/Unsafe.weakCompareAndSetShortRelease(Ljava/lang/Object;JSS)Z");
// Compact string support - HotSpot MacroAssembler-based intrinsic or complex C2 logic.
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
"java/lang/StringCoding.hasNegatives([BII)Z",
"java/lang/StringCoding.implEncodeISOArray([BI[BII)I",
- "java/lang/StringLatin1.compareTo([B[B)I",
- "java/lang/StringLatin1.compareToUTF16([B[B)I",
"java/lang/StringLatin1.equals([B[B)Z",
"java/lang/StringLatin1.indexOf([BI[BII)I",
"java/lang/StringLatin1.indexOf([B[B)I",
"java/lang/StringLatin1.inflate([BI[BII)V",
"java/lang/StringLatin1.inflate([BI[CII)V",
- "java/lang/StringUTF16.compareTo([B[B)I",
- "java/lang/StringUTF16.compareToLatin1([B[B)I",
"java/lang/StringUTF16.compress([BI[BII)I",
"java/lang/StringUTF16.compress([CI[BII)I",
"java/lang/StringUTF16.equals([B[B)Z",
@@ -417,20 +360,36 @@
}
if (isJDK10OrHigher()) {
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
"java/lang/Math.multiplyHigh(JJ)J",
"jdk/internal/util/ArraysSupport.vectorizedMismatch(Ljava/lang/Object;JLjava/lang/Object;JII)I");
}
if (isJDK11OrHigher()) {
// Relevant for Java flight recorder
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
"jdk/jfr/internal/JVM.getEventWriter()Ljava/lang/Object;");
}
- if (!getHostArchitectureName().equals("amd64")) {
+ if (!config.inlineNotify()) {
+ add(ignore, "java/lang/Object.notify()V");
+ }
+ if (!config.inlineNotifyAll()) {
+ add(ignore, "java/lang/Object.notifyAll()V");
+ }
+
+ if (!(arch instanceof AMD64)) {
// Can we implement these on non-AMD64 platforms? C2 seems to.
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
+ "java/lang/String.compareTo(Ljava/lang/String;)I",
+ "jdk/internal/misc/Unsafe.compareAndExchangeByte(Ljava/lang/Object;JBB)B",
+ "jdk/internal/misc/Unsafe.compareAndExchangeShort(Ljava/lang/Object;JSS)S",
+ "jdk/internal/misc/Unsafe.compareAndSetByte(Ljava/lang/Object;JBB)Z",
+ "jdk/internal/misc/Unsafe.compareAndSetShort(Ljava/lang/Object;JSS)Z",
+ "jdk/internal/misc/Unsafe.getAndAddByte(Ljava/lang/Object;JB)B",
+ "jdk/internal/misc/Unsafe.getAndAddShort(Ljava/lang/Object;JS)S",
+ "jdk/internal/misc/Unsafe.getAndSetByte(Ljava/lang/Object;JB)B",
+ "jdk/internal/misc/Unsafe.getAndSetShort(Ljava/lang/Object;JS)S",
"sun/misc/Unsafe.getAndAddInt(Ljava/lang/Object;JI)I",
"sun/misc/Unsafe.getAndAddLong(Ljava/lang/Object;JJ)J",
"sun/misc/Unsafe.getAndSetInt(Ljava/lang/Object;JI)I",
@@ -438,7 +397,11 @@
"sun/misc/Unsafe.getAndSetObject(Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object;");
if (isJDK9OrHigher()) {
- add(TO_BE_INVESTIGATED,
+ add(toBeInvestigated,
+ "java/lang/StringLatin1.compareTo([B[B)I",
+ "java/lang/StringLatin1.compareToUTF16([B[B)I",
+ "java/lang/StringUTF16.compareTo([B[B)I",
+ "java/lang/StringUTF16.compareToLatin1([B[B)I",
"jdk/internal/misc/Unsafe.getAndAddInt(Ljava/lang/Object;JI)I",
"jdk/internal/misc/Unsafe.getAndAddLong(Ljava/lang/Object;JJ)J",
"jdk/internal/misc/Unsafe.getAndSetInt(Ljava/lang/Object;JI)I",
@@ -455,9 +418,6 @@
}
}
- HotSpotGraalRuntimeProvider rt = (HotSpotGraalRuntimeProvider) Graal.getRequiredCapability(RuntimeProvider.class);
- GraalHotSpotVMConfig config = rt.getVMConfig();
-
/*
* The intrinsics down here are known to be implemented but they are not always enabled on
* the HotSpot side (e.g., because they require certain CPU features). So, we are ignoring
@@ -466,13 +426,13 @@
// CRC32 intrinsics
if (!config.useCRC32Intrinsics) {
- add(IGNORE, "java/util/zip/CRC32.update(II)I");
+ add(ignore, "java/util/zip/CRC32.update(II)I");
if (isJDK9OrHigher()) {
- add(IGNORE,
+ add(ignore,
"java/util/zip/CRC32.updateByteBuffer0(IJII)I",
"java/util/zip/CRC32.updateBytes0(I[BII)I");
} else {
- add(IGNORE,
+ add(ignore,
"java/util/zip/CRC32.updateByteBuffer(IJII)I",
"java/util/zip/CRC32.updateBytes(I[BII)I");
}
@@ -480,7 +440,7 @@
// CRC32C intrinsics
if (!config.useCRC32CIntrinsics) {
- add(IGNORE,
+ add(ignore,
"java/util/zip/CRC32C.updateBytes(I[BII)I",
"java/util/zip/CRC32C.updateDirectByteBuffer(IJII)I");
}
@@ -488,13 +448,13 @@
// AES intrinsics
if (!config.useAESIntrinsics) {
if (isJDK9OrHigher()) {
- add(IGNORE,
+ add(ignore,
"com/sun/crypto/provider/AESCrypt.implDecryptBlock([BI[BI)V",
"com/sun/crypto/provider/AESCrypt.implEncryptBlock([BI[BI)V",
"com/sun/crypto/provider/CipherBlockChaining.implDecrypt([BII[BI)I",
"com/sun/crypto/provider/CipherBlockChaining.implEncrypt([BII[BI)I");
} else {
- add(IGNORE,
+ add(ignore,
"com/sun/crypto/provider/AESCrypt.decryptBlock([BI[BI)V",
"com/sun/crypto/provider/AESCrypt.encryptBlock([BI[BI)V",
"com/sun/crypto/provider/CipherBlockChaining.decrypt([BII[BI)I",
@@ -505,44 +465,44 @@
// BigInteger intrinsics
if (!config.useMultiplyToLenIntrinsic()) {
if (isJDK9OrHigher()) {
- add(IGNORE, "java/math/BigInteger.implMultiplyToLen([II[II[I)[I");
+ add(ignore, "java/math/BigInteger.implMultiplyToLen([II[II[I)[I");
} else {
- add(IGNORE, "java/math/BigInteger.multiplyToLen([II[II[I)[I");
+ add(ignore, "java/math/BigInteger.multiplyToLen([II[II[I)[I");
}
}
if (!config.useMulAddIntrinsic()) {
- add(IGNORE, "java/math/BigInteger.implMulAdd([I[IIII)I");
+ add(ignore, "java/math/BigInteger.implMulAdd([I[IIII)I");
}
if (!config.useMontgomeryMultiplyIntrinsic()) {
- add(IGNORE, "java/math/BigInteger.implMontgomeryMultiply([I[I[IIJ[I)[I");
+ add(ignore, "java/math/BigInteger.implMontgomeryMultiply([I[I[IIJ[I)[I");
}
if (!config.useMontgomerySquareIntrinsic()) {
- add(IGNORE, "java/math/BigInteger.implMontgomerySquare([I[IIJ[I)[I");
+ add(ignore, "java/math/BigInteger.implMontgomerySquare([I[IIJ[I)[I");
}
if (!config.useSquareToLenIntrinsic()) {
- add(IGNORE, "java/math/BigInteger.implSquareToLen([II[II)[I");
+ add(ignore, "java/math/BigInteger.implSquareToLen([II[II)[I");
}
// SHA intrinsics
if (!config.useSHA1Intrinsics()) {
if (isJDK9OrHigher()) {
- add(IGNORE, "sun/security/provider/SHA.implCompress0([BI)V");
+ add(ignore, "sun/security/provider/SHA.implCompress0([BI)V");
} else {
- add(IGNORE, "sun/security/provider/SHA.implCompress([BI)V");
+ add(ignore, "sun/security/provider/SHA.implCompress([BI)V");
}
}
if (!config.useSHA256Intrinsics()) {
if (isJDK9OrHigher()) {
- add(IGNORE, "sun/security/provider/SHA2.implCompress0([BI)V");
+ add(ignore, "sun/security/provider/SHA2.implCompress0([BI)V");
} else {
- add(IGNORE, "sun/security/provider/SHA2.implCompress([BI)V");
+ add(ignore, "sun/security/provider/SHA2.implCompress([BI)V");
}
}
if (!config.useSHA512Intrinsics()) {
if (isJDK9OrHigher()) {
- add(IGNORE, "sun/security/provider/SHA5.implCompress0([BI)V");
+ add(ignore, "sun/security/provider/SHA5.implCompress0([BI)V");
} else {
- add(IGNORE, "sun/security/provider/SHA5.implCompress([BI)V");
+ add(ignore, "sun/security/provider/SHA5.implCompress([BI)V");
}
}
}
@@ -559,31 +519,31 @@
return GraalServices.JAVA_SPECIFICATION_VERSION >= 11;
}
- private static String getHostArchitectureName() {
- String arch = System.getProperty("os.arch");
- if (arch.equals("x86_64")) {
- arch = "amd64";
- } else if (arch.equals("sparcv9")) {
- arch = "sparc";
- }
- return arch;
+ public interface Refiner {
+ void refine(CheckGraalIntrinsics checker);
}
@Test
@SuppressWarnings("try")
public void test() throws ClassNotFoundException {
- HotSpotGraalRuntimeProvider rt = (HotSpotGraalRuntimeProvider) Graal.getRequiredCapability(RuntimeProvider.class);
HotSpotProviders providers = rt.getHostBackend().getProviders();
Plugins graphBuilderPlugins = providers.getGraphBuilderPlugins();
InvocationPlugins invocationPlugins = graphBuilderPlugins.getInvocationPlugins();
- HotSpotVMConfigStore store = rt.getVMConfig().getStore();
+ HotSpotVMConfigStore store = config.getStore();
List<VMIntrinsicMethod> intrinsics = store.getIntrinsics();
+ for (Refiner refiner : ServiceLoader.load(Refiner.class)) {
+ refiner.refine(this);
+ }
+
List<String> missing = new ArrayList<>();
+ List<String> mischaracterizedAsToBeInvestigated = new ArrayList<>();
+ List<String> mischaracterizedAsIgnored = new ArrayList<>();
EconomicMap<String, List<Binding>> bindings = invocationPlugins.getBindings(true);
for (VMIntrinsicMethod intrinsic : intrinsics) {
InvocationPlugin plugin = findPlugin(bindings, intrinsic);
+ String m = String.format("%s.%s%s", intrinsic.declaringClass, intrinsic.name, intrinsic.descriptor);
if (plugin == null) {
ResolvedJavaMethod method = resolveIntrinsic(providers.getMetaAccess(), intrinsic);
if (method != null) {
@@ -592,17 +552,37 @@
continue;
}
}
- String m = String.format("%s.%s%s", intrinsic.declaringClass, intrinsic.name, intrinsic.descriptor);
- if (!TO_BE_INVESTIGATED.contains(m) && !IGNORE.contains(m)) {
+ if (!toBeInvestigated.contains(m) && !ignore.contains(m) && !complexGuard.contains(m) && !downstream.contains(m)) {
missing.add(m);
}
+ } else {
+ if (toBeInvestigated.contains(m)) {
+ mischaracterizedAsToBeInvestigated.add(m);
+ } else if (ignore.contains(m)) {
+ mischaracterizedAsIgnored.add(m);
+ }
}
}
+ Formatter errorMsgBuf = new Formatter();
if (!missing.isEmpty()) {
Collections.sort(missing);
String missingString = missing.stream().collect(Collectors.joining(String.format("%n ")));
- fail("missing Graal intrinsics for:%n %s", missingString);
+ errorMsgBuf.format("missing Graal intrinsics for:%n %s%n", missingString);
+ }
+ if (!mischaracterizedAsToBeInvestigated.isEmpty()) {
+ Collections.sort(mischaracterizedAsToBeInvestigated);
+ String missingString = mischaracterizedAsToBeInvestigated.stream().collect(Collectors.joining(String.format("%n ")));
+ errorMsgBuf.format("found plugins for intrinsics characterized as toBeInvestigated:%n %s%n", missingString);
+ }
+ if (!mischaracterizedAsIgnored.isEmpty()) {
+ Collections.sort(mischaracterizedAsIgnored);
+ String missingString = mischaracterizedAsIgnored.stream().collect(Collectors.joining(String.format("%n ")));
+ errorMsgBuf.format("found plugins for intrinsics characterized as IGNORED:%n %s%n", missingString);
+ }
+ String errorMsg = errorMsgBuf.toString();
+ if (!errorMsg.isEmpty()) {
+ fail(errorMsg);
}
}
}
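
The new Refiner interface, discovered via ServiceLoader, lets downstream distributions adjust the intrinsic sets before the test runs. A hypothetical provider might look like this (the package name and provider-configuration path are illustrative only):

    // Hypothetical example (not part of the patch). It would be registered via a
    // provider-configuration file named
    // META-INF/services/org.graalvm.compiler.hotspot.test.CheckGraalIntrinsics$Refiner.
    package com.example.graal;

    import org.graalvm.compiler.hotspot.test.CheckGraalIntrinsics;

    public class DownstreamRefiner implements CheckGraalIntrinsics.Refiner {
        @Override
        public void refine(CheckGraalIntrinsics checker) {
            // Claim an intrinsic that this (hypothetical) downstream compiler implements.
            checker.toBeInvestigated.remove("java/lang/StringLatin1.equals([B[B)Z");
            checker.downstream.add("java/lang/StringLatin1.equals([B[B)Z");
        }
    }
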
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfigVersioned.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/GraalHotSpotVMConfigVersioned.java Mon Jun 18 09:48:22 2018 -0700
@@ -40,8 +40,13 @@
super(store);
}
+ private boolean initInlineNotify() {
+ String syncKnobs = getFlag("SyncKnobs", String.class, "");
+ return syncKnobs == null || !syncKnobs.contains("InlineNotify=0");
+ }
+
    // JDK-8132287
- final boolean inlineNotify = !getFlag("SyncKnobs", String.class, "").contains("InlineNotify=0");
+ final boolean inlineNotify = initInlineNotify();
// JDK-8073583
final boolean useCRC32CIntrinsics = getFlag("UseCRC32CIntrinsics", Boolean.class);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompiler.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompiler.java Mon Jun 18 09:48:22 2018 -0700
@@ -34,6 +34,7 @@
import org.graalvm.compiler.api.runtime.GraalJVMCICompiler;
import org.graalvm.compiler.bytecode.Bytecode;
+import org.graalvm.compiler.bytecode.BytecodeProvider;
import org.graalvm.compiler.code.CompilationResult;
import org.graalvm.compiler.core.GraalCompiler;
import org.graalvm.compiler.core.common.CompilationIdentifier;
@@ -218,11 +219,18 @@
if (subst != null) {
ResolvedJavaMethod substMethod = subst.getMethod();
assert !substMethod.equals(method);
- StructuredGraph graph = new StructuredGraph.Builder(options, debug, AllowAssumptions.YES).method(substMethod).compilationId(compilationId).build();
+ BytecodeProvider bytecodeProvider = subst.getOrigin();
+ // @formatter:off
+ StructuredGraph graph = new StructuredGraph.Builder(options, debug, AllowAssumptions.YES).
+ method(substMethod).
+ compilationId(compilationId).
+ recordInlinedMethods(bytecodeProvider.shouldRecordMethodDependencies()).
+ build();
+ // @formatter:on
try (DebugContext.Scope scope = debug.scope("GetIntrinsicGraph", graph)) {
Plugins plugins = new Plugins(providers.getGraphBuilderPlugins());
GraphBuilderConfiguration config = GraphBuilderConfiguration.getSnippetDefault(plugins);
- IntrinsicContext initialReplacementContext = new IntrinsicContext(method, substMethod, subst.getOrigin(), ROOT_COMPILATION);
+ IntrinsicContext initialReplacementContext = new IntrinsicContext(method, substMethod, bytecodeProvider, ROOT_COMPILATION);
new GraphBuilderPhase.Instance(providers.getMetaAccess(), providers.getStampProvider(), providers.getConstantReflection(), providers.getConstantFieldProvider(), config,
OptimisticOptimizations.NONE, initialReplacementContext).apply(graph);
assert !graph.isFrozen();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotInvocationPlugins.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotInvocationPlugins.java Mon Jun 18 09:48:22 2018 -0700
@@ -55,14 +55,14 @@
}
@Override
- public void register(InvocationPlugin plugin, Type declaringClass, String name, Type... argumentTypes) {
+ protected void register(InvocationPlugin plugin, boolean isOptional, boolean allowOverwrite, Type declaringClass, String name, Type... argumentTypes) {
if (!config.usePopCountInstruction) {
if (name.equals("bitCount")) {
assert declaringClass.equals(Integer.class) || declaringClass.equals(Long.class);
return;
}
}
- super.register(plugin, declaringClass, name, argumentTypes);
+ super.register(plugin, isOptional, allowOverwrite, declaringClass, name, argumentTypes);
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/ObjectCloneNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/replacements/ObjectCloneNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -98,7 +98,7 @@
Assumptions assumptions = graph().getAssumptions();
type = getConcreteType(getObject().stamp(NodeView.DEFAULT));
if (type != null) {
- StructuredGraph newGraph = new StructuredGraph.Builder(graph().getOptions(), graph().getDebug(), AllowAssumptions.ifNonNull(assumptions)).build();
+ StructuredGraph newGraph = new StructuredGraph.Builder(graph().getOptions(), graph().getDebug(), AllowAssumptions.ifNonNull(assumptions)).name("<clone>").build();
ParameterNode param = newGraph.addWithoutUnique(new ParameterNode(0, StampPair.createSingle(getObject().stamp(NodeView.DEFAULT))));
NewInstanceNode newInstance = newGraph.add(new NewInstanceNode(type, true));
newGraph.addAfterFixed(newGraph.start(), newInstance);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/BytecodeParser.java Mon Jun 18 09:48:22 2018 -0700
@@ -852,7 +852,7 @@
try (DebugCloseable context = openNodeContext()) {
if (method.isSynchronized()) {
- finishPrepare(lastInstr, BytecodeFrame.BEFORE_BCI);
+ finishPrepare(lastInstr, BytecodeFrame.BEFORE_BCI, frameState);
// add a monitor enter to the start block
methodSynchronizedObject = synchronizedObject(frameState, method);
@@ -867,7 +867,7 @@
profilingPlugin.profileInvoke(this, method, stateBefore);
}
- finishPrepare(lastInstr, 0);
+ finishPrepare(lastInstr, 0, frameState);
genInfoPointNode(InfopointReason.METHOD_START, null);
}
@@ -914,8 +914,9 @@
*
* @param instruction the current last instruction
* @param bci the current bci
+     * @param state the current frame state
*/
- protected void finishPrepare(FixedWithNextNode instruction, int bci) {
+ protected void finishPrepare(FixedWithNextNode instruction, int bci, FrameStateBuilder state) {
}
protected void cleanupFinalGraph() {
@@ -1393,7 +1394,7 @@
}
protected void genStoreField(ValueNode receiver, ResolvedJavaField field, ValueNode value) {
- StoreFieldNode storeFieldNode = new StoreFieldNode(receiver, field, value);
+ StoreFieldNode storeFieldNode = new StoreFieldNode(receiver, field, maskSubWordValue(value, field.getJavaKind()));
append(storeFieldNode);
storeFieldNode.setStateAfter(this.createFrameState(stream.nextBCI(), storeFieldNode));
}
@@ -2528,12 +2529,7 @@
// the bytecode verifier doesn't check that the value is in the correct range
if (stamp.lowerBound() < returnKind.getMinValue() || returnKind.getMaxValue() < stamp.upperBound()) {
- ValueNode narrow = append(genNarrow(value, returnKind.getBitCount()));
- if (returnKind.isUnsigned()) {
- return append(genZeroExtend(narrow, 32));
- } else {
- return append(genSignExtend(narrow, 32));
- }
+ return maskSubWordValue(value, returnKind);
}
}
@@ -2926,7 +2922,7 @@
private void handleUnwindBlock(ExceptionDispatchBlock block) {
if (parent == null) {
- finishPrepare(lastInstr, block.deoptBci);
+ finishPrepare(lastInstr, block.deoptBci, frameState);
frameState.setRethrowException(false);
createUnwind();
} else {
@@ -2966,7 +2962,7 @@
}
genMonitorExit(methodSynchronizedObject, currentReturnValue, bci);
assert !frameState.rethrowException();
- finishPrepare(lastInstr, bci);
+ finishPrepare(lastInstr, bci, frameState);
}
if (frameState.lockDepth(false) != 0) {
throw bailout("unbalanced monitors: too few exits exiting frame");
@@ -3632,7 +3628,7 @@
frameState.storeLocal(index, kind, value);
}
- private void genLoadConstant(int cpi, int opcode) {
+ protected void genLoadConstant(int cpi, int opcode) {
Object con = lookupConstant(cpi, opcode);
if (con instanceof JavaType) {
@@ -3651,6 +3647,21 @@
}
}
+ private JavaKind refineComponentType(ValueNode array, JavaKind kind) {
+ if (kind == JavaKind.Byte) {
+ JavaType type = array.stamp(NodeView.DEFAULT).javaType(metaAccess);
+ if (type.isArray()) {
+ JavaType componentType = type.getComponentType();
+ if (componentType != null) {
+ JavaKind refinedKind = componentType.getJavaKind();
+ assert refinedKind == JavaKind.Byte || refinedKind == JavaKind.Boolean;
+ return refinedKind;
+ }
+ }
+ }
+ return kind;
+ }
+
private void genLoadIndexed(JavaKind kind) {
ValueNode index = frameState.pop(JavaKind.Int);
ValueNode array = frameState.pop(JavaKind.Object);
@@ -3664,7 +3675,8 @@
}
}
- frameState.push(kind, append(genLoadIndexed(array, index, boundsCheck, kind)));
+ JavaKind actualKind = refineComponentType(array, kind);
+ frameState.push(actualKind, append(genLoadIndexed(array, index, boundsCheck, actualKind)));
}
private void genStoreIndexed(JavaKind kind) {
@@ -3682,7 +3694,8 @@
}
}
- genStoreIndexed(array, index, boundsCheck, storeCheck, kind, value);
+ JavaKind actualKind = refineComponentType(array, kind);
+ genStoreIndexed(array, index, boundsCheck, storeCheck, actualKind, maskSubWordValue(value, actualKind));
}
private void genArithmeticOp(JavaKind kind, int opcode) {
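
The refineComponentType change accounts for the fact that boolean[] and byte[] share the BALOAD/BASTORE bytecodes, which both report JavaKind.Byte. A plain-Java stand-in for the same decision, using reflection instead of stamps (class and method names are invented):

    import jdk.vm.ci.meta.JavaKind;

    // Invented helper mirroring BytecodeParser.refineComponentType, but driven by
    // java.lang.Class instead of stamps: BALOAD/BASTORE arrive as JavaKind.Byte even
    // when the array is really a boolean[].
    final class ComponentKindDemo {
        private ComponentKindDemo() {
        }

        static JavaKind refine(Class<?> arrayType, JavaKind kind) {
            if (kind == JavaKind.Byte && arrayType.isArray()) {
                JavaKind refined = JavaKind.fromJavaClass(arrayType.getComponentType());
                if (refined == JavaKind.Boolean || refined == JavaKind.Byte) {
                    return refined;
                }
            }
            return kind;
        }

        public static void main(String[] args) {
            System.out.println(refine(boolean[].class, JavaKind.Byte)); // refined to Boolean
            System.out.println(refine(byte[].class, JavaKind.Byte));    // stays Byte
        }
    }
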
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/FrameStateBuilder.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.java/src/org/graalvm/compiler/java/FrameStateBuilder.java Mon Jun 18 09:48:22 2018 -0700
@@ -221,7 +221,12 @@
stamp = plugins.getOverridingStamp(tool, type, false);
}
if (stamp == null) {
- stamp = StampFactory.forDeclaredType(assumptions, type, false);
+ // GR-714: subword inputs cannot be trusted
+ if (kind.getStackKind() != kind) {
+ stamp = StampPair.createSingle(StampFactory.forKind(JavaKind.Int));
+ } else {
+ stamp = StampFactory.forDeclaredType(assumptions, type, false);
+ }
}
FloatingNode param = null;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64AtomicMove.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64AtomicMove.java Mon Jun 18 09:48:22 2018 -0700
@@ -23,12 +23,15 @@
package org.graalvm.compiler.lir.aarch64;
import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.aarch64.AArch64Assembler;
-import org.graalvm.compiler.asm.aarch64.AArch64Assembler.ShiftType;
import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
+import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler.ScratchRegister;
import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.LIRValueUtil;
import org.graalvm.compiler.lir.Opcode;
import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
@@ -75,7 +78,7 @@
Register address = asRegister(addressValue);
Register result = asRegister(resultValue);
Register newVal = asRegister(newValue);
- if (AArch64LIRFlagsVersioned.useLSE(masm)) {
+ if (AArch64LIRFlagsVersioned.useLSE(masm.target.arch)) {
Register expected = asRegister(expectedValue);
masm.mov(size, result, expected);
masm.cas(size, expected, newVal, address, true /* acquire */, true /* release */);
@@ -113,20 +116,79 @@
private final AArch64Kind accessKind;
- @Def protected AllocatableValue resultValue;
- @Alive protected AllocatableValue addressValue;
- @Alive protected Value deltaValue;
- @Temp protected AllocatableValue scratchValue1;
- @Temp protected AllocatableValue scratchValue2;
+ @Def({REG}) protected AllocatableValue resultValue;
+ @Alive({REG}) protected AllocatableValue addressValue;
+ @Alive({REG, CONST}) protected Value deltaValue;
- public AtomicReadAndAddOp(AArch64Kind kind, AllocatableValue result, AllocatableValue address, Value delta, AllocatableValue scratch1, AllocatableValue scratch2) {
+ public AtomicReadAndAddOp(AArch64Kind kind, AllocatableValue result, AllocatableValue address, Value delta) {
super(TYPE);
this.accessKind = kind;
this.resultValue = result;
this.addressValue = address;
this.deltaValue = delta;
- this.scratchValue1 = scratch1;
- this.scratchValue2 = scratch2;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+ assert accessKind.isInteger();
+ final int size = accessKind.getSizeInBytes() * Byte.SIZE;
+
+ Register address = asRegister(addressValue);
+ Register result = asRegister(resultValue);
+
+ Label retry = new Label();
+ masm.bind(retry);
+ masm.ldaxr(size, result, address);
+ try (ScratchRegister scratchRegister1 = masm.getScratchRegister()) {
+ Register scratch1 = scratchRegister1.getRegister();
+ if (LIRValueUtil.isConstantValue(deltaValue)) {
+ long delta = LIRValueUtil.asConstantValue(deltaValue).getJavaConstant().asLong();
+ masm.add(size, scratch1, result, delta);
+ } else { // must be a register then
+ masm.add(size, scratch1, result, asRegister(deltaValue));
+ }
+ try (ScratchRegister scratchRegister2 = masm.getScratchRegister()) {
+ Register scratch2 = scratchRegister2.getRegister();
+ masm.stlxr(size, scratch2, scratch1, address);
+ // if scratch2 == 0 then write successful, else retry
+ masm.cbnz(32, scratch2, retry);
+ }
+ }
+ }
+ }
+
+ /**
+ * Load (Read) and Add instruction. Does the following atomically: <code>
+ * ATOMIC_READ_AND_ADD(addend, result, address):
+ * result = *address
+ * *address = result + addend
+ * return result
+ * </code>
+ *
+     * The LSE version has different properties with regard to the register allocator. To capture
+     * these differences, we have to create a separate LIR instruction class.
+     *
+     * The differences to {@linkplain AtomicReadAndAddOp} are:
+     * <ul>
+     * <li>{@linkplain #deltaValue} must be a register (@Use({REG}) instead of @Alive({REG, CONST}))</li>
+     * <li>{@linkplain #resultValue} may be an alias for the input registers (@Use instead of @Alive)</li>
+     * </ul>
+ */
+ @Opcode("ATOMIC_READ_AND_ADD")
+ public static final class AtomicReadAndAddLSEOp extends AArch64LIRInstruction {
+ public static final LIRInstructionClass<AtomicReadAndAddLSEOp> TYPE = LIRInstructionClass.create(AtomicReadAndAddLSEOp.class);
+
+ private final AArch64Kind accessKind;
+
+ @Def({REG}) protected AllocatableValue resultValue;
+ @Use({REG}) protected AllocatableValue addressValue;
+ @Use({REG}) protected AllocatableValue deltaValue;
+
+ public AtomicReadAndAddLSEOp(AArch64Kind kind, AllocatableValue result, AllocatableValue address, AllocatableValue delta) {
+ super(TYPE);
+ this.accessKind = kind;
+ this.resultValue = result;
+ this.addressValue = address;
+ this.deltaValue = delta;
}
@Override
@@ -137,19 +199,57 @@
Register address = asRegister(addressValue);
Register delta = asRegister(deltaValue);
Register result = asRegister(resultValue);
+ masm.ldadd(size, delta, result, address, true, true);
+ }
+ }
- if (AArch64LIRFlagsVersioned.useLSE(masm)) {
- masm.ldadd(size, delta, result, address, true, true);
+ /**
+ * Load (Read) and Write instruction. Does the following atomically: <code>
+ * ATOMIC_READ_AND_WRITE(newValue, result, address):
+ * result = *address
+ * *address = newValue
+ * return result
+ * </code>
+ */
+ @Opcode("ATOMIC_READ_AND_WRITE")
+ public static final class AtomicReadAndWriteOp extends AArch64LIRInstruction {
+ public static final LIRInstructionClass<AtomicReadAndWriteOp> TYPE = LIRInstructionClass.create(AtomicReadAndWriteOp.class);
+
+ private final AArch64Kind accessKind;
+
+ @Def protected AllocatableValue resultValue;
+ @Alive protected AllocatableValue addressValue;
+ @Alive protected AllocatableValue newValue;
+ @Temp protected AllocatableValue scratchValue;
+
+ public AtomicReadAndWriteOp(AArch64Kind kind, AllocatableValue result, AllocatableValue address, AllocatableValue newValue, AllocatableValue scratch) {
+ super(TYPE);
+ this.accessKind = kind;
+ this.resultValue = result;
+ this.addressValue = address;
+ this.newValue = newValue;
+ this.scratchValue = scratch;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AArch64MacroAssembler masm) {
+ assert accessKind.isInteger();
+ final int size = accessKind.getSizeInBytes() * Byte.SIZE;
+
+ Register address = asRegister(addressValue);
+ Register value = asRegister(newValue);
+ Register result = asRegister(resultValue);
+
+ if (AArch64LIRFlagsVersioned.useLSE(masm.target.arch)) {
+ masm.swp(size, value, result, address, true, true);
} else {
- Register scratch1 = asRegister(scratchValue1);
- Register scratch2 = asRegister(scratchValue2);
+ Register scratch = asRegister(scratchValue);
Label retry = new Label();
masm.bind(retry);
masm.ldaxr(size, result, address);
- masm.add(size, scratch1, result, delta, ShiftType.LSL, 0);
- masm.stlxr(size, scratch2, scratch1, address);
- // if scratch2 == 0 then write successful, else retry
- masm.cbnz(32, scratch2, retry);
+ masm.stlxr(size, scratch, value, address);
+ // if scratch == 0 then write successful, else retry
+ masm.cbnz(32, scratch, retry);
}
}
}
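
The two new LIR ops implement the read-modify-write semantics spelled out in their javadoc. At the Java level these correspond to getAndAdd and getAndSet; a small sanity-check sketch (illustrative only, not part of the patch):

    import java.util.concurrent.atomic.AtomicInteger;

    // Illustration only: the Java-level counterparts of the two LIR operations.
    public class AtomicRmwDemo {
        public static void main(String[] args) {
            AtomicInteger cell = new AtomicInteger(40);
            int beforeAdd = cell.getAndAdd(2);  // ATOMIC_READ_AND_ADD: result = old, cell = old + addend
            int beforeSwap = cell.getAndSet(7); // ATOMIC_READ_AND_WRITE: result = old, cell = newValue
            System.out.println(beforeAdd + " " + beforeSwap + " " + cell.get()); // 40 42 7
        }
    }
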
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64LIRFlagsVersioned.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.aarch64/src/org/graalvm/compiler/lir/aarch64/AArch64LIRFlagsVersioned.java Mon Jun 18 09:48:22 2018 -0700
@@ -22,13 +22,14 @@
*/
package org.graalvm.compiler.lir.aarch64;
-import org.graalvm.compiler.asm.aarch64.AArch64MacroAssembler;
-
+import jdk.vm.ci.aarch64.AArch64;
import jdk.vm.ci.aarch64.AArch64.CPUFeature;
import jdk.vm.ci.aarch64.AArch64.Flag;
+import jdk.vm.ci.code.Architecture;
public class AArch64LIRFlagsVersioned {
- public static boolean useLSE(AArch64MacroAssembler masm) {
- return masm.supports(CPUFeature.LSE) || masm.isFlagSet(Flag.UseLSE);
+ public static boolean useLSE(Architecture arch) {
+ AArch64 aarch64 = (AArch64) arch;
+ return aarch64.getFeatures().contains(CPUFeature.LSE) || aarch64.getFlags().contains(Flag.UseLSE);
}
-}
+}
\ No newline at end of file
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayCompareToOp.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/AMD64ArrayCompareToOp.java Mon Jun 18 09:48:22 2018 -0700
@@ -32,7 +32,6 @@
import java.lang.reflect.Array;
import java.lang.reflect.Field;
-import java.util.EnumSet;
import org.graalvm.compiler.asm.Label;
import org.graalvm.compiler.asm.amd64.AMD64Address;
@@ -125,10 +124,9 @@
return arch.getFeatures().contains(CPUFeature.AVX2);
}
- private static boolean supportsAVX512VLBW(TargetDescription target) {
- AMD64 arch = (AMD64) target.arch;
- EnumSet<CPUFeature> features = arch.getFeatures();
- return features.contains(CPUFeature.AVX512BW) && features.contains(CPUFeature.AVX512VL);
+ private static boolean supportsAVX512VLBW(@SuppressWarnings("unused") TargetDescription target) {
+ // TODO Add EVEX encoder in our assembler.
+ return false;
}
@Override
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorBinary.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64.vector;
+
+import jdk.vm.ci.meta.AllocatableValue;
+import org.graalvm.compiler.asm.amd64.AMD64Address;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
+import org.graalvm.compiler.asm.amd64.AVXKind;
+import org.graalvm.compiler.lir.LIRFrameState;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.Opcode;
+import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
+
+public class AMD64VectorBinary {
+
+ public static final class AVXBinaryOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXBinaryOp> TYPE = LIRInstructionClass.create(AVXBinaryOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRVMOp opcode;
+ private final AVXKind.AVXSize size;
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue x;
+ @Use({REG, STACK}) protected AllocatableValue y;
+
+ public AVXBinaryOp(AMD64VectorAssembler.VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, AllocatableValue y) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.size = size;
+ this.result = result;
+ this.x = x;
+ this.y = y;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(y)) {
+ opcode.emit(vasm, size, asRegister(result), asRegister(x), asRegister(y));
+ } else {
+ opcode.emit(vasm, size, asRegister(result), asRegister(x), (AMD64Address) crb.asAddress(y));
+ }
+ }
+ }
+
+ public static final class AVXBinaryConstOp extends AMD64VectorLIRInstruction {
+
+ public static final LIRInstructionClass<AVXBinaryConstOp> TYPE = LIRInstructionClass.create(AVXBinaryConstOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRRIOp opcode;
+ private final AVXKind.AVXSize size;
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue x;
+ protected int y;
+
+ public AVXBinaryConstOp(AMD64VectorAssembler.VexRRIOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, int y) {
+ super(TYPE);
+ assert (y & 0xFF) == y;
+ this.opcode = opcode;
+ this.size = size;
+ this.result = result;
+ this.x = x;
+ this.y = y;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ opcode.emit(vasm, size, asRegister(result), asRegister(x), y);
+ }
+ }
+
+ public static final class AVXBinaryMemoryOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXBinaryMemoryOp> TYPE = LIRInstructionClass.create(AVXBinaryMemoryOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRVMOp opcode;
+ private final AVXKind.AVXSize size;
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue x;
+ @Use({COMPOSITE}) protected AMD64AddressValue y;
+ @State protected LIRFrameState state;
+
+ public AVXBinaryMemoryOp(AMD64VectorAssembler.VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue x, AMD64AddressValue y, LIRFrameState state) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.size = size;
+ this.result = result;
+ this.x = x;
+ this.y = y;
+ this.state = state;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (state != null) {
+ crb.recordImplicitException(vasm.position(), state);
+ }
+ opcode.emit(vasm, size, asRegister(result), asRegister(x), y.toAddress());
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorClearOp.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64.vector;
+
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMOp.VPXOR;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMOp.VXORPD;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMOp.VXORPS;
+import static org.graalvm.compiler.asm.amd64.AVXKind.AVXSize.XMM;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
+
+import org.graalvm.compiler.lir.LIRInstruction;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
+import org.graalvm.compiler.asm.amd64.AVXKind;
+
+import jdk.vm.ci.amd64.AMD64Kind;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.meta.AllocatableValue;
+
+public class AMD64VectorClearOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AMD64VectorClearOp> TYPE = LIRInstructionClass.create(AMD64VectorClearOp.class);
+
+ protected @LIRInstruction.Def({REG}) AllocatableValue result;
+
+ public AMD64VectorClearOp(AllocatableValue result) {
+ this(TYPE, result);
+ }
+
+ protected AMD64VectorClearOp(LIRInstructionClass<? extends AMD64VectorClearOp> c, AllocatableValue result) {
+ super(c);
+ this.result = result;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+ Register register = asRegister(result);
+
+ switch (kind.getScalar()) {
+ case SINGLE:
+ VXORPS.emit(vasm, AVXKind.getRegisterSize(kind), register, register, register);
+ break;
+
+ case DOUBLE:
+ VXORPD.emit(vasm, AVXKind.getRegisterSize(kind), register, register, register);
+ break;
+
+ default:
+                // On AVX1, YMM VPXOR is not supported, but the whole YMM register can still be
+                // cleared: the upper 128 bits are implicitly zeroed by the 128-bit AVX1 instruction.
+ VPXOR.emit(vasm, XMM, register, register, register);
+ }
+ }
+}
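
AMD64VectorClearOp relies on the xor-with-self idiom: any value xor'ed with itself is zero, and on AVX1 the VEX-encoded 128-bit form also zeroes the upper YMM bits. A trivial illustration of the identity (not part of the patch):

    // Not part of the patch: xor-with-self yields zero for any bit pattern, which is why
    // VXORPS/VXORPD/VPXOR with identical source and destination registers act as "clear".
    public class XorClearDemo {
        public static void main(String[] args) {
            long x = 0xDEADBEEFCAFEBABEL;
            System.out.println(x ^ x); // prints 0
        }
    }
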
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorCompareOp.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64.vector;
+
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+import jdk.vm.ci.meta.AllocatableValue;
+
+import org.graalvm.compiler.asm.amd64.AMD64Address;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRMOp;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.Opcode;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
+
+public final class AMD64VectorCompareOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AMD64VectorCompareOp> TYPE = LIRInstructionClass.create(AMD64VectorCompareOp.class);
+
+ @Opcode private final VexRMOp opcode;
+ @Use({REG}) protected AllocatableValue x;
+ @Use({REG, STACK}) protected AllocatableValue y;
+
+ public AMD64VectorCompareOp(VexRMOp opcode, AllocatableValue x, AllocatableValue y) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.x = x;
+ this.y = y;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(y)) {
+ opcode.emit(vasm, AVXSize.XMM, asRegister(x), asRegister(y));
+ } else {
+ opcode.emit(vasm, AVXSize.XMM, asRegister(x), (AMD64Address) crb.asAddress(y));
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorLIRInstruction.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64.vector;
+
+import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.amd64.AMD64LIRInstruction;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+
+public abstract class AMD64VectorLIRInstruction extends AMD64LIRInstruction {
+ public static final LIRInstructionClass<AMD64VectorLIRInstruction> TYPE = LIRInstructionClass.create(AMD64VectorLIRInstruction.class);
+
+ protected AMD64VectorLIRInstruction(LIRInstructionClass<? extends AMD64LIRInstruction> c) {
+ super(c);
+ }
+
+ @Override
+ public final void emitCode(CompilationResultBuilder crb, AMD64MacroAssembler masm) {
+ emitCode(crb, (AMD64VectorAssembler) masm);
+ }
+
+ public abstract void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm);
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorMove.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64.vector;
+
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+import static jdk.vm.ci.code.ValueUtil.isStackSlot;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVD;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVDQU;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVQ;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVSD;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVSS;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVUPD;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVUPS;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMOp.VXORPD;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.HINT;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.UNINITIALIZED;
+
+import org.graalvm.compiler.asm.amd64.AMD64Address;
+import org.graalvm.compiler.asm.amd64.AMD64MacroAssembler;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp;
+import org.graalvm.compiler.asm.amd64.AVXKind;
+import org.graalvm.compiler.asm.amd64.AVXKind.AVXSize;
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.lir.LIRFrameState;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.Opcode;
+import org.graalvm.compiler.lir.StandardOp.LoadConstantOp;
+import org.graalvm.compiler.lir.StandardOp.ValueMoveOp;
+import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
+import org.graalvm.compiler.lir.amd64.AMD64Move;
+import org.graalvm.compiler.lir.amd64.AMD64RestoreRegistersOp;
+import org.graalvm.compiler.lir.amd64.AMD64SaveRegistersOp;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.amd64.AMD64Kind;
+import jdk.vm.ci.code.Register;
+import jdk.vm.ci.code.RegisterValue;
+import jdk.vm.ci.code.StackSlot;
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.Constant;
+import jdk.vm.ci.meta.JavaConstant;
+import jdk.vm.ci.meta.Value;
+
+public class AMD64VectorMove {
+
+ @Opcode("VMOVE")
+ public static final class MoveToRegOp extends AMD64VectorLIRInstruction implements ValueMoveOp {
+ public static final LIRInstructionClass<MoveToRegOp> TYPE = LIRInstructionClass.create(MoveToRegOp.class);
+
+ @Def({REG, HINT}) protected AllocatableValue result;
+ @Use({REG, STACK}) protected AllocatableValue input;
+
+ public MoveToRegOp(AllocatableValue result, AllocatableValue input) {
+ super(TYPE);
+ this.result = result;
+ this.input = input;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ move(crb, vasm, result, input);
+ }
+
+ @Override
+ public AllocatableValue getInput() {
+ return input;
+ }
+
+ @Override
+ public AllocatableValue getResult() {
+ return result;
+ }
+ }
+
+ @Opcode("VMOVE")
+ public static final class MoveFromRegOp extends AMD64VectorLIRInstruction implements ValueMoveOp {
+ public static final LIRInstructionClass<MoveFromRegOp> TYPE = LIRInstructionClass.create(MoveFromRegOp.class);
+
+ @Def({REG, STACK}) protected AllocatableValue result;
+ @Use({REG, HINT}) protected AllocatableValue input;
+
+ public MoveFromRegOp(AllocatableValue result, AllocatableValue input) {
+ super(TYPE);
+ this.result = result;
+ this.input = input;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ move(crb, vasm, result, input);
+ }
+
+ @Override
+ public AllocatableValue getInput() {
+ return input;
+ }
+
+ @Override
+ public AllocatableValue getResult() {
+ return result;
+ }
+ }
+
+ @Opcode("VMOVE")
+ public static class MoveFromConstOp extends AMD64VectorLIRInstruction implements LoadConstantOp {
+ public static final LIRInstructionClass<MoveFromConstOp> TYPE = LIRInstructionClass.create(MoveFromConstOp.class);
+
+ @Def({REG, STACK}) protected AllocatableValue result;
+ private final JavaConstant input;
+
+ public MoveFromConstOp(AllocatableValue result, JavaConstant input) {
+ super(TYPE);
+ this.result = result;
+ this.input = input;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(result)) {
+ const2reg(crb, vasm, (RegisterValue) result, input);
+ } else {
+ assert isStackSlot(result);
+ AMD64Move.const2stack(crb, vasm, result, input);
+ }
+ }
+
+ @Override
+ public Constant getConstant() {
+ return input;
+ }
+
+ @Override
+ public AllocatableValue getResult() {
+ return result;
+ }
+ }
+
+ @Opcode("VSTACKMOVE")
+ public static final class StackMoveOp extends AMD64VectorLIRInstruction implements ValueMoveOp {
+ public static final LIRInstructionClass<StackMoveOp> TYPE = LIRInstructionClass.create(StackMoveOp.class);
+
+ @Def({STACK}) protected AllocatableValue result;
+ @Use({STACK, HINT}) protected AllocatableValue input;
+ @Alive({STACK, UNINITIALIZED}) private AllocatableValue backupSlot;
+
+ private Register scratch;
+
+ public StackMoveOp(AllocatableValue result, AllocatableValue input, Register scratch, AllocatableValue backupSlot) {
+ super(TYPE);
+ this.result = result;
+ this.input = input;
+ this.backupSlot = backupSlot;
+ this.scratch = scratch;
+ }
+
+ @Override
+ public AllocatableValue getInput() {
+ return input;
+ }
+
+ @Override
+ public AllocatableValue getResult() {
+ return result;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler masm) {
+ // backup scratch register
+ move(crb, masm, backupSlot, scratch.asValue(backupSlot.getValueKind()));
+ // move stack slot
+ move(crb, masm, scratch.asValue(getInput().getValueKind()), getInput());
+ move(crb, masm, getResult(), scratch.asValue(getResult().getValueKind()));
+ // restore scratch register
+ move(crb, masm, scratch.asValue(backupSlot.getValueKind()), backupSlot);
+
+ }
+ }
+
+ public abstract static class VectorMemOp extends AMD64VectorLIRInstruction {
+
+ protected final AVXSize size;
+ protected final VexMoveOp op;
+
+ @Use({COMPOSITE}) protected AMD64AddressValue address;
+ @State protected LIRFrameState state;
+
+ protected VectorMemOp(LIRInstructionClass<? extends VectorMemOp> c, AVXSize size, VexMoveOp op, AMD64AddressValue address, LIRFrameState state) {
+ super(c);
+ this.size = size;
+ this.op = op;
+ this.address = address;
+ this.state = state;
+ }
+
+ protected abstract void emitMemAccess(AMD64VectorAssembler vasm);
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (state != null) {
+ crb.recordImplicitException(vasm.position(), state);
+ }
+ emitMemAccess(vasm);
+ }
+ }
+
+ public static final class VectorLoadOp extends VectorMemOp {
+ public static final LIRInstructionClass<VectorLoadOp> TYPE = LIRInstructionClass.create(VectorLoadOp.class);
+
+ @Def({REG}) protected AllocatableValue result;
+
+ public VectorLoadOp(AVXSize size, VexMoveOp op, AllocatableValue result, AMD64AddressValue address, LIRFrameState state) {
+ super(TYPE, size, op, address, state);
+ this.result = result;
+ }
+
+ @Override
+ public void emitMemAccess(AMD64VectorAssembler vasm) {
+ op.emit(vasm, size, asRegister(result), address.toAddress());
+ }
+ }
+
+ public static class VectorStoreOp extends VectorMemOp {
+ public static final LIRInstructionClass<VectorStoreOp> TYPE = LIRInstructionClass.create(VectorStoreOp.class);
+
+ @Use({REG}) protected AllocatableValue input;
+
+ public VectorStoreOp(AVXSize size, VexMoveOp op, AMD64AddressValue address, AllocatableValue input, LIRFrameState state) {
+ super(TYPE, size, op, address, state);
+ this.input = input;
+ }
+
+ @Override
+ public void emitMemAccess(AMD64VectorAssembler vasm) {
+ op.emit(vasm, size, address.toAddress(), asRegister(input));
+ }
+ }
+
+ @Opcode("SAVE_REGISTER")
+ public static class SaveRegistersOp extends AMD64SaveRegistersOp {
+ public static final LIRInstructionClass<SaveRegistersOp> TYPE = LIRInstructionClass.create(SaveRegistersOp.class);
+
+ public SaveRegistersOp(Register[] savedRegisters, AllocatableValue[] slots, boolean supportsRemove) {
+ super(TYPE, savedRegisters, slots, supportsRemove);
+ }
+
+ @Override
+ protected void saveRegister(CompilationResultBuilder crb, AMD64MacroAssembler masm, StackSlot result, Register register) {
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+ if (kind.isXMM()) {
+ VexMoveOp op;
+ if (kind.getVectorLength() > 1) {
+ op = getVectorMoveOp(kind.getScalar());
+ } else {
+ op = getScalarMoveOp(kind);
+ }
+
+ AMD64Address addr = (AMD64Address) crb.asAddress(result);
+ op.emit((AMD64VectorAssembler) masm, AVXKind.getRegisterSize(kind), addr, register);
+ } else {
+ super.saveRegister(crb, masm, result, register);
+ }
+ }
+ }
+
+ @Opcode("RESTORE_REGISTER")
+ public static final class RestoreRegistersOp extends AMD64RestoreRegistersOp {
+ public static final LIRInstructionClass<RestoreRegistersOp> TYPE = LIRInstructionClass.create(RestoreRegistersOp.class);
+
+ public RestoreRegistersOp(AllocatableValue[] source, AMD64SaveRegistersOp save) {
+ super(TYPE, source, save);
+ }
+
+ @Override
+ protected void restoreRegister(CompilationResultBuilder crb, AMD64MacroAssembler masm, Register register, StackSlot input) {
+ AMD64Kind kind = (AMD64Kind) input.getPlatformKind();
+ if (kind.isXMM()) {
+ VexMoveOp op;
+ if (kind.getVectorLength() > 1) {
+ op = getVectorMoveOp(kind.getScalar());
+ } else {
+ op = getScalarMoveOp(kind);
+ }
+
+ AMD64Address addr = (AMD64Address) crb.asAddress(input);
+ op.emit((AMD64VectorAssembler) masm, AVXKind.getRegisterSize(kind), register, addr);
+ } else {
+ super.restoreRegister(crb, masm, register, input);
+ }
+ }
+ }
+
+ private static VexMoveOp getScalarMoveOp(AMD64Kind kind) {
+ switch (kind) {
+ case SINGLE:
+ return VMOVSS;
+ case DOUBLE:
+ return VMOVSD;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ }
+
+ private static VexMoveOp getVectorMoveOp(AMD64Kind kind) {
+ switch (kind) {
+ case SINGLE:
+ return VMOVUPS;
+ case DOUBLE:
+ return VMOVUPD;
+ default:
+ return VMOVDQU;
+ }
+ }
+
+ private static VexMoveOp getVectorMemMoveOp(AMD64Kind kind) {
+ switch (AVXKind.getDataSize(kind)) {
+ case DWORD:
+ return VMOVD;
+ case QWORD:
+ return VMOVQ;
+ default:
+ return getVectorMoveOp(kind.getScalar());
+ }
+ }
+
+ private static void move(CompilationResultBuilder crb, AMD64VectorAssembler vasm, AllocatableValue result, Value input) {
+ VexMoveOp op;
+ AVXSize size;
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+ if (kind.getVectorLength() > 1) {
+ size = AVXKind.getRegisterSize(kind);
+ if (isRegister(input) && isRegister(result)) {
+ op = getVectorMoveOp(kind.getScalar());
+ } else {
+ op = getVectorMemMoveOp(kind);
+ }
+ } else {
+ size = AVXSize.XMM;
+ if (isRegister(input) && isRegister(result)) {
+ op = getVectorMoveOp(kind);
+ } else {
+ op = getScalarMoveOp(kind);
+ }
+ }
+
+ if (isRegister(input)) {
+ if (isRegister(result)) {
+ if (!asRegister(input).equals(asRegister(result))) {
+ op.emit(vasm, size, asRegister(result), asRegister(input));
+ }
+ } else {
+ assert isStackSlot(result);
+ op.emit(vasm, size, (AMD64Address) crb.asAddress(result), asRegister(input));
+ }
+ } else {
+ assert isStackSlot(input) && isRegister(result);
+ op.emit(vasm, size, asRegister(result), (AMD64Address) crb.asAddress(input));
+ }
+ }
+
+ private static void const2reg(CompilationResultBuilder crb, AMD64VectorAssembler vasm, RegisterValue result, JavaConstant input) {
+ if (input.isDefaultForKind()) {
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+ Register register = result.getRegister();
+ VXORPD.emit(vasm, AVXKind.getRegisterSize(kind), register, register, register);
+ return;
+ }
+
+ AMD64Address address;
+ switch (input.getJavaKind()) {
+ case Float:
+ address = (AMD64Address) crb.asFloatConstRef(input);
+ break;
+
+ case Double:
+ address = (AMD64Address) crb.asDoubleConstRef(input);
+ break;
+
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+ VexMoveOp op = getScalarMoveOp((AMD64Kind) result.getPlatformKind());
+ op.emit(vasm, AVXSize.XMM, asRegister(result), address);
+ }
+
+ public static final class AVXMoveToIntOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXMoveToIntOp> TYPE = LIRInstructionClass.create(AVXMoveToIntOp.class);
+
+ @Opcode private final VexMoveOp opcode;
+
+ @Def({REG, STACK}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue input;
+
+ public AVXMoveToIntOp(VexMoveOp opcode, AllocatableValue result, AllocatableValue input) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.result = result;
+ this.input = input;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(result)) {
+ opcode.emitReverse(vasm, AVXSize.XMM, asRegister(result), asRegister(input));
+ } else {
+ opcode.emit(vasm, AVXSize.XMM, (AMD64Address) crb.asAddress(result), asRegister(input));
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorShuffle.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64.vector;
+
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+import static jdk.vm.ci.code.ValueUtil.isStackSlot;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMRIOp.VEXTRACTF128;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMRIOp.VEXTRACTI128;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMRIOp.VPEXTRB;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMRIOp.VPEXTRD;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMRIOp.VPEXTRQ;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMRIOp.VPEXTRW;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVD;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMoveOp.VMOVQ;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMIOp.VINSERTF128;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMIOp.VINSERTI128;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMIOp.VSHUFPD;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMIOp.VSHUFPS;
+import static org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMOp.VPSHUFB;
+import static org.graalvm.compiler.asm.amd64.AVXKind.AVXSize.XMM;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
+
+import org.graalvm.compiler.asm.amd64.AMD64Address;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexMRIOp;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRMIOp;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler.VexRVMIOp;
+import org.graalvm.compiler.asm.amd64.AVXKind;
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+
+import jdk.vm.ci.amd64.AMD64;
+import jdk.vm.ci.amd64.AMD64.CPUFeature;
+import jdk.vm.ci.amd64.AMD64Kind;
+import jdk.vm.ci.meta.AllocatableValue;
+
+public class AMD64VectorShuffle {
+
+ public static final class IntToVectorOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<IntToVectorOp> TYPE = LIRInstructionClass.create(IntToVectorOp.class);
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG, STACK}) protected AllocatableValue value;
+
+ public IntToVectorOp(AllocatableValue result, AllocatableValue value) {
+ super(TYPE);
+ assert ((AMD64Kind) result.getPlatformKind()).getScalar().isInteger() : result.getPlatformKind();
+ this.result = result;
+ this.value = value;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(value)) {
+ VMOVD.emit(vasm, XMM, asRegister(result), asRegister(value));
+ } else {
+ assert isStackSlot(value);
+ VMOVD.emit(vasm, XMM, asRegister(result), (AMD64Address) crb.asAddress(value));
+ }
+ }
+ }
+
+ public static final class LongToVectorOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<LongToVectorOp> TYPE = LIRInstructionClass.create(LongToVectorOp.class);
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG, STACK}) protected AllocatableValue value;
+
+ public LongToVectorOp(AllocatableValue result, AllocatableValue value) {
+ super(TYPE);
+ assert result.getPlatformKind() == AMD64Kind.V128_QWORD || result.getPlatformKind() == AMD64Kind.V256_QWORD;
+ this.result = result;
+ this.value = value;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(value)) {
+ VMOVQ.emit(vasm, XMM, asRegister(result), asRegister(value));
+ } else {
+ assert isStackSlot(value);
+ VMOVQ.emit(vasm, XMM, asRegister(result), (AMD64Address) crb.asAddress(value));
+ }
+ }
+ }
+
+ public static final class ShuffleBytesOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ShuffleBytesOp> TYPE = LIRInstructionClass.create(ShuffleBytesOp.class);
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue source;
+ @Use({REG, STACK}) protected AllocatableValue selector;
+
+ public ShuffleBytesOp(AllocatableValue result, AllocatableValue source, AllocatableValue selector) {
+ super(TYPE);
+ this.result = result;
+ this.source = source;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+ if (isRegister(selector)) {
+ VPSHUFB.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source), asRegister(selector));
+ } else {
+ assert isStackSlot(selector);
+ VPSHUFB.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source), (AMD64Address) crb.asAddress(selector));
+ }
+ }
+ }
+
+ public static final class ConstShuffleBytesOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ConstShuffleBytesOp> TYPE = LIRInstructionClass.create(ConstShuffleBytesOp.class);
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue source;
+ private final byte[] selector;
+
+ public ConstShuffleBytesOp(AllocatableValue result, AllocatableValue source, byte... selector) {
+ super(TYPE);
+ assert AVXKind.getRegisterSize(((AMD64Kind) result.getPlatformKind())).getBytes() == selector.length;
+ this.result = result;
+ this.source = source;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+ AMD64Address address = (AMD64Address) crb.recordDataReferenceInCode(selector, selector.length);
+ VPSHUFB.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source), address);
+ }
+ }
+
+ public static class ShuffleWordOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ShuffleWordOp> TYPE = LIRInstructionClass.create(ShuffleWordOp.class);
+ private final VexRMIOp op;
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG, STACK}) protected AllocatableValue source;
+ private final int selector;
+
+ public ShuffleWordOp(VexRMIOp op, AllocatableValue result, AllocatableValue source, int selector) {
+ super(TYPE);
+ this.op = op;
+ this.result = result;
+ this.source = source;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ AMD64Kind kind = (AMD64Kind) source.getPlatformKind();
+ if (isRegister(source)) {
+ op.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source), selector);
+ } else {
+ op.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), (AMD64Address) crb.asAddress(source), selector);
+ }
+ }
+ }
+
+ public static class ShuffleFloatOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ShuffleFloatOp> TYPE = LIRInstructionClass.create(ShuffleFloatOp.class);
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue source1;
+ @Use({REG, STACK}) protected AllocatableValue source2;
+ private final int selector;
+
+ public ShuffleFloatOp(AllocatableValue result, AllocatableValue source1, AllocatableValue source2, int selector) {
+ super(TYPE);
+ this.result = result;
+ this.source1 = source1;
+ this.source2 = source2;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+
+ VexRVMIOp op;
+ switch (kind.getScalar()) {
+ case SINGLE:
+ op = VSHUFPS;
+ break;
+ case DOUBLE:
+ op = VSHUFPD;
+ break;
+ default:
+ throw GraalError.shouldNotReachHere();
+ }
+
+ if (isRegister(source2)) {
+ op.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source1), asRegister(source2), selector);
+ } else {
+ assert isStackSlot(source2);
+ op.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source1), (AMD64Address) crb.asAddress(source2), selector);
+ }
+ }
+ }
+
+ public static final class Extract128Op extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<Extract128Op> TYPE = LIRInstructionClass.create(Extract128Op.class);
+ @Def({REG, STACK}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue source;
+ private final int selector;
+
+ public Extract128Op(AllocatableValue result, AllocatableValue source, int selector) {
+ super(TYPE);
+ this.result = result;
+ this.source = source;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ AMD64Kind kind = (AMD64Kind) source.getPlatformKind();
+
+ VexMRIOp op;
+ switch (kind.getScalar()) {
+ case SINGLE:
+ case DOUBLE:
+ op = VEXTRACTF128;
+ break;
+ default:
+ AMD64 arch = (AMD64) crb.target.arch;
+ // if AVX2 is supported, we want VEXTRACTI128;
+ // on AVX1, we have to use VEXTRACTF128
+ op = arch.getFeatures().contains(CPUFeature.AVX2) ? VEXTRACTI128 : VEXTRACTF128;
+ break;
+ }
+
+ if (isRegister(result)) {
+ op.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source), selector);
+ } else {
+ assert isStackSlot(result);
+ op.emit(vasm, AVXKind.getRegisterSize(kind), (AMD64Address) crb.asAddress(result), asRegister(source), selector);
+ }
+ }
+ }
+
+ public static final class Insert128Op extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<Insert128Op> TYPE = LIRInstructionClass.create(Insert128Op.class);
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue source1;
+ @Use({REG, STACK}) protected AllocatableValue source2;
+ private final int selector;
+
+ public Insert128Op(AllocatableValue result, AllocatableValue source1, AllocatableValue source2, int selector) {
+ super(TYPE);
+ this.result = result;
+ this.source1 = source1;
+ this.source2 = source2;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ AMD64Kind kind = (AMD64Kind) result.getPlatformKind();
+
+ VexRVMIOp op;
+ switch (kind.getScalar()) {
+ case SINGLE:
+ case DOUBLE:
+ op = VINSERTF128;
+ break;
+ default:
+ AMD64 arch = (AMD64) crb.target.arch;
+ // if AVX2 is supported, we want VINSERTI128; on AVX1, we have to use VINSERTF128.
+ // Using instructions with an incorrect data type is possible but typically
+ // results in additional overhead whenever the value is accessed.
+ op = arch.getFeatures().contains(CPUFeature.AVX2) ? VINSERTI128 : VINSERTF128;
+ break;
+ }
+
+ if (isRegister(source2)) {
+ op.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source1), asRegister(source2), selector);
+ } else {
+ assert isStackSlot(source2);
+ op.emit(vasm, AVXKind.getRegisterSize(kind), asRegister(result), asRegister(source1), (AMD64Address) crb.asAddress(source2), selector);
+ }
+ }
+ }
+
+ public static final class ExtractByteOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ExtractByteOp> TYPE = LIRInstructionClass.create(ExtractByteOp.class);
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue vector;
+ private final int selector;
+
+ public ExtractByteOp(AllocatableValue result, AllocatableValue vector, int selector) {
+ super(TYPE);
+ assert result.getPlatformKind() == AMD64Kind.DWORD;
+ assert ((AMD64Kind) vector.getPlatformKind()).getScalar() == AMD64Kind.BYTE;
+ this.result = result;
+ this.vector = vector;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ VPEXTRB.emit(vasm, XMM, asRegister(result), asRegister(vector), selector);
+ }
+ }
+
+ public static final class ExtractShortOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ExtractShortOp> TYPE = LIRInstructionClass.create(ExtractShortOp.class);
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue vector;
+ private final int selector;
+
+ public ExtractShortOp(AllocatableValue result, AllocatableValue vector, int selector) {
+ super(TYPE);
+ assert result.getPlatformKind() == AMD64Kind.DWORD;
+ assert ((AMD64Kind) vector.getPlatformKind()).getScalar() == AMD64Kind.WORD;
+ this.result = result;
+ this.vector = vector;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ VPEXTRW.emit(vasm, XMM, asRegister(result), asRegister(vector), selector);
+ }
+ }
+
+ public static final class ExtractIntOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ExtractIntOp> TYPE = LIRInstructionClass.create(ExtractIntOp.class);
+ @Def({REG, STACK}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue vector;
+ private final int selector;
+
+ public ExtractIntOp(AllocatableValue result, AllocatableValue vector, int selector) {
+ super(TYPE);
+ assert result.getPlatformKind() == AMD64Kind.DWORD;
+ assert ((AMD64Kind) vector.getPlatformKind()).getScalar() == AMD64Kind.DWORD;
+ this.result = result;
+ this.vector = vector;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(result)) {
+ if (selector == 0) {
+ VMOVD.emitReverse(vasm, XMM, asRegister(result), asRegister(vector));
+ } else {
+ VPEXTRD.emit(vasm, XMM, asRegister(result), asRegister(vector), selector);
+ }
+ } else {
+ assert isStackSlot(result);
+ if (selector == 0) {
+ VMOVD.emit(vasm, XMM, (AMD64Address) crb.asAddress(result), asRegister(vector));
+ } else {
+ VPEXTRD.emit(vasm, XMM, (AMD64Address) crb.asAddress(result), asRegister(vector), selector);
+ }
+ }
+ }
+ }
+
+ public static final class ExtractLongOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<ExtractLongOp> TYPE = LIRInstructionClass.create(ExtractLongOp.class);
+ @Def({REG, STACK}) protected AllocatableValue result;
+ @Use({REG}) protected AllocatableValue vector;
+ private final int selector;
+
+ public ExtractLongOp(AllocatableValue result, AllocatableValue vector, int selector) {
+ super(TYPE);
+ assert result.getPlatformKind() == AMD64Kind.QWORD;
+ assert ((AMD64Kind) vector.getPlatformKind()).getScalar() == AMD64Kind.QWORD;
+ this.result = result;
+ this.vector = vector;
+ this.selector = selector;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(result)) {
+ if (selector == 0) {
+ VMOVQ.emitReverse(vasm, XMM, asRegister(result), asRegister(vector));
+ } else {
+ VPEXTRQ.emit(vasm, XMM, asRegister(result), asRegister(vector), selector);
+ }
+ } else {
+ assert isStackSlot(result);
+ if (selector == 0) {
+ VMOVQ.emit(vasm, XMM, (AMD64Address) crb.asAddress(result), asRegister(vector));
+ } else {
+ VPEXTRQ.emit(vasm, XMM, (AMD64Address) crb.asAddress(result), asRegister(vector), selector);
+ }
+ }
+ }
+ }
+}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir.amd64/src/org/graalvm/compiler/lir/amd64/vector/AMD64VectorUnary.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,189 @@
+/*
+ * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.lir.amd64.vector;
+
+import jdk.vm.ci.meta.AllocatableValue;
+import jdk.vm.ci.meta.Value;
+import org.graalvm.compiler.asm.amd64.AMD64Address;
+import org.graalvm.compiler.asm.amd64.AMD64VectorAssembler;
+import org.graalvm.compiler.asm.amd64.AVXKind;
+import org.graalvm.compiler.lir.LIRFrameState;
+import org.graalvm.compiler.lir.LIRInstructionClass;
+import org.graalvm.compiler.lir.Opcode;
+import org.graalvm.compiler.lir.amd64.AMD64AddressValue;
+import org.graalvm.compiler.lir.asm.CompilationResultBuilder;
+
+import static jdk.vm.ci.code.ValueUtil.asRegister;
+import static jdk.vm.ci.code.ValueUtil.isRegister;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.COMPOSITE;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.CONST;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.REG;
+import static org.graalvm.compiler.lir.LIRInstruction.OperandFlag.STACK;
+import static org.graalvm.compiler.lir.LIRValueUtil.asConstant;
+import static org.graalvm.compiler.lir.LIRValueUtil.isConstantValue;
+
+public class AMD64VectorUnary {
+
+ public static final class AVXUnaryOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXUnaryOp> TYPE = LIRInstructionClass.create(AVXUnaryOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRMOp opcode;
+ private final AVXKind.AVXSize size;
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG, STACK}) protected AllocatableValue input;
+
+ public AVXUnaryOp(AMD64VectorAssembler.VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AllocatableValue input) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.size = size;
+ this.result = result;
+ this.input = input;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(input)) {
+ opcode.emit(vasm, size, asRegister(result), asRegister(input));
+ } else {
+ opcode.emit(vasm, size, asRegister(result), (AMD64Address) crb.asAddress(input));
+ }
+ }
+ }
+
+ public static final class AVXUnaryMemoryOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXUnaryMemoryOp> TYPE = LIRInstructionClass.create(AVXUnaryMemoryOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRMOp opcode;
+ private final AVXKind.AVXSize size;
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({COMPOSITE}) protected AMD64AddressValue input;
+ @State protected LIRFrameState state;
+
+ public AVXUnaryMemoryOp(AMD64VectorAssembler.VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.size = size;
+ this.result = result;
+ this.input = input;
+ this.state = state;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (state != null) {
+ crb.recordImplicitException(vasm.position(), state);
+ }
+ opcode.emit(vasm, size, asRegister(result), input.toAddress());
+ }
+ }
+
+ public static final class AVXBroadcastOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXBroadcastOp> TYPE = LIRInstructionClass.create(AVXBroadcastOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRMOp opcode;
+ private final AVXKind.AVXSize size;
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG, STACK, CONST}) protected Value input;
+
+ public AVXBroadcastOp(AMD64VectorAssembler.VexRMOp opcode, AVXKind.AVXSize size, AllocatableValue result, Value input) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.size = size;
+ this.result = result;
+ this.input = input;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(input)) {
+ opcode.emit(vasm, size, asRegister(result), asRegister(input));
+ } else if (isConstantValue(input)) {
+ int align = input.getPlatformKind().getSizeInBytes();
+ AMD64Address address = (AMD64Address) crb.recordDataReferenceInCode(asConstant(input), align);
+ opcode.emit(vasm, size, asRegister(result), address);
+ } else {
+ opcode.emit(vasm, size, asRegister(result), (AMD64Address) crb.asAddress(input));
+ }
+ }
+ }
+
+ public static final class AVXConvertMemoryOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXConvertMemoryOp> TYPE = LIRInstructionClass.create(AVXConvertMemoryOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRVMOp opcode;
+ private final AVXKind.AVXSize size;
+
+ @Def({REG}) protected AllocatableValue result;
+ @Use({COMPOSITE}) protected AMD64AddressValue input;
+ @State protected LIRFrameState state;
+
+ public AVXConvertMemoryOp(AMD64VectorAssembler.VexRVMOp opcode, AVXKind.AVXSize size, AllocatableValue result, AMD64AddressValue input, LIRFrameState state) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.size = size;
+ this.result = result;
+ this.input = input;
+ this.state = state;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (state != null) {
+ crb.recordImplicitException(vasm.position(), state);
+ }
+ opcode.emit(vasm, size, asRegister(result), asRegister(result), input.toAddress());
+ }
+ }
+
+ public static final class AVXConvertOp extends AMD64VectorLIRInstruction {
+ public static final LIRInstructionClass<AVXConvertOp> TYPE = LIRInstructionClass.create(AVXConvertOp.class);
+
+ @Opcode private final AMD64VectorAssembler.VexRVMOp opcode;
+ @Def({REG}) protected AllocatableValue result;
+ @Use({REG, STACK}) protected AllocatableValue input;
+
+ public AVXConvertOp(AMD64VectorAssembler.VexRVMOp opcode, AllocatableValue result, AllocatableValue input) {
+ super(TYPE);
+ this.opcode = opcode;
+ this.result = result;
+ this.input = input;
+ }
+
+ @Override
+ public void emitCode(CompilationResultBuilder crb, AMD64VectorAssembler vasm) {
+ if (isRegister(input)) {
+ if (!asRegister(input).equals(asRegister(result))) {
+ // clear result register to avoid unnecessary dependency
+ AMD64VectorAssembler.VexRVMOp.VXORPD.emit(vasm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), asRegister(result));
+ }
+ opcode.emit(vasm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), asRegister(input));
+ } else {
+ AMD64VectorAssembler.VexRVMOp.VXORPD.emit(vasm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), asRegister(result));
+ opcode.emit(vasm, AVXKind.AVXSize.XMM, asRegister(result), asRegister(result), (AMD64Address) crb.asAddress(input));
+ }
+ }
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/ShortCircuitOrNodeTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/ShortCircuitOrNodeTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -24,24 +24,27 @@
import java.util.function.Function;
-import org.junit.Assert;
-import org.junit.Test;
-
import org.graalvm.compiler.core.test.GraalCompilerTest;
import org.graalvm.compiler.nodes.ConstantNode;
import org.graalvm.compiler.nodes.LogicNode;
import org.graalvm.compiler.nodes.ShortCircuitOrNode;
import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
import org.graalvm.compiler.nodes.ValueNode;
-import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
import org.graalvm.compiler.nodes.calc.ConditionalNode;
import org.graalvm.compiler.nodes.calc.IntegerEqualsNode;
import org.graalvm.compiler.nodes.graphbuilderconf.GraphBuilderContext;
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugin;
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins;
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins.Registration;
+import org.graalvm.compiler.nodes.spi.LoweringTool;
import org.graalvm.compiler.phases.common.CanonicalizerPhase;
+import org.graalvm.compiler.phases.common.FloatingReadPhase;
+import org.graalvm.compiler.phases.common.IncrementalCanonicalizerPhase;
+import org.graalvm.compiler.phases.common.LoweringPhase;
import org.graalvm.compiler.phases.tiers.PhaseContext;
+import org.junit.Assert;
+import org.junit.Test;
import jdk.vm.ci.meta.JavaKind;
import jdk.vm.ci.meta.ResolvedJavaMethod;
@@ -101,7 +104,7 @@
return trueCount;
}
- public boolean testSimpleSnippet(boolean a, boolean b) {
+ public boolean testSimpleSnippet(Boolean a, Boolean b) {
return shortCircuitOr(a, b);
}
@@ -110,259 +113,260 @@
testInputCombinations("testSimpleSnippet");
}
- public static boolean testCascadeSnippet1(boolean a, boolean b) {
+ // We cannot trust subword inputs: a parameter declared as boolean will have a stamp of int32.
+ public static boolean testCascadeSnippet1(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(a, b), a);
}
- public static boolean testCascadeSnippet2(boolean a, boolean b) {
+ public static boolean testCascadeSnippet2(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(b, a), a);
}
- public static boolean testCascadeSnippet3(boolean a, boolean b) {
+ public static boolean testCascadeSnippet3(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(a, b));
}
- public static boolean testCascadeSnippet4(boolean a, boolean b) {
+ public static boolean testCascadeSnippet4(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(b, a));
}
- public static boolean testCascadeSnippet5(boolean a, boolean b) {
+ public static boolean testCascadeSnippet5(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(a, b), a);
}
- public static boolean testCascadeSnippet6(boolean a, boolean b) {
+ public static boolean testCascadeSnippet6(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(b, a), a);
}
- public static boolean testCascadeSnippet7(boolean a, boolean b) {
+ public static boolean testCascadeSnippet7(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(a, b));
}
- public static boolean testCascadeSnippet8(boolean a, boolean b) {
+ public static boolean testCascadeSnippet8(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(b, a));
}
- public static boolean testCascadeSnippet9(boolean a, boolean b) {
+ public static boolean testCascadeSnippet9(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!a, b), a);
}
- public static boolean testCascadeSnippet10(boolean a, boolean b) {
+ public static boolean testCascadeSnippet10(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!b, a), a);
}
- public static boolean testCascadeSnippet11(boolean a, boolean b) {
+ public static boolean testCascadeSnippet11(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(a, b));
}
- public static boolean testCascadeSnippet12(boolean a, boolean b) {
+ public static boolean testCascadeSnippet12(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(b, a));
}
- public static boolean testCascadeSnippet13(boolean a, boolean b) {
+ public static boolean testCascadeSnippet13(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!a, b), a);
}
- public static boolean testCascadeSnippet14(boolean a, boolean b) {
+ public static boolean testCascadeSnippet14(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!b, a), a);
}
- public static boolean testCascadeSnippet15(boolean a, boolean b) {
+ public static boolean testCascadeSnippet15(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(a, b));
}
- public static boolean testCascadeSnippet16(boolean a, boolean b) {
+ public static boolean testCascadeSnippet16(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(!b, a));
}
- public static boolean testCascadeSnippet17(boolean a, boolean b) {
+ public static boolean testCascadeSnippet17(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(a, !b), a);
}
- public static boolean testCascadeSnippet18(boolean a, boolean b) {
+ public static boolean testCascadeSnippet18(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(b, !a), a);
}
- public static boolean testCascadeSnippet19(boolean a, boolean b) {
+ public static boolean testCascadeSnippet19(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(!a, b));
}
- public static boolean testCascadeSnippet20(boolean a, boolean b) {
+ public static boolean testCascadeSnippet20(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(!b, a));
}
- public static boolean testCascadeSnippet21(boolean a, boolean b) {
+ public static boolean testCascadeSnippet21(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(a, !b), a);
}
- public static boolean testCascadeSnippet22(boolean a, boolean b) {
+ public static boolean testCascadeSnippet22(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(b, !a), a);
}
- public static boolean testCascadeSnippet23(boolean a, boolean b) {
+ public static boolean testCascadeSnippet23(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(!a, b));
}
- public static boolean testCascadeSnippet24(boolean a, boolean b) {
+ public static boolean testCascadeSnippet24(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(!b, a));
}
- public static boolean testCascadeSnippet25(boolean a, boolean b) {
+ public static boolean testCascadeSnippet25(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!a, !b), a);
}
- public static boolean testCascadeSnippet26(boolean a, boolean b) {
+ public static boolean testCascadeSnippet26(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!b, !a), a);
}
- public static boolean testCascadeSnippet27(boolean a, boolean b) {
+ public static boolean testCascadeSnippet27(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(!a, b));
}
- public static boolean testCascadeSnippet28(boolean a, boolean b) {
+ public static boolean testCascadeSnippet28(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(!b, a));
}
- public static boolean testCascadeSnippet29(boolean a, boolean b) {
+ public static boolean testCascadeSnippet29(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!a, !b), a);
}
- public static boolean testCascadeSnippet30(boolean a, boolean b) {
+ public static boolean testCascadeSnippet30(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!b, !a), a);
}
- public static boolean testCascadeSnippet31(boolean a, boolean b) {
+ public static boolean testCascadeSnippet31(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(!a, b));
}
- public static boolean testCascadeSnippet32(boolean a, boolean b) {
+ public static boolean testCascadeSnippet32(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(!b, a));
}
- public static boolean testCascadeSnippet33(boolean a, boolean b) {
+ public static boolean testCascadeSnippet33(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(a, b), !a);
}
- public static boolean testCascadeSnippet34(boolean a, boolean b) {
+ public static boolean testCascadeSnippet34(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(b, a), !a);
}
- public static boolean testCascadeSnippet35(boolean a, boolean b) {
+ public static boolean testCascadeSnippet35(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(a, !b));
}
- public static boolean testCascadeSnippet36(boolean a, boolean b) {
+ public static boolean testCascadeSnippet36(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(b, !a));
}
- public static boolean testCascadeSnippet37(boolean a, boolean b) {
+ public static boolean testCascadeSnippet37(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(a, b), !a);
}
- public static boolean testCascadeSnippet38(boolean a, boolean b) {
+ public static boolean testCascadeSnippet38(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(b, a), !a);
}
- public static boolean testCascadeSnippet39(boolean a, boolean b) {
+ public static boolean testCascadeSnippet39(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(a, !b));
}
- public static boolean testCascadeSnippet40(boolean a, boolean b) {
+ public static boolean testCascadeSnippet40(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(b, !a));
}
- public static boolean testCascadeSnippet41(boolean a, boolean b) {
+ public static boolean testCascadeSnippet41(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!a, b), !a);
}
- public static boolean testCascadeSnippet42(boolean a, boolean b) {
+ public static boolean testCascadeSnippet42(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!b, a), !a);
}
- public static boolean testCascadeSnippet43(boolean a, boolean b) {
+ public static boolean testCascadeSnippet43(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(a, !b));
}
- public static boolean testCascadeSnippet44(boolean a, boolean b) {
+ public static boolean testCascadeSnippet44(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(b, !a));
}
- public static boolean testCascadeSnippet45(boolean a, boolean b) {
+ public static boolean testCascadeSnippet45(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!a, b), !a);
}
- public static boolean testCascadeSnippet46(boolean a, boolean b) {
+ public static boolean testCascadeSnippet46(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!b, a), !a);
}
- public static boolean testCascadeSnippet47(boolean a, boolean b) {
+ public static boolean testCascadeSnippet47(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(a, !b));
}
- public static boolean testCascadeSnippet48(boolean a, boolean b) {
+ public static boolean testCascadeSnippet48(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(!b, !a));
}
- public static boolean testCascadeSnippet49(boolean a, boolean b) {
+ public static boolean testCascadeSnippet49(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(a, !b), !a);
}
- public static boolean testCascadeSnippet50(boolean a, boolean b) {
+ public static boolean testCascadeSnippet50(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(b, !a), !a);
}
- public static boolean testCascadeSnippet51(boolean a, boolean b) {
+ public static boolean testCascadeSnippet51(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(!a, !b));
}
- public static boolean testCascadeSnippet52(boolean a, boolean b) {
+ public static boolean testCascadeSnippet52(Boolean a, Boolean b) {
return shortCircuitOr(a, shortCircuitOr(!b, !a));
}
- public static boolean testCascadeSnippet53(boolean a, boolean b) {
+ public static boolean testCascadeSnippet53(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(a, !b), !a);
}
- public static boolean testCascadeSnippet54(boolean a, boolean b) {
+ public static boolean testCascadeSnippet54(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(b, !a), !a);
}
- public static boolean testCascadeSnippet55(boolean a, boolean b) {
+ public static boolean testCascadeSnippet55(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(!a, !b));
}
- public static boolean testCascadeSnippet56(boolean a, boolean b) {
+ public static boolean testCascadeSnippet56(Boolean a, Boolean b) {
return shortCircuitOr(!a, shortCircuitOr(!b, !a));
}
- public static boolean testCascadeSnippet57(boolean a, boolean b) {
+ public static boolean testCascadeSnippet57(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!a, !b), !a);
}
- public static boolean testCascadeSnippet58(boolean a, boolean b) {
+ public static boolean testCascadeSnippet58(Boolean a, Boolean b) {
return shortCircuitOr(shortCircuitOr(!b, !a), !a);
}
- public static boolean testCascadeSnippet59(boolean a, boolean b) {
+ public static boolean testCascadeSnippet59(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(!a, !b));
}
- public static boolean testCascadeSnippet60(boolean a, boolean b) {
+ public static boolean testCascadeSnippet60(Boolean a, Boolean b) {
return shortCircuitOr(a, !shortCircuitOr(!b, !a));
}
- public static boolean testCascadeSnippet61(boolean a, boolean b) {
+ public static boolean testCascadeSnippet61(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!a, !b), !a);
}
- public static boolean testCascadeSnippet62(boolean a, boolean b) {
+ public static boolean testCascadeSnippet62(Boolean a, Boolean b) {
return shortCircuitOr(!shortCircuitOr(!b, !a), !a);
}
- public static boolean testCascadeSnippet63(boolean a, boolean b) {
+ public static boolean testCascadeSnippet63(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(!a, !b));
}
- public static boolean testCascadeSnippet64(boolean a, boolean b) {
+ public static boolean testCascadeSnippet64(Boolean a, Boolean b) {
return shortCircuitOr(!a, !shortCircuitOr(!b, !a));
}
@@ -374,6 +378,8 @@
PhaseContext context = new PhaseContext(getProviders());
CanonicalizerPhase canonicalizer = new CanonicalizerPhase();
canonicalizer.apply(graph, context);
+ new LoweringPhase(canonicalizer, LoweringTool.StandardLoweringStage.HIGH_TIER).apply(graph, context);
+ new IncrementalCanonicalizerPhase<>(canonicalizer, new FloatingReadPhase()).apply(graph, context);
int shortCircuitCount = graph.getNodes(ShortCircuitOrNode.TYPE).count();
int trueCount = testInputCombinations(snippet);
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes.test/src/org/graalvm/compiler/nodes/test/StructuredGraphTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.nodes.test;
+
+import org.graalvm.compiler.core.test.GraalCompilerTest;
+import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.debug.DebugHandlersFactory;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
+import org.graalvm.compiler.options.OptionValues;
+import org.junit.Assert;
+import org.junit.Test;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+public class StructuredGraphTest extends GraalCompilerTest {
+
+ @Test
+ public void testGetBytecodeSize() {
+ OptionValues options = getInitialOptions();
+ DebugContext debug = DebugContext.create(options, DebugHandlersFactory.LOADER);
+ ResolvedJavaMethod rootMethod = getResolvedJavaMethod("testGetBytecodeSize");
+
+ // Test graph with root method and inlined methods
+ StructuredGraph graph = new StructuredGraph.Builder(options, debug, AllowAssumptions.YES).method(rootMethod).build();
+ ResolvedJavaMethod otherMethod = getResolvedJavaMethod(GraalCompilerTest.class, "createSuites");
+ int expectedBytecodeSize = rootMethod.getCodeSize();
+ for (int i = 0; i < 10; i++) {
+ graph.recordMethod(otherMethod);
+ expectedBytecodeSize += otherMethod.getCodeSize();
+ }
+ Assert.assertEquals(expectedBytecodeSize, graph.getBytecodeSize());
+
+ // Test graph with only root method, no inlined methods
+ graph = new StructuredGraph.Builder(options, debug, AllowAssumptions.YES).method(rootMethod).build();
+ expectedBytecodeSize = rootMethod.getCodeSize();
+ Assert.assertEquals(expectedBytecodeSize, graph.getBytecodeSize());
+
+ // Test graph with no root method, only inlined methods
+ graph = new StructuredGraph.Builder(options, debug, AllowAssumptions.YES).build();
+ expectedBytecodeSize = 0;
+ for (int i = 0; i < 10; i++) {
+ graph.recordMethod(otherMethod);
+ expectedBytecodeSize += otherMethod.getCodeSize();
+ }
+ Assert.assertEquals(expectedBytecodeSize, graph.getBytecodeSize());
+
+ // Test graph with no root method, no inlined methods
+ graph = new StructuredGraph.Builder(options, debug, AllowAssumptions.YES).build();
+ expectedBytecodeSize = 0;
+ Assert.assertEquals(expectedBytecodeSize, graph.getBytecodeSize());
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphDecoder.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphDecoder.java Mon Jun 18 09:48:22 2018 -0700
@@ -1400,6 +1400,15 @@
DebugContext debug = graph.getDebug();
debug.dump(DebugContext.DETAILED_LEVEL, graph, "Before loop detection");
+ if (methodScope.loopExplosionHead == null) {
+ /*
+ * The to-be-exploded loop was not reached during partial evaluation (e.g., because
+ * there was a deoptimization beforehand), or the method might not even contain a loop.
+ * This is an uncommon case, but not an error.
+ */
+ return;
+ }
+
List<Loop> orderedLoops = findLoops();
assert orderedLoops.get(orderedLoops.size() - 1) == irreducibleLoopHandler : "outermost loop must be the last element in the list";
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/StructuredGraph.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/StructuredGraph.java Mon Jun 18 09:48:22 2018 -0700
@@ -25,10 +25,14 @@
import static org.graalvm.compiler.graph.Graph.SourcePositionTracking.Default;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.SortedSet;
+import java.util.TreeSet;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;
+import java.util.stream.Collectors;
import jdk.internal.vm.compiler.collections.EconomicMap;
import jdk.internal.vm.compiler.collections.EconomicSet;
@@ -40,6 +44,7 @@
import org.graalvm.compiler.core.common.cfg.BlockMap;
import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.debug.DebugContext;
+import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.debug.JavaMethodContext;
import org.graalvm.compiler.debug.TTY;
import org.graalvm.compiler.graph.Graph;
@@ -54,6 +59,7 @@
import org.graalvm.compiler.nodes.util.GraphUtil;
import org.graalvm.compiler.options.OptionValues;
+import jdk.vm.ci.code.BytecodeFrame;
import jdk.vm.ci.meta.Assumptions;
import jdk.vm.ci.meta.Assumptions.Assumption;
import jdk.vm.ci.meta.DefaultProfilingInfo;
@@ -170,6 +176,7 @@
private CompilationIdentifier compilationId = CompilationIdentifier.INVALID_COMPILATION_ID;
private int entryBCI = JVMCICompiler.INVOCATION_ENTRY_BCI;
private boolean useProfilingInfo = true;
+ private boolean recordInlinedMethods = true;
private SourcePositionTracking trackNodeSourcePosition = Default;
private final OptionValues options;
private Cancellable cancellable = null;
@@ -263,6 +270,15 @@
return this;
}
+ public boolean getRecordInlinedMethods() {
+ return recordInlinedMethods;
+ }
+
+ public Builder recordInlinedMethods(boolean flag) {
+ this.recordInlinedMethods = flag;
+ return this;
+ }
+
public Builder trackNodeSourcePosition(SourcePositionTracking tracking) {
this.trackNodeSourcePosition = tracking;
return this;
@@ -281,7 +297,9 @@
}
public StructuredGraph build() {
- return new StructuredGraph(name, rootMethod, entryBCI, assumptions, speculationLog, useProfilingInfo, trackNodeSourcePosition, compilationId, options, debug, cancellable, callerContext);
+ List<ResolvedJavaMethod> inlinedMethods = recordInlinedMethods ? new ArrayList<>() : null;
+ return new StructuredGraph(name, rootMethod, entryBCI, assumptions, speculationLog, useProfilingInfo, inlinedMethods,
+ trackNodeSourcePosition, compilationId, options, debug, cancellable, callerContext);
}
}
@@ -318,14 +336,14 @@
/**
* Records the methods that were used while constructing this graph, one entry for each time a
- * specific method is used.
+ * specific method is used. This will be {@code null} if recording of inlined methods is
+ * disabled for the graph.
*/
- private final List<ResolvedJavaMethod> methods = new ArrayList<>();
+ private final List<ResolvedJavaMethod> methods;
/**
* Records the fields that were accessed while constructing this graph.
*/
-
private EconomicSet<ResolvedJavaField> fields = null;
private enum UnsafeAccessState {
@@ -346,6 +364,7 @@
Assumptions assumptions,
SpeculationLog speculationLog,
boolean useProfilingInfo,
+ List<ResolvedJavaMethod> methods,
SourcePositionTracking trackNodeSourcePosition,
CompilationIdentifier compilationId,
OptionValues options,
@@ -359,6 +378,7 @@
this.compilationId = compilationId;
this.entryBCI = entryBCI;
this.assumptions = assumptions;
+ this.methods = methods;
if (speculationLog != null && !(speculationLog instanceof GraphSpeculationLog)) {
this.speculationLog = new GraphSpeculationLog(speculationLog);
} else {
@@ -514,6 +534,7 @@
assumptions == null ? null : new Assumptions(),
speculationLog,
useProfilingInfo,
+ methods != null ? new ArrayList<>(methods) : null,
trackNodeSourcePosition,
newCompilationId,
getOptions(), debugForCopy, null, callerContext);
@@ -526,6 +547,9 @@
copy.hasValueProxies = hasValueProxies;
copy.isAfterExpandLogic = isAfterExpandLogic;
copy.trackNodeSourcePosition = trackNodeSourcePosition;
+ if (fields != null) {
+ copy.fields = createFieldSet(fields);
+ }
EconomicMap<Node, Node> replacements = EconomicMap.create(Equivalence.IDENTITY);
replacements.put(start, copy.start);
UnmodifiableEconomicMap<Node, Node> duplicates;
@@ -862,16 +886,55 @@
}
/**
- * Gets the methods that were inlined while constructing this graph.
+ * Checks that any method referenced from a {@link FrameState} is also in the set of methods
+ * parsed while building this graph.
+ */
+ private boolean checkFrameStatesAgainstInlinedMethods() {
+ for (FrameState fs : getNodes(FrameState.TYPE)) {
+ if (!BytecodeFrame.isPlaceholderBci(fs.bci)) {
+ ResolvedJavaMethod m = fs.code.getMethod();
+ if (!m.equals(rootMethod) && !methods.contains(m)) {
+ SortedSet<String> haystack = new TreeSet<>();
+ if (!methods.contains(rootMethod)) {
+ haystack.add(rootMethod.format("%H.%n(%p)"));
+ }
+ for (ResolvedJavaMethod e : methods) {
+ haystack.add(e.format("%H.%n(%p)"));
+ }
+ throw new AssertionError(String.format("Could not find %s from %s in set(%s)", m.format("%H.%n(%p)"), fs, haystack.stream().collect(Collectors.joining(System.lineSeparator()))));
+ }
+ }
+ }
+ return true;
+ }
+
+ private static EconomicSet<ResolvedJavaField> createFieldSet(EconomicSet<ResolvedJavaField> init) {
+ // Multiple ResolvedJavaField objects can represent the same field so they
+ // need to be compared with equals().
+ if (init != null) {
+ return EconomicSet.create(Equivalence.DEFAULT, init);
+ }
+ return EconomicSet.create(Equivalence.DEFAULT);
+ }
+
+ /**
+ * Gets an unmodifiable view of the methods that were inlined while constructing this graph.
*/
public List<ResolvedJavaMethod> getMethods() {
- return methods;
+ if (methods != null) {
+ assert checkFrameStatesAgainstInlinedMethods();
+ return Collections.unmodifiableList(methods);
+ }
+ return Collections.emptyList();
}
/**
* Records that {@code method} was used to build this graph.
*/
public void recordMethod(ResolvedJavaMethod method) {
+ if (methods == null) {
+ throw new GraalError("inlined method recording not enabled for %s", this);
+ }
methods.add(method);
}
@@ -880,12 +943,21 @@
* to build another graph.
*/
public void updateMethods(StructuredGraph other) {
- assert this != other;
- this.methods.addAll(other.methods);
+ if (methods == null) {
+ throw new GraalError("inlined method recording not enabled for %s", this);
+ }
+ if (other.rootMethod != null) {
+ methods.add(other.rootMethod);
+ }
+ for (ResolvedJavaMethod m : other.methods) {
+ methods.add(m);
+ }
}
/**
- * Gets the fields that were accessed while constructing this graph.
+ * Gets an unmodifiable view of the fields that were accessed while constructing this graph.
+ *
+ * @return {@code null} if no field accesses were recorded
*/
public EconomicSet<ResolvedJavaField> getFields() {
return fields;
@@ -897,7 +969,7 @@
public void recordField(ResolvedJavaField field) {
assert GraalOptions.GeneratePIC.getValue(getOptions());
if (this.fields == null) {
- this.fields = EconomicSet.create(Equivalence.IDENTITY);
+ this.fields = createFieldSet(null);
}
fields.add(field);
}
@@ -911,7 +983,7 @@
assert GraalOptions.GeneratePIC.getValue(getOptions());
if (other.fields != null) {
if (this.fields == null) {
- this.fields = EconomicSet.create(Equivalence.IDENTITY);
+ this.fields = createFieldSet(null);
}
this.fields.addAll(other.fields);
}
@@ -925,8 +997,13 @@
*/
public int getBytecodeSize() {
int res = 0;
- for (ResolvedJavaMethod e : methods) {
- res += e.getCodeSize();
+ if (rootMethod != null) {
+ res += rootMethod.getCodeSize();
+ }
+ if (methods != null) {
+ for (ResolvedJavaMethod e : methods) {
+ res += e.getCodeSize();
+ }
}
return res;
}
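
A minimal usage sketch of the new recordInlinedMethods builder option (hypothetical client code, not part of this change; options, debug and rootMethod are assumed to come from a test context such as StructuredGraphTest above):

    // Build a graph with inlined-method recording disabled (sketch).
    StructuredGraph g = new StructuredGraph.Builder(options, debug, AllowAssumptions.YES)
                    .method(rootMethod)
                    .recordInlinedMethods(false)
                    .build();
    assert g.getMethods().isEmpty();                          // empty list, not null
    assert g.getBytecodeSize() == rootMethod.getCodeSize();   // root method still counted
    // g.recordMethod(otherMethod) would throw a GraalError because recording is disabled.
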
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SignExtendNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/calc/SignExtendNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -24,12 +24,14 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_1;
+import jdk.vm.ci.code.CodeUtil;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.IntegerConvertOp;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.IntegerConvertOp.Narrow;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.IntegerConvertOp.SignExtend;
import org.graalvm.compiler.core.common.type.IntegerStamp;
import org.graalvm.compiler.core.common.type.PrimitiveStamp;
+import org.graalvm.compiler.core.common.type.Stamp;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.CanonicalizerTool;
import org.graalvm.compiler.lir.gen.ArithmeticLIRGeneratorTool;
@@ -107,7 +109,28 @@
return ZeroExtendNode.create(forValue, inputBits, resultBits, view, true);
}
}
-
+ if (forValue instanceof NarrowNode) {
+ NarrowNode narrow = (NarrowNode) forValue;
+ Stamp inputStamp = narrow.getValue().stamp(view);
+ if (inputStamp instanceof IntegerStamp) {
+ IntegerStamp istamp = (IntegerStamp) inputStamp;
+ long mask = CodeUtil.mask(PrimitiveStamp.getBits(narrow.stamp(view)) - 1);
+ if (~mask <= istamp.lowerBound() && istamp.upperBound() <= mask) {
+ // The original value cannot change because of the narrow and sign extend.
+ if (istamp.getBits() < resultBits) {
+ // Need to keep the sign extend, skip the narrow.
+ return create(narrow.getValue(), resultBits, view);
+ } else if (istamp.getBits() > resultBits) {
+ // Need to keep the narrow, skip the sign extend.
+ return NarrowNode.create(narrow.getValue(), resultBits, view);
+ } else {
+ assert istamp.getBits() == resultBits;
+ // Just return the original value.
+ return narrow.getValue();
+ }
+ }
+ }
+ }
return self != null ? self : new SignExtendNode(forValue, inputBits, resultBits);
}
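
The new canonicalization above only fires when the intermediate narrow provably loses no information. A worked example of the branch that drops both nodes (illustrative values, not taken from the patch):

    // Suppose narrow.getValue() is a 64-bit value whose stamp is i64 [0, 100].
    // Narrowing it to 32 bits and sign-extending back to 64 bits cannot change it,
    // so SignExtend(Narrow(x, 64 -> 32), 32 -> 64) folds to x itself. In plain Java:
    long x = 42;                // any value within the stamp's bounds
    long roundTripped = (long) (int) x;
    assert roundTripped == x;   // the Narrow/SignExtend pair is a no-op here
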
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/BranchProbabilityNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/BranchProbabilityNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -25,7 +25,10 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_0;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_0;
+import jdk.vm.ci.meta.JavaKind;
import org.graalvm.compiler.core.common.calc.CanonicalCondition;
+import org.graalvm.compiler.core.common.type.IntegerStamp;
+import org.graalvm.compiler.core.common.type.StampFactory;
import org.graalvm.compiler.debug.GraalError;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.iterators.NodePredicates;
@@ -40,6 +43,8 @@
import org.graalvm.compiler.nodes.calc.ConditionalNode;
import org.graalvm.compiler.nodes.calc.FloatingNode;
import org.graalvm.compiler.nodes.calc.IntegerEqualsNode;
+import org.graalvm.compiler.nodes.calc.NarrowNode;
+import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.spi.Lowerable;
import org.graalvm.compiler.nodes.spi.LoweringTool;
@@ -68,7 +73,7 @@
@Input ValueNode condition;
public BranchProbabilityNode(ValueNode probability, ValueNode condition) {
- super(TYPE, condition.stamp(NodeView.DEFAULT));
+ super(TYPE, StampFactory.forKind(JavaKind.Boolean));
this.probability = probability;
this.condition = condition;
}
@@ -122,6 +127,11 @@
}
if (usageFound) {
ValueNode currentCondition = condition;
+ IntegerStamp currentStamp = (IntegerStamp) currentCondition.stamp(NodeView.DEFAULT);
+ if (currentStamp.lowerBound() < 0 || 1 < currentStamp.upperBound()) {
+ ValueNode narrow = graph().maybeAddOrUnique(NarrowNode.create(currentCondition, 1, NodeView.DEFAULT));
+ currentCondition = graph().maybeAddOrUnique(ZeroExtendNode.create(narrow, 32, NodeView.DEFAULT));
+ }
replaceAndDelete(currentCondition);
if (tool != null) {
tool.addToWorkList(currentCondition.usages());
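
With the node now always stamped as boolean, the canonicalization above also normalizes an injected condition whose stamp is wider than [0, 1]. A plain-Java analogue of the Narrow(1 bit) followed by ZeroExtend(32 bit) sequence (illustrative only):

    static int normalizeCondition(int value) {
        // Keeping only the lowest bit maps the value into {0, 1}, which is what
        // narrowing to 1 bit and zero-extending back to 32 bits computes.
        return value & 1;
    }
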
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/RawLoadNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/RawLoadNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -158,8 +158,8 @@
}
@Override
- protected ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field) {
- return LoadFieldNode.create(assumptions, object(), field);
+ protected ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field, boolean volatileAccess) {
+ return LoadFieldNode.create(assumptions, object(), field, volatileAccess);
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/RawStoreNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/RawStoreNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -128,8 +128,8 @@
}
@Override
- protected ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field) {
- return new StoreFieldNode(object(), field, value(), stateAfter());
+ protected ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field, boolean volatileAccess) {
+ return new StoreFieldNode(object(), field, value(), stateAfter(), volatileAccess);
}
@Override
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/UnsafeAccessNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/extended/UnsafeAccessNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -99,7 +99,11 @@
// never a valid access of an arbitrary address.
if (field != null && field.getJavaKind() == this.accessKind()) {
assert !graph().isAfterFloatingReadPhase() : "cannot add more precise memory location after floating read phase";
- return cloneAsFieldAccess(graph().getAssumptions(), field);
+ // Unsafe accesses never have volatile semantics.
+ // Memory barriers are placed around such an unsafe access at construction
+ // time if necessary, unlike AccessFieldNodes, which encapsulate their
+ // potential volatile semantics.
+ return cloneAsFieldAccess(graph().getAssumptions(), field, false);
}
}
}
@@ -115,7 +119,11 @@
return this;
}
- protected abstract ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field);
+ protected ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field) {
+ return cloneAsFieldAccess(assumptions, field, field.isVolatile());
+ }
+
+ protected abstract ValueNode cloneAsFieldAccess(Assumptions assumptions, ResolvedJavaField field, boolean volatileAccess);
protected abstract ValueNode cloneAsArrayAccess(ValueNode location, LocationIdentity identity);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderContext.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/GraphBuilderContext.java Mon Jun 18 09:48:22 2018 -0700
@@ -43,6 +43,9 @@
import org.graalvm.compiler.nodes.StateSplit;
import org.graalvm.compiler.nodes.ValueNode;
import org.graalvm.compiler.nodes.calc.IsNullNode;
+import org.graalvm.compiler.nodes.calc.NarrowNode;
+import org.graalvm.compiler.nodes.calc.SignExtendNode;
+import org.graalvm.compiler.nodes.calc.ZeroExtendNode;
import org.graalvm.compiler.nodes.type.StampTool;
import jdk.vm.ci.code.BailoutException;
@@ -312,4 +315,25 @@
return null;
}
+ /**
+ * Adds masking to a given subword value according to a given {@link JavaKind}, such that the
+ * masked value falls in the range of the given kind. If the given kind is not a subword kind,
+ * the input value is returned unchanged.
+ *
+ * @param value the value to be masked
+ * @param kind the kind that specifies the range of the masked value
+ * @return the masked value
+ */
+ default ValueNode maskSubWordValue(ValueNode value, JavaKind kind) {
+ if (kind == kind.getStackKind()) {
+ return value;
+ }
+ // Subword value
+ ValueNode narrow = append(NarrowNode.create(value, kind.getBitCount(), NodeView.DEFAULT));
+ if (kind.isUnsigned()) {
+ return append(ZeroExtendNode.create(narrow, 32, NodeView.DEFAULT));
+ } else {
+ return append(SignExtendNode.create(narrow, 32, NodeView.DEFAULT));
+ }
+ }
}
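
A plain-Java analogue of the node sequence built by maskSubWordValue, useful for reasoning about the expected results (illustrative helper, not part of the change):

    // Sign- or zero-extends the low 'bits' bits of 'value' to a full 32-bit int,
    // mirroring NarrowNode followed by SignExtendNode/ZeroExtendNode for subword kinds.
    static int maskSubWord(int value, int bits, boolean unsigned) {
        int shift = 32 - bits;
        int narrowed = value << shift;
        return unsigned ? narrowed >>> shift : narrowed >> shift;
    }
    // maskSubWord(0x1234, 8, false)  == 0x34    (byte)
    // maskSubWord(0xFF,   8, false)  == -1      (byte)
    // maskSubWord(0xFFFF, 16, true)  == 0xFFFF  (char)
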
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InvocationPlugins.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/graphbuilderconf/InvocationPlugins.java Mon Jun 18 09:48:22 2018 -0700
@@ -948,11 +948,11 @@
* non-static. Upon returning, element 0 will have been rewritten to
* {@code declaringClass}
*/
- public void register(InvocationPlugin plugin, Type declaringClass, String name, Type... argumentTypes) {
+ public final void register(InvocationPlugin plugin, Type declaringClass, String name, Type... argumentTypes) {
register(plugin, false, false, declaringClass, name, argumentTypes);
}
- public void register(InvocationPlugin plugin, String declaringClass, String name, Type... argumentTypes) {
+ public final void register(InvocationPlugin plugin, String declaringClass, String name, Type... argumentTypes) {
register(plugin, false, false, new OptionalLazySymbol(declaringClass), name, argumentTypes);
}
@@ -965,7 +965,7 @@
* non-static. Upon returning, element 0 will have been rewritten to
* {@code declaringClass}
*/
- public void registerOptional(InvocationPlugin plugin, Type declaringClass, String name, Type... argumentTypes) {
+ public final void registerOptional(InvocationPlugin plugin, Type declaringClass, String name, Type... argumentTypes) {
register(plugin, true, false, declaringClass, name, argumentTypes);
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/AccessFieldNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/AccessFieldNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -48,6 +48,7 @@
@OptionalInput ValueNode object;
protected final ResolvedJavaField field;
+ protected final boolean volatileAccess;
public ValueNode object() {
return object;
@@ -58,11 +59,24 @@
*
* @param object the instruction producing the receiver object
* @param field the compiler interface representation of the field
+ * @param volatileAccess specifies whether the access has volatile semantics; this overrides
+ * the field's volatile modifier.
*/
- public AccessFieldNode(NodeClass<? extends AccessFieldNode> c, Stamp stamp, ValueNode object, ResolvedJavaField field) {
+ public AccessFieldNode(NodeClass<? extends AccessFieldNode> c, Stamp stamp, ValueNode object, ResolvedJavaField field, boolean volatileAccess) {
super(c, stamp);
this.object = object;
this.field = field;
+ this.volatileAccess = volatileAccess;
+ }
+
+ /**
+ * Constructs a new access field object.
+ *
+ * @param object the instruction producing the receiver object
+ * @param field the compiler interface representation of the field
+ */
+ public AccessFieldNode(NodeClass<? extends AccessFieldNode> c, Stamp stamp, ValueNode object, ResolvedJavaField field) {
+ this(c, stamp, object, field, field.isVolatile());
}
/**
@@ -84,12 +98,13 @@
}
/**
- * Checks whether this field is declared volatile.
+ * Checks whether this access has volatile semantics.
*
- * @return {@code true} if the field is resolved and declared volatile
+ * The volatile semantics are coupled to the access and not to the field. For example, it is
+ * possible to access a volatile field with non-volatile semantics via VarHandles.
*/
public boolean isVolatile() {
- return field.isVolatile();
+ return volatileAccess;
}
@Override
@@ -114,7 +129,7 @@
@Override
public NodeSize estimatedNodeSize() {
- if (field.isVolatile()) {
+ if (isVolatile()) {
return SIZE_2;
}
return super.estimatedNodeSize();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/LoadFieldNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/LoadFieldNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -69,12 +69,20 @@
private final Stamp uncheckedStamp;
protected LoadFieldNode(StampPair stamp, ValueNode object, ResolvedJavaField field) {
- super(TYPE, stamp.getTrustedStamp(), object, field);
+ this(stamp, object, field, field.isVolatile());
+ }
+
+ protected LoadFieldNode(StampPair stamp, ValueNode object, ResolvedJavaField field, boolean volatileAccess) {
+ super(TYPE, stamp.getTrustedStamp(), object, field, volatileAccess);
this.uncheckedStamp = stamp.getUncheckedStamp();
}
public static LoadFieldNode create(Assumptions assumptions, ValueNode object, ResolvedJavaField field) {
- return new LoadFieldNode(StampFactory.forDeclaredType(assumptions, field.getType(), false), object, field);
+ return create(assumptions, object, field, field.isVolatile());
+ }
+
+ public static LoadFieldNode create(Assumptions assumptions, ValueNode object, ResolvedJavaField field, boolean volatileAccess) {
+ return new LoadFieldNode(StampFactory.forDeclaredType(assumptions, field.getType(), false), object, field, volatileAccess);
}
public static ValueNode create(ConstantFieldProvider constantFields, ConstantReflectionProvider constantReflection, MetaAccessProvider metaAccess,
@@ -202,7 +210,7 @@
@Override
public NodeCycles estimatedNodeCycles() {
- if (field.isVolatile()) {
+ if (isVolatile()) {
return CYCLES_2;
}
return super.estimatedNodeCycles();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/StoreFieldNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/StoreFieldNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -71,12 +71,16 @@
}
public StoreFieldNode(ValueNode object, ResolvedJavaField field, ValueNode value) {
- super(TYPE, StampFactory.forVoid(), object, field);
+ this(object, field, value, field.isVolatile());
+ }
+
+ public StoreFieldNode(ValueNode object, ResolvedJavaField field, ValueNode value, boolean volatileAccess) {
+ super(TYPE, StampFactory.forVoid(), object, field, volatileAccess);
this.value = value;
}
- public StoreFieldNode(ValueNode object, ResolvedJavaField field, ValueNode value, FrameState stateAfter) {
- super(TYPE, StampFactory.forVoid(), object, field);
+ public StoreFieldNode(ValueNode object, ResolvedJavaField field, ValueNode value, FrameState stateAfter, boolean volatileAccess) {
+ super(TYPE, StampFactory.forVoid(), object, field, volatileAccess);
this.value = value;
this.stateAfter = stateAfter;
}
@@ -100,7 +104,7 @@
@Override
public NodeCycles estimatedNodeCycles() {
- if (field.isVolatile()) {
+ if (isVolatile()) {
return CYCLES_8;
}
return super.estimatedNodeCycles();
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/UnsafeCompareAndExchangeNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/java/UnsafeCompareAndExchangeNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/elem/InlineableGraph.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.phases.common/src/org/graalvm/compiler/phases/common/inlining/info/elem/InlineableGraph.java Mon Jun 18 09:48:22 2018 -0700
@@ -67,27 +67,20 @@
private FixedNodeProbabilityCache probabilites = new FixedNodeProbabilityCache();
public InlineableGraph(final ResolvedJavaMethod method, final Invoke invoke, final HighTierContext context, CanonicalizerPhase canonicalizer, boolean trackNodeSourcePosition) {
- StructuredGraph original = getOriginalGraph(method, context, canonicalizer, invoke.asNode().graph(), invoke.bci(), trackNodeSourcePosition);
- // TODO copying the graph is only necessary if it is modified or if it contains any invokes
- this.graph = (StructuredGraph) original.copy(invoke.asNode().getDebug());
+ StructuredGraph original = InliningUtil.getIntrinsicGraph(context.getReplacements(), method, invoke.bci(), trackNodeSourcePosition, null);
+ if (original == null) {
+ original = parseBytecodes(method, context, canonicalizer, invoke.asNode().graph(), trackNodeSourcePosition);
+ } else if (original.isFrozen()) {
+ // The graph may be modified by specializeGraphToArguments, so defensively
+ // make a copy. We rely on the frozen state of a graph to denote
+ // whether it is shared.
+ original = (StructuredGraph) original.copy(invoke.asNode().getDebug());
+ }
+ this.graph = original;
specializeGraphToArguments(invoke, context, canonicalizer);
}
/**
- * This method looks up in a cache the graph for the argument, if not found bytecode is parsed.
- * The graph thus obtained is returned, ie the caller is responsible for cloning before
- * modification.
- */
- private static StructuredGraph getOriginalGraph(final ResolvedJavaMethod method, final HighTierContext context, CanonicalizerPhase canonicalizer, StructuredGraph caller, int callerBci,
- boolean trackNodeSourcePosition) {
- StructuredGraph result = InliningUtil.getIntrinsicGraph(context.getReplacements(), method, callerBci, trackNodeSourcePosition, null);
- if (result != null) {
- return result;
- }
- return parseBytecodes(method, context, canonicalizer, caller, trackNodeSourcePosition);
- }
-
- /**
* @return true iff one or more parameters <code>newGraph</code> were specialized to account for
* a constant argument, or an argument with a more specific stamp.
*/
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.processor/src/org/graalvm/compiler/processor/AbstractProcessor.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.processor/src/org/graalvm/compiler/processor/AbstractProcessor.java Mon Jun 18 09:48:22 2018 -0700
@@ -74,7 +74,7 @@
/**
* Gets the {@link TypeMirror} for a given class name.
*
- * @rturn {@code null} if the class cannot be resolved
+ * @return {@code null} if the class cannot be resolved
*/
public TypeMirror getTypeOrNull(String className) {
TypeElement element = getTypeElementOrNull(className);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64GraphBuilderPlugins.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.aarch64/src/org/graalvm/compiler/replacements/aarch64/AArch64GraphBuilderPlugins.java Mon Jun 18 09:48:22 2018 -0700
@@ -41,6 +41,7 @@
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins;
import org.graalvm.compiler.nodes.graphbuilderconf.InvocationPlugins.Registration;
import org.graalvm.compiler.nodes.java.AtomicReadAndAddNode;
+import org.graalvm.compiler.nodes.java.AtomicReadAndWriteNode;
import org.graalvm.compiler.nodes.memory.address.AddressNode;
import org.graalvm.compiler.nodes.memory.address.OffsetAddressNode;
import org.graalvm.compiler.replacements.StandardGraphBuilderPlugins;
@@ -65,7 +66,7 @@
registerMathPlugins(invocationPlugins);
registerStringLatin1Plugins(invocationPlugins, bytecodeProvider);
registerStringUTF16Plugins(invocationPlugins, bytecodeProvider);
- registerUnsafeReadAndAddPlugins(invocationPlugins, bytecodeProvider);
+ registerUnsafePlugins(invocationPlugins, bytecodeProvider);
// This is temporarily disabled until we implement correct emitting of the CAS
// instructions of the proper width.
StandardGraphBuilderPlugins.registerPlatformSpecificUnsafePlugins(invocationPlugins, bytecodeProvider,
@@ -163,18 +164,32 @@
}
}
- private static void registerUnsafeReadAndAddPlugins(InvocationPlugins plugins, BytecodeProvider replacementsBytecodeProvider) {
+ private static void registerUnsafePlugins(InvocationPlugins plugins, BytecodeProvider replacementsBytecodeProvider) {
Registration r;
+ JavaKind[] unsafeJavaKinds;
if (Java8OrEarlier) {
r = new Registration(plugins, Unsafe.class);
+ unsafeJavaKinds = new JavaKind[]{JavaKind.Int, JavaKind.Long, JavaKind.Object};
} else {
r = new Registration(plugins, "jdk.internal.misc.Unsafe", replacementsBytecodeProvider);
+ unsafeJavaKinds = new JavaKind[]{JavaKind.Int, JavaKind.Long, JavaKind.Object};
}
- for (JavaKind kind : new JavaKind[]{JavaKind.Int, JavaKind.Long, JavaKind.Object}) {
+ for (JavaKind kind : unsafeJavaKinds) {
Class<?> javaClass = kind == JavaKind.Object ? Object.class : kind.toJavaClass();
- if (kind != JavaKind.Object) {
+ r.register4("getAndSet" + kind.name(), Receiver.class, Object.class, long.class, javaClass, new InvocationPlugin() {
+ @Override
+ public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode value) {
+ // Emits a null-check for the otherwise unused receiver
+ unsafe.get();
+ b.addPush(kind, new AtomicReadAndWriteNode(object, offset, value, kind, LocationIdentity.any()));
+ b.getGraph().markUnsafeAccess();
+ return true;
+ }
+ });
+
+ if (kind != JavaKind.Boolean && kind.isNumericInteger()) {
r.register4("getAndAdd" + kind.name(), Receiver.class, Object.class, long.class, javaClass, new InvocationPlugin() {
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode delta) {
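
For context, the registrations above intrinsify the standard Unsafe atomic-update methods. Example of the Java-level calls they apply to (a sketch; getAndSet maps to AtomicReadAndWriteNode as shown above, and getAndAdd presumably to the imported AtomicReadAndAddNode):

    int previousInt = unsafe.getAndSetInt(object, offset, 42);
    long previousLong = unsafe.getAndAddLong(object, offset, 1L);
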
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64FloatConvertNode.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64FloatConvertNode.java Mon Jun 18 09:48:22 2018 -0700
@@ -25,8 +25,12 @@
import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_8;
import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_1;
+import jdk.vm.ci.meta.JavaConstant;
import org.graalvm.compiler.core.common.calc.FloatConvert;
import org.graalvm.compiler.core.common.type.ArithmeticOpTable.FloatConvertOp;
+import org.graalvm.compiler.core.common.type.IntegerStamp;
+import org.graalvm.compiler.core.common.type.Stamp;
+import org.graalvm.compiler.core.common.type.StampFactory;
import org.graalvm.compiler.graph.NodeClass;
import org.graalvm.compiler.graph.spi.CanonicalizerTool;
import org.graalvm.compiler.lir.gen.ArithmeticLIRGeneratorTool;
@@ -41,6 +45,9 @@
* This node has the semantics of the AMD64 floating point conversions. It is used in the lowering
* of the {@link FloatConvertNode} which, on AMD64 needs a {@link AMD64FloatConvertNode} plus some
* fixup code that handles the corner cases that differ between AMD64 and Java.
+ *
+ * Since this node evaluates to a special value if the conversion is inexact, its stamp must be
+ * modified to avoid optimizing away {@link AMD64ConvertSnippets}.
*/
@NodeInfo(cycles = CYCLES_8, size = SIZE_1)
public final class AMD64FloatConvertNode extends UnaryArithmeticNode<FloatConvertOp> implements ArithmeticLIRLowerable {
@@ -51,6 +58,7 @@
public AMD64FloatConvertNode(FloatConvert op, ValueNode value) {
super(TYPE, table -> table.getFloatConvert(op), value);
this.op = op;
+ this.stamp = this.stamp.meet(createInexactCaseStamp());
}
@Override
@@ -60,6 +68,20 @@
}
@Override
+ public Stamp foldStamp(Stamp newStamp) {
+ // The semantics of the x64 CVTTSS2SI instruction allow returning 0x80000000 (or
+ // 0x8000000000000000 for 64-bit results) in the special cases.
+ Stamp foldedStamp = super.foldStamp(newStamp);
+ return foldedStamp.meet(createInexactCaseStamp());
+ }
+
+ private Stamp createInexactCaseStamp() {
+ IntegerStamp intStamp = (IntegerStamp) this.stamp;
+ long inexactValue = intStamp.getBits() <= 32 ? 0x8000_0000L : 0x8000_0000_0000_0000L;
+ return StampFactory.forConstant(JavaConstant.forPrimitiveInt(intStamp.getBits(), inexactValue));
+ }
+
+ @Override
public void generate(NodeLIRBuilderTool nodeValueMap, ArithmeticLIRGeneratorTool gen) {
nodeValueMap.setResult(this, gen.emitFloatConvert(op, nodeValueMap.operand(getValue())));
}
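
The stamp widening above is needed because the raw conversion result differs from Java semantics for NaN and out-of-range inputs; the snippets mentioned in the class comment patch up the difference. For reference, the Java-level expectations (plain Java facts, not patch content):

    assert (int) Float.NaN == 0;              // Java: NaN converts to 0
    assert (int) 1e20f == Integer.MAX_VALUE;  // Java: positive overflow saturates
    assert (int) -1e20f == Integer.MIN_VALUE; // Java: negative overflow saturates
    // The bare CVTTSS2SI result in all of these inexact cases is 0x8000_0000,
    // which is why that constant is merged into the node's stamp above.
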
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64GraphBuilderPlugins.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.amd64/src/org/graalvm/compiler/replacements/amd64/AMD64GraphBuilderPlugins.java Mon Jun 18 09:48:22 2018 -0700
@@ -220,16 +220,14 @@
}
private static void registerUnsafePlugins(InvocationPlugins plugins, BytecodeProvider replacementsBytecodeProvider) {
- Registration r;
- JavaKind[] unsafeJavaKinds;
- if (Java8OrEarlier) {
- r = new Registration(plugins, Unsafe.class);
- unsafeJavaKinds = new JavaKind[]{JavaKind.Int, JavaKind.Long, JavaKind.Object};
- } else {
- r = new Registration(plugins, "jdk.internal.misc.Unsafe", replacementsBytecodeProvider);
- unsafeJavaKinds = new JavaKind[]{JavaKind.Boolean, JavaKind.Byte, JavaKind.Char, JavaKind.Short, JavaKind.Int, JavaKind.Long, JavaKind.Object};
+ registerUnsafePlugins(new Registration(plugins, Unsafe.class), new JavaKind[]{JavaKind.Int, JavaKind.Long, JavaKind.Object});
+ if (!Java8OrEarlier) {
+ registerUnsafePlugins(new Registration(plugins, "jdk.internal.misc.Unsafe", replacementsBytecodeProvider),
+ new JavaKind[]{JavaKind.Boolean, JavaKind.Byte, JavaKind.Char, JavaKind.Short, JavaKind.Int, JavaKind.Long, JavaKind.Object});
}
+ }
+ private static void registerUnsafePlugins(Registration r, JavaKind[] unsafeJavaKinds) {
for (JavaKind kind : unsafeJavaKinds) {
Class<?> javaClass = kind == JavaKind.Object ? Object.class : kind.toJavaClass();
@@ -260,8 +258,8 @@
for (JavaKind kind : new JavaKind[]{JavaKind.Char, JavaKind.Short, JavaKind.Int, JavaKind.Long}) {
Class<?> javaClass = kind.toJavaClass();
- r.registerOptional3("get" + kind.name() + "Unaligned", Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind, false));
- r.registerOptional4("put" + kind.name() + "Unaligned", Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind, false));
+ r.registerOptional3("get" + kind.name() + "Unaligned", Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind));
+ r.registerOptional4("put" + kind.name() + "Unaligned", Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind));
}
}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.jdk9.test/src/org/graalvm/compiler/replacements/jdk9/UnsafeReplacementsTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.jdk9.test/src/org/graalvm/compiler/replacements/jdk9/UnsafeReplacementsTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -22,9 +22,9 @@
*/
package org.graalvm.compiler.replacements.jdk9;
+import jdk.vm.ci.aarch64.AArch64;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.code.TargetDescription;
-import jdk.vm.ci.meta.ResolvedJavaMethod;
import org.graalvm.compiler.api.test.Graal;
import org.graalvm.compiler.core.phases.HighTier;
import org.graalvm.compiler.options.OptionValues;
@@ -38,9 +38,6 @@
@AddExports("java.base/jdk.internal.misc")
public class UnsafeReplacementsTest extends MethodSubstitutionTest {
- // See GR-9819.
- @SuppressWarnings("unused") ResolvedJavaMethod method = null;
-
static class Container {
public volatile boolean booleanField;
public volatile byte byteField = 17;
@@ -247,6 +244,8 @@
testGraph("unsafeGetAndAddByte");
testGraph("unsafeGetAndAddChar");
testGraph("unsafeGetAndAddShort");
+ }
+ if (target.arch instanceof AMD64 || target.arch instanceof AArch64) {
testGraph("unsafeGetAndAddInt");
testGraph("unsafeGetAndAddLong");
}
@@ -302,6 +301,8 @@
testGraph("unsafeGetAndSetByte");
testGraph("unsafeGetAndSetChar");
testGraph("unsafeGetAndSetShort");
+ }
+ if (target.arch instanceof AMD64 || target.arch instanceof AArch64) {
testGraph("unsafeGetAndSetInt");
testGraph("unsafeGetAndSetLong");
testGraph("unsafeGetAndSetObject");
@@ -342,6 +343,12 @@
test(new OptionValues(getInitialOptions(), HighTier.Options.Inline, false), "fieldStatic");
}
+ public static void assertEquals(Object seen, Object expected, String message) {
+ if (seen != expected) {
+ throw new AssertionError(message + " - seen: " + seen + ", expected: " + expected);
+ }
+ }
+
public static class JdkInternalMiscUnsafeAccessTestBoolean {
static final int ITERATIONS = 100000;
@@ -405,12 +412,6 @@
}
}
- public static void assertEquals(Object seen, Object expected, String message) {
- if (seen != expected) {
- throw new AssertionError(message + " - seen: " + seen + ", expected: " + expected);
- }
- }
-
// Checkstyle: stop
@BytecodeParserForceInline
public static void testAccess(Object base, long offset) {
@@ -510,4 +511,312 @@
}
// Checkstyle: resume
}
+
+ public static boolean unsafeGetPutBoolean() {
+ Container container = new Container();
+ unsafe.putBoolean(container, booleanOffset, true);
+ return unsafe.getBoolean(container, booleanOffset);
+ }
+
+ public static byte unsafeGetPutByte() {
+ Container container = new Container();
+ unsafe.putByte(container, byteOffset, (byte) 0x12);
+ return unsafe.getByte(container, byteOffset);
+ }
+
+ public static short unsafeGetPutShort() {
+ Container container = new Container();
+ unsafe.putShort(container, shortOffset, (short) 0x1234);
+ return unsafe.getShort(container, shortOffset);
+ }
+
+ public static char unsafeGetPutChar() {
+ Container container = new Container();
+ unsafe.putChar(container, charOffset, 'x');
+ return unsafe.getChar(container, charOffset);
+ }
+
+ public static int unsafeGetPutInt() {
+ Container container = new Container();
+ unsafe.putInt(container, intOffset, 0x01234567);
+ return unsafe.getInt(container, intOffset);
+ }
+
+ public static long unsafeGetPutLong() {
+ Container container = new Container();
+ unsafe.putLong(container, longOffset, 0x01234567890ABCDEFL);
+ return unsafe.getLong(container, longOffset);
+ }
+
+ public static float unsafeGetPutFloat() {
+ Container container = new Container();
+ unsafe.putFloat(container, floatOffset, 1.234F);
+ return unsafe.getFloat(container, floatOffset);
+ }
+
+ public static double unsafeGetPutDouble() {
+ Container container = new Container();
+ unsafe.putDouble(container, doubleOffset, 1.23456789);
+ return unsafe.getDouble(container, doubleOffset);
+ }
+
+ public static Object unsafeGetPutObject() {
+ Container container = new Container();
+ unsafe.putObject(container, objectOffset, "Hello there");
+ return unsafe.getObject(container, objectOffset);
+ }
+
+ public static boolean unsafeGetPutBooleanOpaque() {
+ Container container = new Container();
+ unsafe.putBooleanOpaque(container, booleanOffset, true);
+ return unsafe.getBooleanOpaque(container, booleanOffset);
+ }
+
+ public static byte unsafeGetPutByteOpaque() {
+ Container container = new Container();
+ unsafe.putByteOpaque(container, byteOffset, (byte) 0x12);
+ return unsafe.getByteOpaque(container, byteOffset);
+ }
+
+ public static short unsafeGetPutShortOpaque() {
+ Container container = new Container();
+ unsafe.putShortOpaque(container, shortOffset, (short) 0x1234);
+ return unsafe.getShortOpaque(container, shortOffset);
+ }
+
+ public static char unsafeGetPutCharOpaque() {
+ Container container = new Container();
+ unsafe.putCharOpaque(container, charOffset, 'x');
+ return unsafe.getCharOpaque(container, charOffset);
+ }
+
+ public static int unsafeGetPutIntOpaque() {
+ Container container = new Container();
+ unsafe.putIntOpaque(container, intOffset, 0x01234567);
+ return unsafe.getIntOpaque(container, intOffset);
+ }
+
+ public static long unsafeGetPutLongOpaque() {
+ Container container = new Container();
+ unsafe.putLongOpaque(container, longOffset, 0x01234567890ABCDEFL);
+ return unsafe.getLongOpaque(container, longOffset);
+ }
+
+ public static float unsafeGetPutFloatOpaque() {
+ Container container = new Container();
+ unsafe.putFloatOpaque(container, floatOffset, 1.234F);
+ return unsafe.getFloatOpaque(container, floatOffset);
+ }
+
+ public static double unsafeGetPutDoubleOpaque() {
+ Container container = new Container();
+ unsafe.putDoubleOpaque(container, doubleOffset, 1.23456789);
+ return unsafe.getDoubleOpaque(container, doubleOffset);
+ }
+
+ public static Object unsafeGetPutObjectOpaque() {
+ Container container = new Container();
+ unsafe.putObjectOpaque(container, objectOffset, "Hello there");
+ return unsafe.getObjectOpaque(container, objectOffset);
+ }
+
+ public static boolean unsafeGetPutBooleanRA() {
+ Container container = new Container();
+ unsafe.putBooleanRelease(container, booleanOffset, true);
+ return unsafe.getBooleanAcquire(container, booleanOffset);
+ }
+
+ public static byte unsafeGetPutByteRA() {
+ Container container = new Container();
+ unsafe.putByteRelease(container, byteOffset, (byte) 0x12);
+ return unsafe.getByteAcquire(container, byteOffset);
+ }
+
+ public static short unsafeGetPutShortRA() {
+ Container container = new Container();
+ unsafe.putShortRelease(container, shortOffset, (short) 0x1234);
+ return unsafe.getShortAcquire(container, shortOffset);
+ }
+
+ public static char unsafeGetPutCharRA() {
+ Container container = new Container();
+ unsafe.putCharRelease(container, charOffset, 'x');
+ return unsafe.getCharAcquire(container, charOffset);
+ }
+
+ public static int unsafeGetPutIntRA() {
+ Container container = new Container();
+ unsafe.putIntRelease(container, intOffset, 0x01234567);
+ return unsafe.getIntAcquire(container, intOffset);
+ }
+
+ public static long unsafeGetPutLongRA() {
+ Container container = new Container();
+ unsafe.putLongRelease(container, longOffset, 0x01234567890ABCDEFL);
+ return unsafe.getLongAcquire(container, longOffset);
+ }
+
+ public static float unsafeGetPutFloatRA() {
+ Container container = new Container();
+ unsafe.putFloatRelease(container, floatOffset, 1.234F);
+ return unsafe.getFloatAcquire(container, floatOffset);
+ }
+
+ public static double unsafeGetPutDoubleRA() {
+ Container container = new Container();
+ unsafe.putDoubleRelease(container, doubleOffset, 1.23456789);
+ return unsafe.getDoubleAcquire(container, doubleOffset);
+ }
+
+ public static Object unsafeGetPutObjectRA() {
+ Container container = new Container();
+ unsafe.putObjectRelease(container, objectOffset, "Hello there");
+ return unsafe.getObjectAcquire(container, objectOffset);
+ }
+
+ public static boolean unsafeGetPutBooleanVolatile() {
+ Container container = new Container();
+ unsafe.putBooleanVolatile(container, booleanOffset, true);
+ return unsafe.getBooleanVolatile(container, booleanOffset);
+ }
+
+ public static byte unsafeGetPutByteVolatile() {
+ Container container = new Container();
+ unsafe.putByteVolatile(container, byteOffset, (byte) 0x12);
+ return unsafe.getByteVolatile(container, byteOffset);
+ }
+
+ public static short unsafeGetPutShortVolatile() {
+ Container container = new Container();
+ unsafe.putShortVolatile(container, shortOffset, (short) 0x1234);
+ return unsafe.getShortVolatile(container, shortOffset);
+ }
+
+ public static char unsafeGetPutCharVolatile() {
+ Container container = new Container();
+ unsafe.putCharVolatile(container, charOffset, 'x');
+ return unsafe.getCharVolatile(container, charOffset);
+ }
+
+ public static int unsafeGetPutIntVolatile() {
+ Container container = new Container();
+ unsafe.putIntVolatile(container, intOffset, 0x01234567);
+ return unsafe.getIntVolatile(container, intOffset);
+ }
+
+ public static long unsafeGetPutLongVolatile() {
+ Container container = new Container();
+ unsafe.putLongVolatile(container, longOffset, 0x01234567890ABCDEFL);
+ return unsafe.getLongVolatile(container, longOffset);
+ }
+
+ public static float unsafeGetPutFloatVolatile() {
+ Container container = new Container();
+ unsafe.putFloatVolatile(container, floatOffset, 1.234F);
+ return unsafe.getFloatVolatile(container, floatOffset);
+ }
+
+ public static double unsafeGetPutDoubleVolatile() {
+ Container container = new Container();
+ unsafe.putDoubleVolatile(container, doubleOffset, 1.23456789);
+ return unsafe.getDoubleVolatile(container, doubleOffset);
+ }
+
+ public static Object unsafeGetPutObjectVolatile() {
+ Container container = new Container();
+ unsafe.putObjectVolatile(container, objectOffset, "Hello there");
+ return unsafe.getObjectVolatile(container, objectOffset);
+ }
+
+ @Test
+ public void testUnsafeGetPutPlain() {
+ testGraph("unsafeGetPutBoolean");
+ testGraph("unsafeGetPutByte");
+ testGraph("unsafeGetPutShort");
+ testGraph("unsafeGetPutChar");
+ testGraph("unsafeGetPutInt");
+ testGraph("unsafeGetPutLong");
+ testGraph("unsafeGetPutFloat");
+ testGraph("unsafeGetPutDouble");
+ testGraph("unsafeGetPutObject");
+
+ test("unsafeGetPutBoolean");
+ test("unsafeGetPutByte");
+ test("unsafeGetPutShort");
+ test("unsafeGetPutChar");
+ test("unsafeGetPutInt");
+ test("unsafeGetPutLong");
+ test("unsafeGetPutFloat");
+ test("unsafeGetPutDouble");
+ test("unsafeGetPutObject");
+ }
+
+ @Test
+ public void testUnsafeGetPutOpaque() {
+ testGraph("unsafeGetPutBooleanOpaque");
+ testGraph("unsafeGetPutByteOpaque");
+ testGraph("unsafeGetPutShortOpaque");
+ testGraph("unsafeGetPutCharOpaque");
+ testGraph("unsafeGetPutIntOpaque");
+ testGraph("unsafeGetPutLongOpaque");
+ testGraph("unsafeGetPutFloatOpaque");
+ testGraph("unsafeGetPutDoubleOpaque");
+ testGraph("unsafeGetPutObjectOpaque");
+
+ test("unsafeGetPutBooleanOpaque");
+ test("unsafeGetPutByteOpaque");
+ test("unsafeGetPutShortOpaque");
+ test("unsafeGetPutCharOpaque");
+ test("unsafeGetPutIntOpaque");
+ test("unsafeGetPutLongOpaque");
+ test("unsafeGetPutFloatOpaque");
+ test("unsafeGetPutDoubleOpaque");
+ test("unsafeGetPutObjectOpaque");
+ }
+
+ @Test
+ public void testUnsafeGetPutReleaseAcquire() {
+ testGraph("unsafeGetPutBooleanRA");
+ testGraph("unsafeGetPutByteRA");
+ testGraph("unsafeGetPutShortRA");
+ testGraph("unsafeGetPutCharRA");
+ testGraph("unsafeGetPutIntRA");
+ testGraph("unsafeGetPutLongRA");
+ testGraph("unsafeGetPutFloatRA");
+ testGraph("unsafeGetPutDoubleRA");
+ testGraph("unsafeGetPutObjectRA");
+
+ test("unsafeGetPutBooleanRA");
+ test("unsafeGetPutByteRA");
+ test("unsafeGetPutShortRA");
+ test("unsafeGetPutCharRA");
+ test("unsafeGetPutIntRA");
+ test("unsafeGetPutLongRA");
+ test("unsafeGetPutFloatRA");
+ test("unsafeGetPutDoubleRA");
+ test("unsafeGetPutObjectRA");
+ }
+
+ @Test
+ public void testUnsafeGetPutVolatile() {
+ testGraph("unsafeGetPutBooleanVolatile");
+ testGraph("unsafeGetPutByteVolatile");
+ testGraph("unsafeGetPutShortVolatile");
+ testGraph("unsafeGetPutCharVolatile");
+ testGraph("unsafeGetPutIntVolatile");
+ testGraph("unsafeGetPutLongVolatile");
+ testGraph("unsafeGetPutFloatVolatile");
+ testGraph("unsafeGetPutDoubleVolatile");
+ testGraph("unsafeGetPutObjectVolatile");
+
+ test("unsafeGetPutBooleanVolatile");
+ test("unsafeGetPutByteVolatile");
+ test("unsafeGetPutShortVolatile");
+ test("unsafeGetPutCharVolatile");
+ test("unsafeGetPutIntVolatile");
+ test("unsafeGetPutLongVolatile");
+ test("unsafeGetPutFloatVolatile");
+ test("unsafeGetPutDoubleVolatile");
+ test("unsafeGetPutObjectVolatile");
+ }
}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.jdk9.test/src/org/graalvm/compiler/replacements/jdk9/VarHandleTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -0,0 +1,180 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.replacements.jdk9;
+
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.VarHandle;
+
+import org.graalvm.compiler.core.test.GraalCompilerTest;
+import org.graalvm.compiler.debug.GraalError;
+import org.graalvm.compiler.graph.Node;
+import org.graalvm.compiler.nodes.StartNode;
+import org.graalvm.compiler.nodes.StructuredGraph;
+import org.graalvm.compiler.nodes.extended.MembarNode;
+import org.graalvm.compiler.nodes.memory.MemoryCheckpoint;
+import org.graalvm.compiler.nodes.memory.ReadNode;
+import org.graalvm.compiler.nodes.memory.WriteNode;
+import jdk.internal.vm.compiler.word.LocationIdentity;
+import org.junit.Assert;
+import org.junit.Test;
+
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+
+public class VarHandleTest extends GraalCompilerTest {
+
+ static class Holder {
+ /* Field is declared volatile, but accessed with non-volatile semantics in the tests. */
+ volatile int volatileField = 42;
+
+ /* Field is declared non-volatile, but accessed with volatile semantics in the tests. */
+ int field = 2018;
+
+ static final VarHandle VOLATILE_FIELD;
+ static final VarHandle FIELD;
+
+ static {
+ try {
+ VOLATILE_FIELD = MethodHandles.lookup().findVarHandle(Holder.class, "volatileField", int.class);
+ FIELD = MethodHandles.lookup().findVarHandle(Holder.class, "field", int.class);
+ } catch (ReflectiveOperationException ex) {
+ throw GraalError.shouldNotReachHere(ex);
+ }
+ }
+ }
+
+ public static int testRead1Snippet(Holder h) {
+ /* Explicitly access the volatile field with non-volatile access semantics. */
+ return (int) Holder.VOLATILE_FIELD.get(h);
+ }
+
+ public static int testRead2Snippet(Holder h) {
+ /* Explicitly access the volatile field with volatile access semantics. */
+ return (int) Holder.VOLATILE_FIELD.getVolatile(h);
+ }
+
+ public static int testRead3Snippet(Holder h) {
+ /* Explicitly access the non-volatile field with non-volatile access semantics. */
+ return (int) Holder.FIELD.get(h);
+ }
+
+ public static int testRead4Snippet(Holder h) {
+ /* Explicitly access the non-volatile field with volatile access semantics. */
+ return (int) Holder.FIELD.getVolatile(h);
+ }
+
+ public static void testWrite1Snippet(Holder h) {
+ /* Explicitly access the volatile field with non-volatile access semantics. */
+ Holder.VOLATILE_FIELD.set(h, 123);
+ }
+
+ public static void testWrite2Snippet(Holder h) {
+ /* Explicitly access the volatile field with volatile access semantics. */
+ Holder.VOLATILE_FIELD.setVolatile(h, 123);
+ }
+
+ public static void testWrite3Snippet(Holder h) {
+ /* Explicitly access the non-volatile field with non-volatile access semantics. */
+ Holder.FIELD.set(h, 123);
+ }
+
+ public static void testWrite4Snippet(Holder h) {
+ /* Explicitly access the non-volatile field with volatile access semantics. */
+ Holder.FIELD.setVolatile(h, 123);
+ }
+
+ void testAccess(String name, int expectedReads, int expectedWrites, int expectedMembars, int expectedAnyKill) {
+ ResolvedJavaMethod method = getResolvedJavaMethod(name);
+ StructuredGraph graph = parseForCompile(method);
+ compile(method, graph);
+ Assert.assertEquals(expectedReads, graph.getNodes().filter(ReadNode.class).count());
+ Assert.assertEquals(expectedWrites, graph.getNodes().filter(WriteNode.class).count());
+ Assert.assertEquals(expectedMembars, graph.getNodes().filter(MembarNode.class).count());
+ Assert.assertEquals(expectedAnyKill, countAnyKill(graph));
+ }
+
+ @Test
+ public void testRead1() {
+ testAccess("testRead1Snippet", 1, 0, 0, 0);
+ }
+
+ @Test
+ public void testRead2() {
+ testAccess("testRead2Snippet", 1, 0, 2, 2);
+ }
+
+ @Test
+ public void testRead3() {
+ testAccess("testRead3Snippet", 1, 0, 0, 0);
+ }
+
+ @Test
+ public void testRead4() {
+ testAccess("testRead4Snippet", 1, 0, 2, 2);
+ }
+
+ @Test
+ public void testWrite1() {
+ testAccess("testWrite1Snippet", 0, 1, 0, 0);
+ }
+
+ @Test
+ public void testWrite2() {
+ testAccess("testWrite2Snippet", 0, 1, 2, 2);
+ }
+
+ @Test
+ public void testWrite3() {
+ testAccess("testWrite3Snippet", 0, 1, 0, 0);
+ }
+
+ @Test
+ public void testWrite4() {
+ testAccess("testWrite4Snippet", 0, 1, 2, 2);
+ }
+
+ private static int countAnyKill(StructuredGraph graph) {
+ int anyKillCount = 0;
+ int startNodes = 0;
+ for (Node n : graph.getNodes()) {
+ if (n instanceof StartNode) {
+ startNodes++;
+ } else if (n instanceof MemoryCheckpoint.Single) {
+ MemoryCheckpoint.Single single = (MemoryCheckpoint.Single) n;
+ if (single.getLocationIdentity().isAny()) {
+ anyKillCount++;
+ }
+ } else if (n instanceof MemoryCheckpoint.Multi) {
+ MemoryCheckpoint.Multi multi = (MemoryCheckpoint.Multi) n;
+ for (LocationIdentity loc : multi.getLocationIdentities()) {
+ if (loc.isAny()) {
+ anyKillCount++;
+ break;
+ }
+ }
+ }
+ }
+ // There must be exactly one StartNode; it is intentionally not counted as an any-kill.
+ Assert.assertEquals(1, startNodes);
+ return anyKillCount;
+ }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/BitOpNodesTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/BitOpNodesTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -38,6 +38,7 @@
import org.graalvm.compiler.phases.tiers.HighTierContext;
import org.graalvm.compiler.replacements.nodes.BitScanReverseNode;
+import jdk.vm.ci.aarch64.AArch64;
import jdk.vm.ci.amd64.AMD64;
import jdk.vm.ci.code.Architecture;
import jdk.vm.ci.meta.JavaKind;
@@ -59,6 +60,47 @@
* Tests for BitCountNode canonicalizations.
*/
+ /**
+ * Determines if the current VM context supports intrinsics for the {@code bitCount} methods in
+ * {@link Integer} and {@link Long}.
+ */
+ public static boolean isBitCountIntrinsicSupported(Architecture arch) {
+ if (arch instanceof AMD64) {
+ AMD64 amd64 = (AMD64) arch;
+ return amd64.getFeatures().contains(AMD64.CPUFeature.POPCNT);
+ } else {
+ // Even though there are AArch64 intrinsics for bitCount, they do
+ // not use BitCountNode.
+ return arch instanceof SPARC;
+ }
+ }
+
+ /**
+ * Determines if the current VM context supports intrinsics for the {@code numberOfLeadingZeros}
+ * methods in {@link Integer} and {@link Long}.
+ */
+ public static boolean isNumberLeadingZerosIntrinsicSupported(Architecture arch) {
+ if (arch instanceof AMD64) {
+ AMD64 amd64 = (AMD64) arch;
+ return amd64.getFeatures().contains(AMD64.CPUFeature.LZCNT) && amd64.getFlags().contains(AMD64.Flag.UseCountLeadingZerosInstruction);
+ } else {
+ return arch instanceof SPARC || arch instanceof AArch64;
+ }
+ }
+
+ /**
+ * Determines if the current VM context supports intrinsics for the
+ * {@code numberOfTrailingZeros} methods in {@link Integer} and {@link Long}.
+ */
+ public static boolean isNumberTrailingZerosIntrinsicSupported(Architecture arch) {
+ if (arch instanceof AMD64) {
+ AMD64 amd64 = (AMD64) arch;
+ return amd64.getFeatures().contains(AMD64.CPUFeature.BMI1) && amd64.getFlags().contains(AMD64.Flag.UseCountTrailingZerosInstruction);
+ } else {
+ return arch instanceof SPARC || arch instanceof AArch64;
+ }
+ }
+
public static int bitCountIntConstantSnippet() {
return Integer.bitCount(INT_CONSTANT_1) + Integer.bitCount(INT_CONSTANT_2) + Integer.bitCount(INT_CONSTANT_3);
}
@@ -79,10 +121,7 @@
@Test
public void testBitCountInt() {
- Architecture arch = getBackend().getTarget().arch;
- boolean isAmd64WithPopCount = arch instanceof AMD64 && ((AMD64) arch).getFeatures().contains(AMD64.CPUFeature.POPCNT);
- boolean isSparc = arch instanceof SPARC;
- Assume.assumeTrue("Only works on hardware with popcnt at the moment", isAmd64WithPopCount || isSparc);
+ Assume.assumeTrue(isBitCountIntrinsicSupported(getBackend().getTarget().arch));
ValueNode result = parseAndInline("bitCountIntSnippet");
Assert.assertEquals(StampFactory.forInteger(JavaKind.Int, 8, 24), result.stamp(NodeView.DEFAULT));
}
@@ -93,10 +132,7 @@
@Test
public void testBitCountIntEmpty() {
- Architecture arch = getBackend().getTarget().arch;
- boolean isAmd64WithPopCount = arch instanceof AMD64 && ((AMD64) arch).getFeatures().contains(AMD64.CPUFeature.POPCNT);
- boolean isSparc = arch instanceof SPARC;
- Assume.assumeTrue("Only works on hardware with popcnt at the moment", isAmd64WithPopCount || isSparc);
+ Assume.assumeTrue(isBitCountIntrinsicSupported(getBackend().getTarget().arch));
ValueNode result = parseAndInline("bitCountIntEmptySnippet");
Assert.assertEquals(StampFactory.forInteger(JavaKind.Int, 0, 24), result.stamp(NodeView.DEFAULT));
}
@@ -113,10 +149,7 @@
@Test
public void testBitCountLong() {
- Architecture arch = getBackend().getTarget().arch;
- boolean isAmd64WithPopCount = arch instanceof AMD64 && ((AMD64) arch).getFeatures().contains(AMD64.CPUFeature.POPCNT);
- boolean isSparc = arch instanceof SPARC;
- Assume.assumeTrue("Only works on hardware with popcnt at the moment", isAmd64WithPopCount || isSparc);
+ Assume.assumeTrue(isBitCountIntrinsicSupported(getBackend().getTarget().arch));
ValueNode result = parseAndInline("bitCountLongSnippet");
Assert.assertEquals(StampFactory.forInteger(JavaKind.Int, 8, 40), result.stamp(NodeView.DEFAULT));
}
@@ -127,10 +160,7 @@
@Test
public void testBitCountLongEmpty() {
- Architecture arch = getBackend().getTarget().arch;
- boolean isAmd64WithPopCount = arch instanceof AMD64 && ((AMD64) arch).getFeatures().contains(AMD64.CPUFeature.POPCNT);
- boolean isSparc = arch instanceof SPARC;
- Assume.assumeTrue("Only works on hardware with popcnt at the moment", isAmd64WithPopCount || isSparc);
+ Assume.assumeTrue(isBitCountIntrinsicSupported(getBackend().getTarget().arch));
ValueNode result = parseAndInline("bitCountLongEmptySnippet");
Assert.assertEquals(StampFactory.forInteger(JavaKind.Int, 0, 40), result.stamp(NodeView.DEFAULT));
}
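
The three helper predicates added above centralize the JVMCI feature probing that the individual @Test methods previously duplicated inline. As a hedged, stand-alone sketch of the same AMD64 POPCNT check (the class name PopcntProbe and the way an Architecture instance would be obtained are assumptions for illustration, not part of this change):

    import jdk.vm.ci.amd64.AMD64;
    import jdk.vm.ci.code.Architecture;

    final class PopcntProbe {
        private PopcntProbe() {
        }

        // True only for AMD64 CPUs reporting the POPCNT feature; other architectures
        // are conservatively treated as unsupported in this sketch.
        static boolean hasPopcnt(Architecture arch) {
            return arch instanceof AMD64 && ((AMD64) arch).getFeatures().contains(AMD64.CPUFeature.POPCNT);
        }
    }

In the tests themselves the predicates are consumed through JUnit's Assume, as in the hunks above, so unsupported platforms skip the test rather than fail it.
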
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/ObjectAccessTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/ObjectAccessTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -28,6 +28,8 @@
import org.graalvm.compiler.nodes.ReturnNode;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.calc.ConvertNode;
import org.graalvm.compiler.nodes.calc.SignExtendNode;
import org.graalvm.compiler.nodes.extended.JavaReadNode;
import org.graalvm.compiler.nodes.extended.JavaWriteNode;
@@ -74,21 +76,21 @@
@Test
public void testWrite1() {
for (JavaKind kind : KINDS) {
- assertWrite(parseEager("write" + kind.name() + "1", AllowAssumptions.YES), true, ID);
+ assertWrite(parseEager("write" + kind.name() + "1", AllowAssumptions.YES), kind, true, ID);
}
}
@Test
public void testWrite2() {
for (JavaKind kind : KINDS) {
- assertWrite(parseEager("write" + kind.name() + "2", AllowAssumptions.YES), true, ID);
+ assertWrite(parseEager("write" + kind.name() + "2", AllowAssumptions.YES), kind, true, ID);
}
}
@Test
public void testWrite3() {
for (JavaKind kind : KINDS) {
- assertWrite(parseEager("write" + kind.name() + "3", AllowAssumptions.YES), true, LocationIdentity.any());
+ assertWrite(parseEager("write" + kind.name() + "3", AllowAssumptions.YES), kind, true, LocationIdentity.any());
}
}
@@ -113,10 +115,15 @@
Assert.assertEquals(read, ret.result());
}
- private static void assertWrite(StructuredGraph graph, boolean indexConvert, LocationIdentity locationIdentity) {
+ private static void assertWrite(StructuredGraph graph, JavaKind kind, boolean indexConvert, LocationIdentity locationIdentity) {
JavaWriteNode write = (JavaWriteNode) graph.start().next();
- Assert.assertEquals(graph.getParameter(2), write.value());
-
+ ValueNode valueNode = write.value();
+ if (kind != kind.getStackKind()) {
+ while (valueNode instanceof ConvertNode) {
+ valueNode = ((ConvertNode) valueNode).getValue();
+ }
+ }
+ Assert.assertEquals(graph.getParameter(2), valueNode);
OffsetAddressNode address = (OffsetAddressNode) write.getAddress();
Assert.assertEquals(graph.getParameter(0), address.getBase());
Assert.assertEquals(BytecodeFrame.AFTER_BCI, write.stateAfter().bci);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/PointerTest.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.test/src/org/graalvm/compiler/replacements/test/PointerTest.java Mon Jun 18 09:48:22 2018 -0700
@@ -28,6 +28,8 @@
import org.graalvm.compiler.nodes.ReturnNode;
import org.graalvm.compiler.nodes.StructuredGraph;
import org.graalvm.compiler.nodes.StructuredGraph.AllowAssumptions;
+import org.graalvm.compiler.nodes.ValueNode;
+import org.graalvm.compiler.nodes.calc.ConvertNode;
import org.graalvm.compiler.nodes.calc.SignExtendNode;
import org.graalvm.compiler.nodes.extended.JavaReadNode;
import org.graalvm.compiler.nodes.extended.JavaWriteNode;
@@ -84,21 +86,21 @@
@Test
public void testWrite1() {
for (JavaKind kind : KINDS) {
- assertWrite(parseEager("write" + kind.name() + "1", AllowAssumptions.YES), true, ID);
+ assertWrite(parseEager("write" + kind.name() + "1", AllowAssumptions.YES), kind, true, ID);
}
}
@Test
public void testWrite2() {
for (JavaKind kind : KINDS) {
- assertWrite(parseEager("write" + kind.name() + "2", AllowAssumptions.YES), true, ID);
+ assertWrite(parseEager("write" + kind.name() + "2", AllowAssumptions.YES), kind, true, ID);
}
}
@Test
public void testWrite3() {
for (JavaKind kind : KINDS) {
- assertWrite(parseEager("write" + kind.name() + "3", AllowAssumptions.YES), true, LocationIdentity.any());
+ assertWrite(parseEager("write" + kind.name() + "3", AllowAssumptions.YES), kind, true, LocationIdentity.any());
}
}
@@ -128,11 +130,17 @@
Assert.assertEquals(read, ret.result());
}
- private void assertWrite(StructuredGraph graph, boolean indexConvert, LocationIdentity locationIdentity) {
+ private void assertWrite(StructuredGraph graph, JavaKind kind, boolean indexConvert, LocationIdentity locationIdentity) {
WordCastNode cast = (WordCastNode) graph.start().next();
JavaWriteNode write = (JavaWriteNode) cast.next();
- Assert.assertEquals(graph.getParameter(2), write.value());
+ ValueNode valueNode = write.value();
+ if (kind != kind.getStackKind()) {
+ while (valueNode instanceof ConvertNode) {
+ valueNode = ((ConvertNode) valueNode).getValue();
+ }
+ }
+ Assert.assertEquals(graph.getParameter(2), valueNode);
Assert.assertEquals(BytecodeFrame.AFTER_BCI, write.stateAfter().bci);
OffsetAddressNode address = (OffsetAddressNode) write.getAddress();
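
Both assertWrite rewrites above (ObjectAccessTest and PointerTest) look through ConvertNodes because sub-word values (boolean, byte, short, char) are widened to int on the operand stack, so for those kinds the value feeding the JavaWriteNode is a conversion of the parameter rather than the parameter itself. A hypothetical, stand-alone illustration of the stack-kind distinction that the kind != kind.getStackKind() check relies on (the class name and main method are only for demonstration; running it typically needs the JVMCI module resolved and exported, e.g. --add-modules jdk.internal.vm.ci --add-exports jdk.internal.vm.ci/jdk.vm.ci.meta=ALL-UNNAMED):

    import jdk.vm.ci.meta.JavaKind;

    final class StackKindDemo {
        public static void main(String[] args) {
            // Sub-word kinds report Int as their stack kind, which is why the tests
            // only unwrap ConvertNodes when kind != kind.getStackKind().
            JavaKind[] kinds = {JavaKind.Boolean, JavaKind.Byte, JavaKind.Short, JavaKind.Char, JavaKind.Int, JavaKind.Long};
            for (JavaKind kind : kinds) {
                System.out.println(kind + " -> stack kind " + kind.getStackKind());
            }
        }
    }
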
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/Log.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/Log.java Mon Jun 18 09:48:22 2018 -0700
@@ -40,7 +40,7 @@
public static final ForeignCallDescriptor LOG_PRIMITIVE = new ForeignCallDescriptor("logPrimitive", void.class, int.class, long.class, boolean.class);
public static final ForeignCallDescriptor LOG_OBJECT = new ForeignCallDescriptor("logObject", void.class, Object.class, boolean.class, boolean.class);
- public static final ForeignCallDescriptor LOG_PRINTF = new ForeignCallDescriptor("logPrintf", void.class, Object.class, long.class, long.class, long.class);
+ public static final ForeignCallDescriptor LOG_PRINTF = new ForeignCallDescriptor("logPrintf", void.class, String.class, long.class, long.class, long.class);
@NodeIntrinsic(ForeignCallNode.class)
private static native void log(@ConstantNodeParameter ForeignCallDescriptor logObject, Object object, boolean asString, boolean newline);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java Mon Jun 18 09:48:22 2018 -0700
@@ -846,7 +846,9 @@
// Copy inlined methods from inlinee to caller
List<ResolvedJavaMethod> inlinedMethods = graphToInline.getInlinedMethods();
if (inlinedMethods != null) {
- graph.getMethods().addAll(inlinedMethods);
+ for (ResolvedJavaMethod other : inlinedMethods) {
+ graph.recordMethod(other);
+ }
}
if (graphToInline.getFields() != null) {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/StandardGraphBuilderPlugins.java Mon Jun 18 09:48:22 2018 -0700
@@ -267,14 +267,15 @@
private static UnsafeCompareAndExchangePluginsRegistrar unsafeCompareAndExchangePluginsRegistrar = new UnsafeCompareAndExchangePluginsRegistrar();
public static void registerPlatformSpecificUnsafePlugins(InvocationPlugins plugins, BytecodeProvider bytecodeProvider, JavaKind[] supportedCasKinds) {
- Registration r;
- if (Java8OrEarlier) {
- r = new Registration(plugins, Unsafe.class);
- } else {
- r = new Registration(plugins, "jdk.internal.misc.Unsafe", bytecodeProvider);
+ registerPlatformSpecificUnsafePlugins(supportedCasKinds, new Registration(plugins, Unsafe.class), true);
+ if (!Java8OrEarlier) {
+ registerPlatformSpecificUnsafePlugins(supportedCasKinds, new Registration(plugins, "jdk.internal.misc.Unsafe", bytecodeProvider), false);
}
- if (Java8OrEarlier) {
+ }
+
+ private static void registerPlatformSpecificUnsafePlugins(JavaKind[] supportedCasKinds, Registration r, boolean java8OrEarlier) {
+ if (java8OrEarlier) {
unsafeCompareAndSwapPluginsRegistrar.register(r, "compareAndSwap", new JavaKind[]{JavaKind.Int, JavaKind.Long, JavaKind.Object});
} else {
unsafeCompareAndSwapPluginsRegistrar.register(r, "compareAndSet", supportedCasKinds);
@@ -283,13 +284,13 @@
}
private static void registerUnsafePlugins(InvocationPlugins plugins, BytecodeProvider bytecodeProvider) {
- Registration r;
- if (Java8OrEarlier) {
- r = new Registration(plugins, Unsafe.class);
- } else {
- r = new Registration(plugins, "jdk.internal.misc.Unsafe", bytecodeProvider);
+ registerUnsafePlugins(new Registration(plugins, Unsafe.class), true);
+ if (!Java8OrEarlier) {
+ registerUnsafePlugins(new Registration(plugins, "jdk.internal.misc.Unsafe", bytecodeProvider), false);
}
+ }
+ private static void registerUnsafePlugins(Registration r, boolean java8OrEarlier) {
for (JavaKind kind : JavaKind.values()) {
if ((kind.isPrimitive() && kind != JavaKind.Void) || kind == JavaKind.Object) {
Class<?> javaClass = kind == JavaKind.Object ? Object.class : kind.toJavaClass();
@@ -297,30 +298,33 @@
String getName = "get" + kindName;
String putName = "put" + kindName;
// Object-based accesses
- r.register3(getName, Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind, false));
- r.register4(putName, Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind, false));
+ r.register3(getName, Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind));
+ r.register4(putName, Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind));
// Volatile object-based accesses
- r.register3(getName + "Volatile", Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind, true));
- r.register4(putName + "Volatile", Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind, true));
+ r.register3(getName + "Volatile", Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind, AccessKind.VOLATILE));
+ r.register4(putName + "Volatile", Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind, AccessKind.VOLATILE));
// Ordered object-based accesses
- if (Java8OrEarlier) {
+ if (java8OrEarlier) {
if (kind == JavaKind.Int || kind == JavaKind.Long || kind == JavaKind.Object) {
- r.register4("putOrdered" + kindName, Receiver.class, Object.class, long.class, javaClass, UnsafePutPlugin.putOrdered(kind));
+ r.register4("putOrdered" + kindName, Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind, AccessKind.RELEASE_ACQUIRE));
}
} else {
- r.register4("put" + kindName + "Release", Receiver.class, Object.class, long.class, javaClass, UnsafePutPlugin.putOrdered(kind));
+ r.register4("put" + kindName + "Release", Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind, AccessKind.RELEASE_ACQUIRE));
+ r.register3("get" + kindName + "Acquire", Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind, AccessKind.RELEASE_ACQUIRE));
+ r.register4("put" + kindName + "Opaque", Receiver.class, Object.class, long.class, javaClass, new UnsafePutPlugin(kind, AccessKind.OPAQUE));
+ r.register3("get" + kindName + "Opaque", Receiver.class, Object.class, long.class, new UnsafeGetPlugin(kind, AccessKind.OPAQUE));
}
if (kind != JavaKind.Boolean && kind != JavaKind.Object) {
// Raw accesses to memory addresses
- r.register2(getName, Receiver.class, long.class, new UnsafeGetPlugin(kind, false));
- r.register3(putName, Receiver.class, long.class, kind.toJavaClass(), new UnsafePutPlugin(kind, false));
+ r.register2(getName, Receiver.class, long.class, new UnsafeGetPlugin(kind));
+ r.register3(putName, Receiver.class, long.class, kind.toJavaClass(), new UnsafePutPlugin(kind));
}
}
}
// Accesses to native memory addresses.
- r.register2("getAddress", Receiver.class, long.class, new UnsafeGetPlugin(JavaKind.Long, false));
- r.register3("putAddress", Receiver.class, long.class, long.class, new UnsafePutPlugin(JavaKind.Long, false));
+ r.register2("getAddress", Receiver.class, long.class, new UnsafeGetPlugin(JavaKind.Long));
+ r.register3("putAddress", Receiver.class, long.class, long.class, new UnsafePutPlugin(JavaKind.Long));
r.register2("allocateInstance", Receiver.class, Class.class, new InvocationPlugin() {
@@ -726,14 +730,52 @@
}
}
+ /**
+ * The memory order modes introduced in JDK 9 are defined with cumulative effect, from weakest
+ * to strongest: Plain, Opaque, Release/Acquire, and Volatile. The existing Plain and Volatile
+ * modes are defined compatibly with their pre-JDK 9 forms. Any guaranteed property of a weaker
+ * mode, plus more, holds for a stronger mode; conversely, implementations are allowed to use a
+ * stronger mode than requested for any access. In JDK 9, these modes are provided without a
+ * full formal specification.
+ */
+ enum AccessKind {
+ PLAIN(0, 0, 0, 0, false),
+ /**
+ * Opaque accesses are wrapped by dummy membars to keep them from floating or being hoisted.
+ * This is stronger than strictly required, since Opaque mode does not directly impose any
+ * ordering constraints with respect to other variables beyond Plain mode.
+ */
+ OPAQUE(0, 0, 0, 0, true),
+ RELEASE_ACQUIRE(0, LOAD_LOAD | LOAD_STORE, LOAD_STORE | STORE_STORE, 0, true),
+ VOLATILE(JMM_PRE_VOLATILE_READ, JMM_POST_VOLATILE_READ, JMM_PRE_VOLATILE_WRITE, JMM_POST_VOLATILE_WRITE, true);
+
+ public final boolean emitBarriers;
+ public final int preReadBarriers;
+ public final int postReadBarriers;
+ public final int preWriteBarriers;
+ public final int postWriteBarriers;
+
+ AccessKind(int preReadBarriers, int postReadBarriers, int preWriteBarriers, int postWriteBarriers, boolean emitBarriers) {
+ this.emitBarriers = emitBarriers;
+ this.preReadBarriers = preReadBarriers;
+ this.postReadBarriers = postReadBarriers;
+ this.preWriteBarriers = preWriteBarriers;
+ this.postWriteBarriers = postWriteBarriers;
+ }
+ }
+
public static class UnsafeGetPlugin implements InvocationPlugin {
private final JavaKind returnKind;
- private final boolean isVolatile;
+ private final AccessKind accessKind;
- public UnsafeGetPlugin(JavaKind returnKind, boolean isVolatile) {
+ public UnsafeGetPlugin(JavaKind returnKind) {
+ this(returnKind, AccessKind.PLAIN);
+ }
+
+ public UnsafeGetPlugin(JavaKind returnKind, AccessKind accessKind) {
this.returnKind = returnKind;
- this.isVolatile = isVolatile;
+ this.accessKind = accessKind;
}
@Override
@@ -749,13 +791,13 @@
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset) {
// Emits a null-check for the otherwise unused receiver
unsafe.get();
- if (isVolatile) {
- b.add(new MembarNode(JMM_PRE_VOLATILE_READ));
+ if (accessKind.emitBarriers) {
+ b.add(new MembarNode(accessKind.preReadBarriers));
}
LocationIdentity locationIdentity = object.isNullConstant() ? OFF_HEAP_LOCATION : LocationIdentity.any();
b.addPush(returnKind, new RawLoadNode(object, offset, returnKind, locationIdentity));
- if (isVolatile) {
- b.add(new MembarNode(JMM_POST_VOLATILE_READ));
+ if (accessKind.emitBarriers) {
+ b.add(new MembarNode(accessKind.postReadBarriers));
}
b.getGraph().markUnsafeAccess();
return true;
@@ -763,31 +805,22 @@
}
public static class UnsafePutPlugin implements InvocationPlugin {
+ private final JavaKind kind;
+ private final AccessKind accessKind;
- private final JavaKind kind;
- private final boolean hasBarrier;
- private final int preWrite;
- private final int postWrite;
-
- public UnsafePutPlugin(JavaKind kind, boolean isVolatile) {
- this(kind, isVolatile, JMM_PRE_VOLATILE_WRITE, JMM_POST_VOLATILE_WRITE);
+ public UnsafePutPlugin(JavaKind kind) {
+ this(kind, AccessKind.PLAIN);
}
- private UnsafePutPlugin(JavaKind kind, boolean hasBarrier, int preWrite, int postWrite) {
+ private UnsafePutPlugin(JavaKind kind, AccessKind accessKind) {
super();
this.kind = kind;
- this.hasBarrier = hasBarrier;
- this.preWrite = preWrite;
- this.postWrite = postWrite;
- }
-
- public static UnsafePutPlugin putOrdered(JavaKind kind) {
- return new UnsafePutPlugin(kind, true, LOAD_STORE | STORE_STORE, 0);
+ this.accessKind = accessKind;
}
@Override
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode address, ValueNode value) {
- assert !hasBarrier : "Barriers for address based Unsafe put is not supported.";
+ assert !accessKind.emitBarriers : "Barriers for address-based Unsafe put are not supported.";
// Emits a null-check for the otherwise unused receiver
unsafe.get();
b.add(new UnsafeMemoryStoreNode(address, value, kind, OFF_HEAP_LOCATION));
@@ -799,13 +832,13 @@
public boolean apply(GraphBuilderContext b, ResolvedJavaMethod targetMethod, Receiver unsafe, ValueNode object, ValueNode offset, ValueNode value) {
// Emits a null-check for the otherwise unused receiver
unsafe.get();
- if (hasBarrier) {
- b.add(new MembarNode(preWrite));
+ if (accessKind.emitBarriers) {
+ b.add(new MembarNode(accessKind.preWriteBarriers));
}
LocationIdentity locationIdentity = object.isNullConstant() ? OFF_HEAP_LOCATION : LocationIdentity.any();
- b.add(new RawStoreNode(object, offset, value, kind, locationIdentity));
- if (hasBarrier) {
- b.add(new MembarNode(postWrite));
+ b.add(new RawStoreNode(object, offset, b.maskSubWordValue(value, kind), kind, locationIdentity));
+ if (accessKind.emitBarriers) {
+ b.add(new MembarNode(accessKind.postWriteBarriers));
}
b.getGraph().markUnsafeAccess();
return true;
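
The AccessKind enum introduced above mirrors the JDK 9 memory order modes that the new jdk.internal.misc.Unsafe entry points expose: the get*Opaque/put*Opaque registrations map to OPAQUE, get*Acquire/put*Release to RELEASE_ACQUIRE, and the Volatile variants to VOLATILE. For readers who want to see the same ladder of strengths through a public API, the VarHandle access modes added in JDK 9 provide it; the snippet below is only an illustrative aside (the class AccessModesDemo is hypothetical and not part of the patch):

    import java.lang.invoke.MethodHandles;
    import java.lang.invoke.VarHandle;

    final class AccessModesDemo {
        int x;

        public static void main(String[] args) throws ReflectiveOperationException {
            VarHandle X = MethodHandles.lookup().findVarHandle(AccessModesDemo.class, "x", int.class);
            AccessModesDemo d = new AccessModesDemo();

            X.set(d, 1);          // Plain: atomic for int, no ordering guarantees across variables
            X.setOpaque(d, 2);    // Opaque: per-variable coherence, still no cross-variable ordering
            X.setRelease(d, 3);   // Release/Acquire: pairs with getAcquire on the reader side
            X.setVolatile(d, 4);  // Volatile: strongest mode, compatible with volatile field semantics

            System.out.println((int) X.getAcquire(d));
        }
    }

The enum's emitBarriers flag plus its pre/post read and write masks are the only inputs UnsafeGetPlugin and UnsafePutPlugin need, which is what lets the old isVolatile flag and the putOrdered special case above collapse into a single AccessKind constructor parameter.
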
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/PEReadEliminationClosure.java Mon Jun 18 16:13:21 2018 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.virtual/src/org/graalvm/compiler/virtual/phases/ea/PEReadEliminationClosure.java Mon Jun 18 09:48:22 2018 -0700
@@ -244,29 +244,9 @@
return processLoad(load, load.object(), new FieldLocationIdentity(load.field()), -1, load.field().getJavaKind(), state, effects);
}
- private static JavaKind getElementKindFromStamp(ValueNode array) {
- ResolvedJavaType type = StampTool.typeOrNull(array);
- if (type != null && type.isArray()) {
- return type.getComponentType().getJavaKind();
- } else {
- // It is likely an OSRLocal without valid stamp
- return JavaKind.Illegal;
- }
- }
-
private boolean processStoreIndexed(StoreIndexedNode store, PEReadEliminationBlockState state, GraphEffectList effects) {
int index = store.index().isConstant() ? ((JavaConstant) store.index().asConstant()).asInt() : -1;
- // BASTORE (with elementKind being Byte) can be used to store values in boolean arrays.
JavaKind elementKind = store.elementKind();
- if (elementKind == JavaKind.Byte) {
- elementKind = getElementKindFromStamp(store.array());
- if (elementKind == JavaKind.Illegal) {
- // Could not determine the actual access kind from stamp. Hence kill both.
- state.killReadCache(NamedLocationIdentity.getArrayLocation(JavaKind.Boolean), index);
- state.killReadCache(NamedLocationIdentity.getArrayLocation(JavaKind.Byte), index);
- return false;
- }
- }
LocationIdentity arrayLocation = NamedLocationIdentity.getArrayLocation(elementKind);
if (index != -1) {
return processStore(store, store.array(), arrayLocation, index, elementKind, false, store.value(), state, effects);
@@ -279,15 +259,7 @@
private boolean processLoadIndexed(LoadIndexedNode load, PEReadEliminationBlockState state, GraphEffectList effects) {
if (load.index().isConstant()) {
int index = ((JavaConstant) load.index().asConstant()).asInt();
- // BALOAD (with elementKind being Byte) can be used to retrieve values from boolean
- // arrays.
JavaKind elementKind = load.elementKind();
- if (elementKind == JavaKind.Byte) {
- elementKind = getElementKindFromStamp(load.array());
- if (elementKind == JavaKind.Illegal) {
- return false;
- }
- }
LocationIdentity arrayLocation = NamedLocationIdentity.getArrayLocation(elementKind);
return processLoad(load, load.array(), arrayLocation, index, elementKind, state, effects);
}